From 1be5069a26c4073ea4b0adc56369603cdcdb03af Mon Sep 17 00:00:00 2001
From: xiaoyunzhao
Date: Thu, 31 Jul 2025 14:34:14 +0800
Subject: [PATCH 1/8] remove python2 requires and add patches for python3

---
 qt5-qtwebengine.spec                          |    38 +-
 qtwebengine-python2-to-python3.patch          | 74112 ++++++++++++++++
 qtwebengine-python311.patch                   |   108 +
 ...ngine-qt5-webengine-chromium-python3.patch |  1812 +
 qtwebengine-qt5-webengine-python3.patch       |   161 +
 5 files changed, 76226 insertions(+), 5 deletions(-)
 create mode 100644 qtwebengine-python2-to-python3.patch
 create mode 100644 qtwebengine-python311.patch
 create mode 100644 qtwebengine-qt5-webengine-chromium-python3.patch
 create mode 100644 qtwebengine-qt5-webengine-python3.patch

diff --git a/qt5-qtwebengine.spec b/qt5-qtwebengine.spec
index 199558c..4d9f8a2 100644
--- a/qt5-qtwebengine.spec
+++ b/qt5-qtwebengine.spec
@@ -29,7 +29,9 @@ Summary: Qt5 - QtWebEngine components
 Name: qt5-qtwebengine
 Version: 5.15.16
-Release: 4%{?dist}
+Release: 5%{?dist}
+
+ExcludeArch: loongarch64
 
 License: (LGPLv2 with exceptions or GPLv3 with exceptions) and BSD and LGPLv2+ and ASL 2.0 and IJG and MIT and GPLv2+ and ISC and OpenSSL and (MPLv1.1 or GPLv2 or LGPLv2)
 URL: http://www.qt.io
@@ -53,6 +55,12 @@ Patch35: qt5-qtwebengine-c99.patch
 Patch50: 0001-avcodec-x86-mathops-clip-constants-used-with-shift-i.patch
 Patch51: qtwebengine-icu-74.patch
 
+
+Patch61: qtwebengine-qt5-webengine-chromium-python3.patch
+Patch62: qtwebengine-qt5-webengine-python3.patch
+Patch63: qtwebengine-python311.patch
+Patch64: qtwebengine-python2-to-python3.patch
+
 #Patch60: qtwebengine-ffmpeg5.patch
 
 Patch100: v8.patch
@@ -134,7 +142,7 @@ BuildRequires: pkgconfig(lcms2)
 BuildRequires: pkgconfig(xkbcommon)
 BuildRequires: pkgconfig(xkbfile)
 BuildRequires: perl-interpreter
-BuildRequires: /usr/bin/python2
+#BuildRequires: /usr/bin/python2
 %if 0%{?use_system_libvpx}
 BuildRequires: pkgconfig(vpx) >= 1.8.0
 %endif
@@ -254,6 +262,10 @@ popd
 %patch -P34 -p1 -b .fix-build
 %patch -P35 -p1 -b .c99
 
+%patch -P61 -p1 -b .python3-chromium
+%patch -P62 -p1 -b .python3
+%patch -P63 -p1 -b .python311
+%patch -P64 -p1
 %patch -P50 -p1 -b .0001-avcodec-x86-mathops-clip-constants-used-with-shift-i
 %patch -P51 -p1 -b .icu-74
 
@@ -282,12 +294,13 @@
 test -f "./include/QtWebEngineCore/qtwebenginecoreglobal.h"
 
 %build
-export PATH=$(pwd)/python2/usr/bin:$PATH
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/python2/usr/lib64
-
+export PATH=$(pwd)/python3/usr/bin:$PATH
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/python3/usr/lib64
+export LT_SYS_LIBRARY_PATH=/usr/lib64
 export STRIP=strip
 export NINJAFLAGS="%{__ninja_common_opts}"
 export NINJA_PATH=%{__ninja}
+export QT5_LIBRARY_PATH=%{_qt5_libdir}
 
 %{qmake_qt5} \
 %{?debug_config:CONFIG+="%{debug_config}}" \
@@ -295,6 +308,7 @@
 %if 0%{?fedora} && 0%{?fedora} < 30
 CONFIG+="link_pulseaudio" \
 %else
 CONFIG+="link_pulseaudio use_gold_linker" \
+ CONFIG+="libdir %{_qt5_libdir}" \
 %endif
 QMAKE_EXTRA_ARGS+="-system-webengine-ffmpeg -system-webengine-webp -system-webengine-opus" \
 QMAKE_EXTRA_ARGS+="-webengine-kerberos" \
 
 %{?pipewire:QMAKE_EXTRA_ARGS+="-webengine-webrtc-pipewire"} \
 . 
+ncpus=%{_smp_build_ncpus}
+%ifarch aarch64
+maxcpus=$(( ($(awk '/^MemTotal:/{print $2}' /proc/meminfo)/1024/1024+1)/4 ))
+%else
+maxcpus=$(( ($(awk '/^MemTotal:/{print $2}' /proc/meminfo)/1024/1024+1)/2*4/5))
+%endif
+if [ "$maxcpus" -ge 1 -a "$maxcpus" -lt "$ncpus" ]; then
+  ncpus=$maxcpus
+fi
+%define _smp_mflags -j$ncpus
+
 make %{?_smp_mflags}
 
 %install
@@ -435,6 +460,9 @@ done
 %{_qt5_examplesdir}/
 
 %changelog
+* Thu Jul 31 2025 xiaoyunzhao - 5.15.16-5
+- remove python2 requires and add patches for python3
+
 * Thu Jun 12 2025 bbrucezhang - 5.15.16-4
 - Rebuilt for loongarch64
 
diff --git a/qtwebengine-python2-to-python3.patch b/qtwebengine-python2-to-python3.patch
new file mode 100644
index 0000000..beb9969
--- /dev/null
+++ b/qtwebengine-python2-to-python3.patch
@@ -0,0 +1,74112 @@
+--- a/src/3rdparty/chromium/PRESUBMIT.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/PRESUBMIT.py	2025-01-16 02:26:08.512264221 +0800
+@@ -1987,7 +1987,7 @@
+   file_filter = lambda f: f.LocalPath().endswith(('.cc', '.mm', '.h'))
+   for f in input_api.AffectedFiles(file_filter=file_filter):
+     # Don't check //components/arc, not yet migrated (see crrev.com/c/1868870).
+-    if any(map(lambda path: f.LocalPath().startswith(path), ok_paths)):
++    if any([f.LocalPath().startswith(path) for path in ok_paths]):
+       continue
+ 
+     for line_num, line in f.ChangedContents():
+@@ -1997,7 +1997,7 @@
+ 
+     if problems:
+       # Raise errors inside |error_paths| and warnings everywhere else.
+-      if any(map(lambda path: f.LocalPath().startswith(path), error_paths)):
++      if any([f.LocalPath().startswith(path) for path in error_paths]):
+         errors.extend(problems)
+       else:
+         warnings.extend(problems)
+@@ -2336,7 +2336,7 @@
+         if rule.startswith('+') or rule.startswith('!')
+     ])
+     for _, rules in parsed_deps.get('specific_include_rules',
+-                                    {}).iteritems():
++                                    {}).items():
+       add_rules.update([
+           rule[1:] for rule in rules
+           if rule.startswith('+') or rule.startswith('!')
+@@ -2364,7 +2364,7 @@
+       'Var': _VarImpl(local_scope).Lookup,
+       'Str': str,
+   }
+-  exec contents in global_scope, local_scope
++  exec(contents, global_scope, local_scope)
+   return local_scope
+ 
+ 
+@@ -3062,11 +3062,11 @@
+ 
+   # Go through the OWNERS files to check, filtering out rules that are already
+   # present in that OWNERS file.
+-  for owners_file, patterns in to_check.iteritems():
++  for owners_file, patterns in to_check.items():
+     try:
+       with file(owners_file) as f:
+         lines = set(f.read().splitlines())
+-        for entry in patterns.itervalues():
++        for entry in patterns.values():
+           entry['rules'] = [rule for rule in entry['rules'] if rule not in lines
+                            ]
+     except IOError:
+@@ -3075,10 +3075,10 @@
+ 
+   # All the remaining lines weren't found in OWNERS files, so emit an error.
+   errors = []
+-  for owners_file, patterns in to_check.iteritems():
++  for owners_file, patterns in to_check.items():
+     missing_lines = []
+     files = []
+-    for _, entry in patterns.iteritems():
++    for _, entry in patterns.items():
+       missing_lines.extend(entry['rules'])
+       files.extend(['  %s' % f.LocalPath() for f in entry['files']])
+     if missing_lines:
+@@ -3118,7 +3118,7 @@
+   }
+   _PATTERNS_TO_CHECK = {
+       k: input_api.re.compile(v)
+-      for k, v in _PATTERNS_TO_CHECK.items()
++      for k, v in list(_PATTERNS_TO_CHECK.items())
+   }
+ 
+   # Scan all affected files for changes touching _FUNCTIONS_TO_CHECK.
+@@ -3131,7 +3131,7 @@
+       # as adding or changing the arguments. 
+ if line.startswith('-') or (line.startswith('+') and + not line.startswith('++')): +- for name, pattern in _PATTERNS_TO_CHECK.items(): ++ for name, pattern in list(_PATTERNS_TO_CHECK.items()): + if pattern.search(line): + path = f.LocalPath() + if not path in files_to_functions: +@@ -3161,7 +3161,7 @@ + if not has_security_owner: + msg = 'The following files change calls to security-sensive functions\n' \ + 'that need to be reviewed by {}.\n'.format(owners_file) +- for path, names in files_to_functions.items(): ++ for path, names in list(files_to_functions.items()): + msg += ' {}\n'.format(path) + for name in names: + msg += ' {}\n'.format(name) +@@ -3883,7 +3883,7 @@ + return [] + + error_descriptions = [] +- for file_path, bad_lines in bad_files.iteritems(): ++ for file_path, bad_lines in bad_files.items(): + error_description = file_path + for line in bad_lines: + error_description += '\n ' + line +@@ -4845,8 +4845,8 @@ + git_footers = input_api.change.GitFootersFromDescription() + skip_screenshot_check_footer = [ + footer.lower() +- for footer in git_footers.get(u'Skip-Translation-Screenshots-Check', [])] +- run_screenshot_check = u'true' not in skip_screenshot_check_footer ++ for footer in git_footers.get('Skip-Translation-Screenshots-Check', [])] ++ run_screenshot_check = 'true' not in skip_screenshot_check_footer + + import os + import re +@@ -5070,18 +5070,18 @@ + if file_path.endswith('.grdp'): + if f.OldContents(): + old_id_to_msg_map = grd_helper.GetGrdpMessagesFromString( +- unicode('\n'.join(f.OldContents()))) ++ str('\n'.join(f.OldContents()))) + if f.NewContents(): + new_id_to_msg_map = grd_helper.GetGrdpMessagesFromString( +- unicode('\n'.join(f.NewContents()))) ++ str('\n'.join(f.NewContents()))) + else: + file_dir = input_api.os_path.dirname(file_path) or '.' 
+ if f.OldContents(): + old_id_to_msg_map = grd_helper.GetGrdMessages( +- StringIO(unicode('\n'.join(f.OldContents()))), file_dir) ++ StringIO(str('\n'.join(f.OldContents()))), file_dir) + if f.NewContents(): + new_id_to_msg_map = grd_helper.GetGrdMessages( +- StringIO(unicode('\n'.join(f.NewContents()))), file_dir) ++ StringIO(str('\n'.join(f.NewContents()))), file_dir) + + grd_name, ext = input_api.os_path.splitext( + input_api.os_path.basename(file_path)) +--- a/src/3rdparty/chromium/PRESUBMIT_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/PRESUBMIT_test.py 2025-01-16 02:26:08.513347536 +0800 +@@ -669,7 +669,7 @@ + 'win_rel_naclmore', + ], + } +- for master, bots in bots.iteritems(): ++ for master, bots in bots.items(): + for bot in bots: + self.assertEqual(master, PRESUBMIT.GetTryServerMasterForBot(bot), + 'bot=%s: expected %s, computed %s' % ( +@@ -2254,9 +2254,9 @@ + self._mockChangeOwnerAndReviewers( + mock_input_api, 'owner@chromium.org', ['banana@chromium.org']) + result = PRESUBMIT.CheckSecurityChanges(mock_input_api, mock_output_api) +- self.assertEquals(1, len(result)) +- self.assertEquals(result[0].type, 'notify') +- self.assertEquals(result[0].message, ++ self.assertEqual(1, len(result)) ++ self.assertEqual(result[0].type, 'notify') ++ self.assertEqual(result[0].message, + 'The following files change calls to security-sensive functions\n' \ + 'that need to be reviewed by ipc/SECURITY_OWNERS.\n' + ' file.cc\n' +@@ -2273,9 +2273,9 @@ + self._mockChangeOwnerAndReviewers( + mock_input_api, 'owner@chromium.org', ['banana@chromium.org']) + result = PRESUBMIT.CheckSecurityChanges(mock_input_api, mock_output_api) +- self.assertEquals(1, len(result)) +- self.assertEquals(result[0].type, 'error') +- self.assertEquals(result[0].message, ++ self.assertEqual(1, len(result)) ++ self.assertEqual(result[0].type, 'error') ++ self.assertEqual(result[0].message, + 'The following files change calls to security-sensive functions\n' \ + 'that need to be reviewed by ipc/SECURITY_OWNERS.\n' + ' file.cc\n' +@@ -2292,7 +2292,7 @@ + mock_input_api, 'owner@chromium.org', + ['apple@chromium.org', 'banana@chromium.org']) + result = PRESUBMIT.CheckSecurityChanges(mock_input_api, mock_output_api) +- self.assertEquals(0, len(result)) ++ self.assertEqual(0, len(result)) + + def testChangeOwnerIsSecurityOwner(self): + mock_input_api = MockInputApi() +@@ -2304,7 +2304,7 @@ + self._mockChangeOwnerAndReviewers( + mock_input_api, 'orange@chromium.org', ['pear@chromium.org']) + result = PRESUBMIT.CheckSecurityChanges(mock_input_api, mock_output_api) +- self.assertEquals(1, len(result)) ++ self.assertEqual(1, len(result)) + + + class BannedTypeCheckTest(unittest.TestCase): +@@ -2726,8 +2726,8 @@ + MockFile('dir/jumbo.h', ['#include "sphelper.h"']), + ] + results = PRESUBMIT._CheckNoStrCatRedefines(mock_input_api, MockOutputApi()) +- self.assertEquals(1, len(results)) +- self.assertEquals(4, len(results[0].items)) ++ self.assertEqual(1, len(results)) ++ self.assertEqual(4, len(results[0].items)) + self.assertTrue('StrCat' in results[0].message) + self.assertTrue('foo_win.cc' in results[0].items[0]) + self.assertTrue('bar.h' in results[0].items[1]) +@@ -2741,7 +2741,7 @@ + MockFile('dir/baz-win.h', ['#include "base/win/atl.h"']), + ] + results = PRESUBMIT._CheckNoStrCatRedefines(mock_input_api, MockOutputApi()) +- self.assertEquals(0, len(results)) ++ self.assertEqual(0, len(results)) + + def testAllowsToCreateWrapper(self): + mock_input_api = MockInputApi() +@@ -2751,7 +2751,7 @@ + '#include 
"base/win/windows_defines.inc"']), + ] + results = PRESUBMIT._CheckNoStrCatRedefines(mock_input_api, MockOutputApi()) +- self.assertEquals(0, len(results)) ++ self.assertEqual(0, len(results)) + + + class StringTest(unittest.TestCase): +@@ -3523,7 +3523,7 @@ + def _check(self, files): + mock_input_api = MockInputApi() + mock_input_api.files = [] +- for fname, contents in files.items(): ++ for fname, contents in list(files.items()): + mock_input_api.files.append(MockFile(fname, contents.splitlines())) + return PRESUBMIT.CheckBuildtoolsRevisionsAreInSync(mock_input_api, + MockOutputApi()) +@@ -3560,7 +3560,7 @@ + def _check(self, files): + mock_input_api = MockInputApi() + mock_input_api.files = [] +- for fname, contents in files.items(): ++ for fname, contents in list(files.items()): + mock_input_api.files.append(MockFile(fname, contents.splitlines())) + return PRESUBMIT.CheckFuzzTargetsOnUpload(mock_input_api, MockOutputApi()) + +--- a/src/3rdparty/chromium/PRESUBMIT_test_mocks.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/PRESUBMIT_test_mocks.py 2025-01-16 02:26:08.513347536 +0800 +@@ -126,7 +126,7 @@ + if file_.LocalPath() == filename: + return '\n'.join(file_.NewContents()) + # Otherwise, file is not in our mock API. +- raise IOError, "No such file or directory: '%s'" % filename ++ raise IOError("No such file or directory: '%s'" % filename) + + + class MockOutputApi(object): +--- a/src/3rdparty/chromium/base/third_party/libevent/event_rpcgen.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/base/third_party/libevent/event_rpcgen.py 2025-01-16 02:26:08.513347536 +0800 +@@ -27,18 +27,18 @@ + self._name = name + self._entries = [] + self._tags = {} +- print >>sys.stderr, ' Created struct: %s' % name ++ print(' Created struct: %s' % name, file=sys.stderr) + + def AddEntry(self, entry): +- if self._tags.has_key(entry.Tag()): +- print >>sys.stderr, ( 'Entry "%s" duplicates tag number ' ++ if entry.Tag() in self._tags: ++ print(( 'Entry "%s" duplicates tag number ' + '%d from "%s" around line %d' ) % ( + entry.Name(), entry.Tag(), +- self._tags[entry.Tag()], line_count) ++ self._tags[entry.Tag()], line_count), file=sys.stderr) + sys.exit(1) + self._entries.append(entry) + self._tags[entry.Tag()] = entry.Name() +- print >>sys.stderr, ' Added entry: %s' % entry.Name() ++ print(' Added entry: %s' % entry.Name(), file=sys.stderr) + + def Name(self): + return self._name +@@ -52,24 +52,24 @@ + def PrintIdented(self, file, ident, code): + """Takes an array, add indentation to each entry and prints it.""" + for entry in code: +- print >>file, '%s%s' % (ident, entry) ++ print('%s%s' % (ident, entry), file=file) + + def PrintTags(self, file): + """Prints the tag definitions for a structure.""" +- print >>file, '/* Tag definition for %s */' % self._name +- print >>file, 'enum %s_ {' % self._name.lower() ++ print('/* Tag definition for %s */' % self._name, file=file) ++ print('enum %s_ {' % self._name.lower(), file=file) + for entry in self._entries: +- print >>file, ' %s=%d,' % (self.EntryTagName(entry), +- entry.Tag()) +- print >>file, ' %s_MAX_TAGS' % (self._name.upper()) +- print >>file, '};\n' ++ print(' %s=%d,' % (self.EntryTagName(entry), ++ entry.Tag()), file=file) ++ print(' %s_MAX_TAGS' % (self._name.upper()), file=file) ++ print('};\n', file=file) + + def PrintForwardDeclaration(self, file): +- print >>file, 'struct %s;' % self._name ++ print('struct %s;' % self._name, file=file) + + def PrintDeclaration(self, file): +- print >>file, '/* Structure 
declaration for %s */' % self._name +- print >>file, 'struct %s_access_ {' % self._name ++ print('/* Structure declaration for %s */' % self._name, file=file) ++ print('struct %s_access_ {' % self._name, file=file) + for entry in self._entries: + dcl = entry.AssignDeclaration('(*%s_assign)' % entry.Name()) + dcl.extend( +@@ -78,20 +78,19 @@ + dcl.extend( + entry.AddDeclaration('(*%s_add)' % entry.Name())) + self.PrintIdented(file, ' ', dcl) +- print >>file, '};\n' ++ print('};\n', file=file) + +- print >>file, 'struct %s {' % self._name +- print >>file, ' struct %s_access_ *base;\n' % self._name ++ print('struct %s {' % self._name, file=file) ++ print(' struct %s_access_ *base;\n' % self._name, file=file) + for entry in self._entries: + dcl = entry.Declaration() + self.PrintIdented(file, ' ', dcl) +- print >>file, '' ++ print('', file=file) + for entry in self._entries: +- print >>file, ' ev_uint8_t %s_set;' % entry.Name() +- print >>file, '};\n' ++ print(' ev_uint8_t %s_set;' % entry.Name(), file=file) ++ print('};\n', file=file) + +- print >>file, \ +-"""struct %(name)s *%(name)s_new(void); ++ print("""struct %(name)s *%(name)s_new(void); + void %(name)s_free(struct %(name)s *); + void %(name)s_clear(struct %(name)s *); + void %(name)s_marshal(struct evbuffer *, const struct %(name)s *); +@@ -100,7 +99,7 @@ + void evtag_marshal_%(name)s(struct evbuffer *, ev_uint32_t, + const struct %(name)s *); + int evtag_unmarshal_%(name)s(struct evbuffer *, ev_uint32_t, +- struct %(name)s *);""" % { 'name' : self._name } ++ struct %(name)s *);""" % { 'name' : self._name }, file=file) + + + # Write a setting function of every variable +@@ -113,22 +112,21 @@ + self.PrintIdented(file, '', entry.AddDeclaration( + entry.AddFuncName())) + +- print >>file, '/* --- %s done --- */\n' % self._name ++ print('/* --- %s done --- */\n' % self._name, file=file) + + def PrintCode(self, file): +- print >>file, ('/*\n' ++ print(('/*\n' + ' * Implementation of %s\n' +- ' */\n') % self._name ++ ' */\n') % self._name, file=file) + +- print >>file, \ +- 'static struct %(name)s_access_ __%(name)s_base = {' % \ +- { 'name' : self._name } ++ print('static struct %(name)s_access_ __%(name)s_base = {' % \ ++ { 'name' : self._name }, file=file) + for entry in self._entries: + self.PrintIdented(file, ' ', entry.CodeBase()) +- print >>file, '};\n' ++ print('};\n', file=file) + + # Creation +- print >>file, ( ++ print(( + 'struct %(name)s *\n' + '%(name)s_new(void)\n' + '{\n' +@@ -137,75 +135,75 @@ + ' event_warn("%%s: malloc", __func__);\n' + ' return (NULL);\n' + ' }\n' +- ' tmp->base = &__%(name)s_base;\n') % { 'name' : self._name } ++ ' tmp->base = &__%(name)s_base;\n') % { 'name' : self._name }, file=file) + + for entry in self._entries: + self.PrintIdented(file, ' ', entry.CodeNew('tmp')) +- print >>file, ' tmp->%s_set = 0;\n' % entry.Name() ++ print(' tmp->%s_set = 0;\n' % entry.Name(), file=file) + +- print >>file, ( ++ print(( + ' return (tmp);\n' +- '}\n') ++ '}\n'), file=file) + + # Adding + for entry in self._entries: + if entry.Array(): + self.PrintIdented(file, '', entry.CodeAdd()) +- print >>file, '' ++ print('', file=file) + + # Assigning + for entry in self._entries: + self.PrintIdented(file, '', entry.CodeAssign()) +- print >>file, '' ++ print('', file=file) + + # Getting + for entry in self._entries: + self.PrintIdented(file, '', entry.CodeGet()) +- print >>file, '' ++ print('', file=file) + + # Clearing +- print >>file, ( 'void\n' ++ print(( 'void\n' + '%(name)s_clear(struct %(name)s *tmp)\n' + '{' +- ) % { 
'name' : self._name } ++ ) % { 'name' : self._name }, file=file) + for entry in self._entries: + self.PrintIdented(file, ' ', entry.CodeClear('tmp')) + +- print >>file, '}\n' ++ print('}\n', file=file) + + # Freeing +- print >>file, ( 'void\n' ++ print(( 'void\n' + '%(name)s_free(struct %(name)s *tmp)\n' + '{' +- ) % { 'name' : self._name } ++ ) % { 'name' : self._name }, file=file) + + for entry in self._entries: + self.PrintIdented(file, ' ', entry.CodeFree('tmp')) + +- print >>file, (' free(tmp);\n' +- '}\n') ++ print((' free(tmp);\n' ++ '}\n'), file=file) + + # Marshaling +- print >>file, ('void\n' ++ print(('void\n' + '%(name)s_marshal(struct evbuffer *evbuf, ' + 'const struct %(name)s *tmp)' +- '{') % { 'name' : self._name } ++ '{') % { 'name' : self._name }, file=file) + for entry in self._entries: + indent = ' ' + # Optional entries do not have to be set + if entry.Optional(): + indent += ' ' +- print >>file, ' if (tmp->%s_set) {' % entry.Name() ++ print(' if (tmp->%s_set) {' % entry.Name(), file=file) + self.PrintIdented( + file, indent, + entry.CodeMarshal('evbuf', self.EntryTagName(entry), 'tmp')) + if entry.Optional(): +- print >>file, ' }' ++ print(' }', file=file) + +- print >>file, '}\n' ++ print('}\n', file=file) + + # Unmarshaling +- print >>file, ('int\n' ++ print(('int\n' + '%(name)s_unmarshal(struct %(name)s *tmp, ' + ' struct evbuffer *evbuf)\n' + '{\n' +@@ -214,50 +212,50 @@ + ' if (evtag_peek(evbuf, &tag) == -1)\n' + ' return (-1);\n' + ' switch (tag) {\n' +- ) % { 'name' : self._name } ++ ) % { 'name' : self._name }, file=file) + for entry in self._entries: +- print >>file, ' case %s:\n' % self.EntryTagName(entry) ++ print(' case %s:\n' % self.EntryTagName(entry), file=file) + if not entry.Array(): +- print >>file, ( ++ print(( + ' if (tmp->%s_set)\n' + ' return (-1);' +- ) % (entry.Name()) ++ ) % (entry.Name()), file=file) + + self.PrintIdented( + file, ' ', + entry.CodeUnmarshal('evbuf', + self.EntryTagName(entry), 'tmp')) + +- print >>file, ( ' tmp->%s_set = 1;\n' % entry.Name() + +- ' break;\n' ) +- print >>file, ( ' default:\n' ++ print(( ' tmp->%s_set = 1;\n' % entry.Name() + ++ ' break;\n' ), file=file) ++ print(( ' default:\n' + ' return -1;\n' + ' }\n' +- ' }\n' ) ++ ' }\n' ), file=file) + # Check if it was decoded completely +- print >>file, ( ' if (%(name)s_complete(tmp) == -1)\n' ++ print(( ' if (%(name)s_complete(tmp) == -1)\n' + ' return (-1);' +- ) % { 'name' : self._name } ++ ) % { 'name' : self._name }, file=file) + + # Successfully decoded +- print >>file, ( ' return (0);\n' +- '}\n') ++ print(( ' return (0);\n' ++ '}\n'), file=file) + + # Checking if a structure has all the required data +- print >>file, ( ++ print(( + 'int\n' + '%(name)s_complete(struct %(name)s *msg)\n' +- '{' ) % { 'name' : self._name } ++ '{' ) % { 'name' : self._name }, file=file) + for entry in self._entries: + self.PrintIdented( + file, ' ', + entry.CodeComplete('msg')) +- print >>file, ( ++ print(( + ' return (0);\n' +- '}\n' ) ++ '}\n' ), file=file) + + # Complete message unmarshaling +- print >>file, ( ++ print(( + 'int\n' + 'evtag_unmarshal_%(name)s(struct evbuffer *evbuf, ' + 'ev_uint32_t need_tag, struct %(name)s *msg)\n' +@@ -279,10 +277,10 @@ + ' error:\n' + ' evbuffer_free(tmp);\n' + ' return (res);\n' +- '}\n' ) % { 'name' : self._name } ++ '}\n' ) % { 'name' : self._name }, file=file) + + # Complete message marshaling +- print >>file, ( ++ print(( + 'void\n' + 'evtag_marshal_%(name)s(struct evbuffer *evbuf, ev_uint32_t tag, ' + 'const struct %(name)s *msg)\n' +@@ 
-294,7 +292,7 @@ + ' evtag_marshal(evbuf, tag, EVBUFFER_DATA(_buf), ' + 'EVBUFFER_LENGTH(_buf));\n' + ' evbuffer_free(_buf);\n' +- '}\n' ) % { 'name' : self._name } ++ '}\n' ) % { 'name' : self._name }, file=file) + + class Entry: + def __init__(self, type, name, tag): +@@ -425,19 +423,19 @@ + + def Verify(self): + if self.Array() and not self._can_be_array: +- print >>sys.stderr, ( ++ print(( + 'Entry "%s" cannot be created as an array ' +- 'around line %d' ) % (self._name, self.LineCount()) ++ 'around line %d' ) % (self._name, self.LineCount()), file=sys.stderr) + sys.exit(1) + if not self._struct: +- print >>sys.stderr, ( ++ print(( + 'Entry "%s" does not know which struct it belongs to ' +- 'around line %d' ) % (self._name, self.LineCount()) ++ 'around line %d' ) % (self._name, self.LineCount()), file=sys.stderr) + sys.exit(1) + if self._optional and self._array: +- print >>sys.stderr, ( 'Entry "%s" has illegal combination of ' ++ print(( 'Entry "%s" has illegal combination of ' + 'optional and array around line %d' ) % ( +- self._name, self.LineCount() ) ++ self._name, self.LineCount() ), file=sys.stderr) + sys.exit(1) + + class EntryBytes(Entry): +@@ -522,8 +520,8 @@ + + def Verify(self): + if not self._length: +- print >>sys.stderr, 'Entry "%s" needs a length around line %d' % ( +- self._name, self.LineCount() ) ++ print('Entry "%s" needs a length around line %d' % ( ++ self._name, self.LineCount() ), file=sys.stderr) + sys.exit(1) + + Entry.Verify(self) +@@ -1089,8 +1087,8 @@ + if not name: + res = re.match(r'^([^\[\]]+)(\[.*\])?$', token) + if not res: +- print >>sys.stderr, 'Cannot parse name: \"%s\" around %d' % ( +- entry, line_count) ++ print('Cannot parse name: \"%s\" around %d' % ( ++ entry, line_count), file=sys.stderr) + sys.exit(1) + name = res.group(1) + fixed_length = res.group(2) +@@ -1101,24 +1099,24 @@ + if not separator: + separator = token + if separator != '=': +- print >>sys.stderr, 'Expected "=" after name \"%s\" got %s' % ( +- name, token) ++ print('Expected "=" after name \"%s\" got %s' % ( ++ name, token), file=sys.stderr) + sys.exit(1) + continue + + if not tag_set: + tag_set = 1 + if not re.match(r'^(0x)?[0-9]+$', token): +- print >>sys.stderr, 'Expected tag number: \"%s\"' % entry ++ print('Expected tag number: \"%s\"' % entry, file=sys.stderr) + sys.exit(1) + tag = int(token, 0) + continue + +- print >>sys.stderr, 'Cannot parse \"%s\"' % entry ++ print('Cannot parse \"%s\"' % entry, file=sys.stderr) + sys.exit(1) + + if not tag_set: +- print >>sys.stderr, 'Need tag number: \"%s\"' % entry ++ print('Need tag number: \"%s\"' % entry, file=sys.stderr) + sys.exit(1) + + # Create the right entry +@@ -1138,7 +1136,7 @@ + # References another struct defined in our file + newentry = EntryStruct(entry_type, name, tag, res.group(1)) + else: +- print >>sys.stderr, 'Bad type: "%s" in "%s"' % (entry_type, entry) ++ print('Bad type: "%s" in "%s"' % (entry_type, entry), file=sys.stderr) + sys.exit(1) + + structs = [] +@@ -1240,8 +1238,8 @@ + + if not re.match(r'^struct %s {$' % _STRUCT_RE, + line, re.IGNORECASE): +- print >>sys.stderr, 'Missing struct on line %d: %s' % ( +- line_count, line) ++ print('Missing struct on line %d: %s' % ( ++ line_count, line), file=sys.stderr) + sys.exit(1) + else: + got_struct = 1 +@@ -1255,8 +1253,8 @@ + continue + + if len(tokens[1]): +- print >>sys.stderr, 'Trailing garbage after struct on line %d' % ( +- line_count ) ++ print('Trailing garbage after struct on line %d' % ( ++ line_count ), file=sys.stderr) + sys.exit(1) + + # We found 
the end of the struct +@@ -1377,17 +1375,17 @@ + + def main(argv): + if len(argv) < 2 or not argv[1]: +- print >>sys.stderr, 'Need RPC description file as first argument.' ++ print('Need RPC description file as first argument.', file=sys.stderr) + sys.exit(1) + + filename = argv[1] + + ext = filename.split('.')[-1] + if ext != 'rpc': +- print >>sys.stderr, 'Unrecognized file extension: %s' % ext ++ print('Unrecognized file extension: %s' % ext, file=sys.stderr) + sys.exit(1) + +- print >>sys.stderr, 'Reading \"%s\"' % filename ++ print('Reading \"%s\"' % filename, file=sys.stderr) + + fp = open(filename, 'r') + entities = Parse(fp) +@@ -1396,25 +1394,25 @@ + header_file = '.'.join(filename.split('.')[:-1]) + '.gen.h' + impl_file = '.'.join(filename.split('.')[:-1]) + '.gen.c' + +- print >>sys.stderr, '... creating "%s"' % header_file ++ print('... creating "%s"' % header_file, file=sys.stderr) + header_fp = open(header_file, 'w') +- print >>header_fp, HeaderPreamble(filename) ++ print(HeaderPreamble(filename), file=header_fp) + + # Create forward declarations: allows other structs to reference + # each other + for entry in entities: + entry.PrintForwardDeclaration(header_fp) +- print >>header_fp, '' ++ print('', file=header_fp) + + for entry in entities: + entry.PrintTags(header_fp) + entry.PrintDeclaration(header_fp) +- print >>header_fp, HeaderPostamble(filename) ++ print(HeaderPostamble(filename), file=header_fp) + header_fp.close() + +- print >>sys.stderr, '... creating "%s"' % impl_file ++ print('... creating "%s"' % impl_file, file=sys.stderr) + impl_fp = open(impl_file, 'w') +- print >>impl_fp, BodyPreamble(filename) ++ print(BodyPreamble(filename), file=impl_fp) + for entry in entities: + entry.PrintCode(impl_fp) + impl_fp.close() +--- a/src/3rdparty/chromium/base/win/embedded_i18n/create_string_rc.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/base/win/embedded_i18n/create_string_rc.py 2025-01-16 02:26:08.513347536 +0800 +@@ -58,7 +58,7 @@ + # and IDS_L10N_OFFSET_* for the language we are interested in. + # + +-from __future__ import print_function ++ + + import argparse + import glob +@@ -280,7 +280,7 @@ + def __AddModeSpecificStringIds(self): + """Adds the mode-specific strings for all of the current brand's install + modes to self.string_id_set.""" +- for string_id, brands in self.mode_specific_strings.items(): ++ for string_id, brands in list(self.mode_specific_strings.items()): + brand_strings = brands.get(self.brand) + if not brand_strings: + raise RuntimeError( +@@ -358,7 +358,7 @@ + # Manually put the source strings as en-US in the list of translated + # strings. + translated_strings = [] +- for string_id, message_text in source_strings.items(): ++ for string_id, message_text in list(source_strings.items()): + translated_strings.append(self.__TranslationData(string_id, + 'EN_US', + message_text)) +@@ -368,7 +368,7 @@ + # message text; hence the message id is mapped to a list of string ids + # instead of a single value. 
+ translation_ids = {} +- for (string_id, message_text) in source_strings.items(): ++ for (string_id, message_text) in list(source_strings.items()): + message_id = tclib.GenerateMessageId(message_text) + translation_ids.setdefault(message_id, []).append(string_id); + +@@ -383,7 +383,7 @@ + if not xtb_filename in source_xtb_files: + extra_xtb_files.append(xtb_filename) + sax_parser.parse(xtb_filename) +- for string_id, message_text in source_strings.items(): ++ for string_id, message_text in list(source_strings.items()): + translated_string = xtb_handler.translations.get(string_id, + message_text) + translated_strings.append(self.__TranslationData(string_id, +@@ -407,13 +407,13 @@ + """Writes a resource file with the strings provided in |translated_strings|. + """ + HEADER_TEXT = ( +- u'#include "%s"\n\n' +- u'STRINGTABLE\n' +- u'BEGIN\n' ++ '#include "%s"\n\n' ++ 'STRINGTABLE\n' ++ 'BEGIN\n' + ) % os.path.basename(self.header_file) + + FOOTER_TEXT = ( +- u'END\n' ++ 'END\n' + ) + + with io.open(self.rc_file, +@@ -426,7 +426,7 @@ + escaped_text = (translation.translation.replace('"', '""') + .replace('\t', '\\t') + .replace('\n', '\\n')) +- outfile.write(u' %s "%s"\n' % ++ outfile.write(' %s "%s"\n' % + (translation.resource_id_str + '_' + translation.language, + escaped_text)) + outfile.write(FOOTER_TEXT) +@@ -463,7 +463,7 @@ + resource_id += 1 + + # Handle mode-specific strings. +- for string_id, brands in self.mode_specific_strings.items(): ++ for string_id, brands in list(self.mode_specific_strings.items()): + # Populate the DO_MODE_STRINGS macro. + brand_strings = brands.get(self.brand) + if not brand_strings: +@@ -575,7 +575,7 @@ + parser.error('A brand was specified (' + brand + ') but no mode ' + 'specific strings were given.') + valid_brands = [b for b in +- next(iter(mode_specific_strings.values())).keys()] ++ list(next(iter(list(mode_specific_strings.values()))).keys())] + if not brand in valid_brands: + parser.error('A brand was specified (' + brand + ') but it is not ' + 'a valid brand [' + ', '.join(valid_brands) + '].') +@@ -590,7 +590,7 @@ + parser.error('Mismatch in number of grd files ({}) and xtb relative ' + 'paths ({})'.format(len(grd_files), len(xtb_relative_paths))) + +- inputs = zip(grd_files, xtb_relative_paths) ++ inputs = list(zip(grd_files, xtb_relative_paths)) + + StringRcMaker(inputs, args.expected_xtb_input_files, args.header_file, + args.rc_file, brand, args.first_resource_id, string_ids_to_extract, +--- a/src/3rdparty/chromium/build/apply_locales.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/apply_locales.py 2025-01-16 02:26:08.513347536 +0800 +@@ -5,7 +5,7 @@ + + # TODO: remove this script when GYP has for loops + +-from __future__ import print_function ++ + + import sys + import optparse +--- a/src/3rdparty/chromium/build/check_gn_headers.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/check_gn_headers.py 2025-01-16 02:26:08.513347536 +0800 +@@ -9,7 +9,7 @@ + dependency generated by the compiler, and report if they don't exist in GN. + """ + +-from __future__ import print_function ++ + + import argparse + import json +@@ -112,7 +112,7 @@ + """Parse GN output and get the header files""" + all_headers = set() + +- for _target, properties in gn['targets'].iteritems(): ++ for _target, properties in gn['targets'].items(): + sources = properties.get('sources', []) + public = properties.get('public', []) + # Exclude '"public": "*"'. 
+@@ -294,7 +294,7 @@ + print(' ', cc) + + print('\nMissing headers sorted by number of affected object files:') +- count = {k: len(v) for (k, v) in d.iteritems()} ++ count = {k: len(v) for (k, v) in d.items()} + for f in sorted(count, key=count.get, reverse=True): + if f in missing: + print(count[f], f) +--- a/src/3rdparty/chromium/build/check_gn_headers_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/check_gn_headers_unittest.py 2025-01-16 02:26:08.513347536 +0800 +@@ -71,7 +71,7 @@ + 'dir3/path/b.h': ['obj/c.o'], + 'c3.hh': ['obj/c.o'], + } +- self.assertEquals(headers, expected) ++ self.assertEqual(headers, expected) + + def testGn(self): + headers = check_gn_headers.ParseGNProjectJSON(gn_input, +@@ -83,7 +83,7 @@ + 'base/p.h', + 'out/Release/gen/a.h', + ]) +- self.assertEquals(headers, expected) ++ self.assertEqual(headers, expected) + + def testWhitelist(self): + output = check_gn_headers.ParseWhiteList(whitelist) +@@ -93,7 +93,7 @@ + 'dir/white-both.c', + 'a/b/c', + ]) +- self.assertEquals(output, expected) ++ self.assertEqual(output, expected) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/build/check_return_value.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/check_return_value.py 2025-01-16 02:26:08.513347536 +0800 +@@ -6,7 +6,7 @@ + """This program wraps an arbitrary command and prints "1" if the command ran + successfully.""" + +-from __future__ import print_function ++ + + import os + import subprocess +--- a/src/3rdparty/chromium/build/compute_build_timestamp.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/compute_build_timestamp.py 2025-01-16 02:26:08.513347536 +0800 +@@ -27,7 +27,7 @@ + # the symbol server, so rarely changing timestamps can cause conflicts there + # as well. We only upload symbols for official builds to the symbol server. 
+ +-from __future__ import print_function ++ + + import argparse + import calendar +--- a/src/3rdparty/chromium/build/copy_test_data_ios.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/copy_test_data_ios.py 2025-01-16 02:26:08.513347536 +0800 +@@ -5,7 +5,7 @@ + + """Copies test data files or directories into a given output directory.""" + +-from __future__ import print_function ++ + + import optparse + import os +--- a/src/3rdparty/chromium/build/detect_host_arch.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/detect_host_arch.py 2025-01-16 02:26:08.513347536 +0800 +@@ -5,7 +5,7 @@ + + """Outputs host CPU architecture in format recognized by gyp.""" + +-from __future__ import print_function ++ + + import platform + import re +--- a/src/3rdparty/chromium/build/download_nacl_toolchains.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/download_nacl_toolchains.py 2025-01-16 02:26:08.513347536 +0800 +@@ -5,7 +5,7 @@ + + """Shim to run nacl toolchain download script only if there is a nacl dir.""" + +-from __future__ import print_function ++ + + import os + import shutil +--- a/src/3rdparty/chromium/build/env_dump.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/env_dump.py 2025-01-16 02:26:08.513347536 +0800 +@@ -44,7 +44,7 @@ + + env_diff = {} + new_env = json.loads(output) +- for k, val in new_env.items(): ++ for k, val in list(new_env.items()): + if k == '_' or (k in os.environ and os.environ[k] == val): + continue + env_diff[k] = val +--- a/src/3rdparty/chromium/build/extract_from_cab.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/extract_from_cab.py 2025-01-16 02:26:08.513347536 +0800 +@@ -5,7 +5,7 @@ + + """Extracts a single file from a CAB archive.""" + +-from __future__ import print_function ++ + + import os + import shutil +--- a/src/3rdparty/chromium/build/find_depot_tools.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/find_depot_tools.py 2025-01-16 02:26:08.513347536 +0800 +@@ -11,7 +11,7 @@ + directory location. + """ + +-from __future__ import print_function ++ + + import os + import sys +--- a/src/3rdparty/chromium/build/fix_gn_headers.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/fix_gn_headers.py 2025-01-16 02:26:08.514430851 +0800 +@@ -10,7 +10,7 @@ + Manual cleaning up is likely required afterwards. 
+ """ + +-from __future__ import print_function ++ + + import argparse + import os +@@ -80,7 +80,7 @@ + if skip_ambiguous: + continue + +- picked = raw_input('Pick the matches ("2,3" for multiple): ') ++ picked = input('Pick the matches ("2,3" for multiple): ') + try: + matches = [matches[int(i) - 1] for i in picked.split(',')] + except (ValueError, IndexError): +@@ -93,7 +93,7 @@ + + for gnfile in edits: + lines = open(gnfile).read().splitlines() +- for l in sorted(edits[gnfile].keys(), reverse=True): ++ for l in sorted(list(edits[gnfile].keys()), reverse=True): + lines.insert(l, edits[gnfile][l]) + open(gnfile, 'w').write('\n'.join(lines) + '\n') + +@@ -173,7 +173,7 @@ + if skip_ambiguous: + continue + +- picked = raw_input('Pick the matches ("2,3" for multiple): ') ++ picked = input('Pick the matches ("2,3" for multiple): ') + try: + matches = [matches[int(i) - 1] for i in picked.split(',')] + except (ValueError, IndexError): +--- a/src/3rdparty/chromium/build/get_landmines.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/get_landmines.py 2025-01-16 02:26:08.514430851 +0800 +@@ -8,7 +8,7 @@ + (or a list of 'landmines'). + """ + +-from __future__ import print_function ++ + + import sys + +--- a/src/3rdparty/chromium/build/get_symlink_targets.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/get_symlink_targets.py 2025-01-16 02:26:08.514430851 +0800 +@@ -19,7 +19,7 @@ + target = os.readlink(link_name) + if not os.path.isabs(target): + target = os.path.join(os.path.dirname(link_name), target) +- print(os.path.realpath(target)) ++ print((os.path.realpath(target))) + return 0 + + +--- a/src/3rdparty/chromium/build/gn_helpers.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/gn_helpers.py 2025-01-16 02:26:08.514430851 +0800 +@@ -67,7 +67,7 @@ + """ + + if sys.version_info.major < 3: +- basestring_compat = basestring ++ basestring_compat = str + else: + basestring_compat = str + +--- a/src/3rdparty/chromium/build/gn_helpers_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/gn_helpers_unittest.py 2025-01-16 02:26:08.514430851 +0800 +@@ -17,7 +17,7 @@ + (False, 'false', 'false'), ('', '""', '""'), + ('\\$"$\\', '"\\\\\\$\\"\\$\\\\"', '"\\\\\\$\\"\\$\\\\"'), + (' \t\r\n', '" $0x09$0x0D$0x0A"', '" $0x09$0x0D$0x0A"'), +- (u'\u2713', '"$0xE2$0x9C$0x93"', '"$0xE2$0x9C$0x93"'), ++ ('\u2713', '"$0xE2$0x9C$0x93"', '"$0xE2$0x9C$0x93"'), + ([], '[ ]', '[]'), ([1], '[ 1 ]', '[\n 1\n]\n'), + ([3, 1, 4, 1], '[ 3, 1, 4, 1 ]', '[\n 3,\n 1,\n 4,\n 1\n]\n'), + (['a', True, 2], '[ "a", true, 2 ]', '[\n "a",\n true,\n 2\n]\n'), +@@ -31,7 +31,7 @@ + '_42A_Zaz_ = [\n false,\n true\n]\nkEy = 137\n'), + ([1, 'two', + ['"thr,.$\\', True, False, [], +- u'(\u2713)']], '[ 1, "two", [ "\\"thr,.\\$\\\\", true, false, ' + ++ '(\u2713)']], '[ 1, "two", [ "\\"thr,.\\$\\\\", true, false, ' + + '[ ], "($0xE2$0x9C$0x93)" ] ]', '''[ + 1, + "two", +--- a/src/3rdparty/chromium/build/gn_run_binary.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/gn_run_binary.py 2025-01-16 02:26:08.514430851 +0800 +@@ -8,7 +8,7 @@ + python gn_run_binary.py [args ...] + """ + +-from __future__ import print_function ++ + + import os + import subprocess +--- a/src/3rdparty/chromium/build/locale_tool.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/locale_tool.py 2025-01-16 02:26:08.514430851 +0800 +@@ -28,7 +28,7 @@ + trying to fix it too, but at least the file will not be modified. 
+ """ + +-from __future__ import print_function ++ + + import argparse + import json +@@ -160,7 +160,7 @@ + """ + result = input_list[:start] + inputs = [] +- for pos in xrange(start, end): ++ for pos in range(start, end): + line = input_list[pos] + key = key_func(line) + inputs.append((key, line)) +@@ -522,8 +522,8 @@ + } + + def test_GetXmlLangAttribute(self): +- for test_line, expected in self.TEST_DATA.iteritems(): +- self.assertEquals(_GetXmlLangAttribute(test_line), expected) ++ for test_line, expected in self.TEST_DATA.items(): ++ self.assertEqual(_GetXmlLangAttribute(test_line), expected) + + + def _SortGrdElementsRanges(grd_lines, element_predicate): +@@ -550,7 +550,7 @@ + """ + errors = [] + locales = set() +- for pos in xrange(start, end): ++ for pos in range(start, end): + line = grd_lines[pos] + lang = _GetXmlLangAttribute(line) + if not lang: +@@ -613,7 +613,7 @@ + List of error message strings for this input. Empty on success. + """ + errors = [] +- for pos in xrange(start, end): ++ for pos in range(start, end): + line = grd_lines[pos] + lang = _GetXmlLangAttribute(line) + if not lang: +@@ -673,7 +673,7 @@ + intervals = _BuildIntervalList(grd_lines, _IsGrdAndroidOutputLine) + for start, end in reversed(intervals): + locales = set() +- for pos in xrange(start, end): ++ for pos in range(start, end): + lang = _GetXmlLangAttribute(grd_lines[pos]) + locale = _FixChromiumLangAttribute(lang) + locales.add(locale) +@@ -685,7 +685,7 @@ + src_locale = 'bg' + src_lang_attribute = 'lang="%s"' % src_locale + src_line = None +- for pos in xrange(start, end): ++ for pos in range(start, end): + if src_lang_attribute in grd_lines[pos]: + src_line = grd_lines[pos] + break +@@ -762,7 +762,7 @@ + List of error message strings for this input. Empty on success. + """ + errors = [] +- for pos in xrange(start, end): ++ for pos in range(start, end): + line = grd_lines[pos] + lang = _GetXmlLangAttribute(line) + if not lang: +@@ -845,7 +845,7 @@ + intervals = _BuildIntervalList(grd_lines, _IsTranslationGrdOutputLine) + for start, end in reversed(intervals): + locales = set() +- for pos in xrange(start, end): ++ for pos in range(start, end): + lang = _GetXmlLangAttribute(grd_lines[pos]) + locale = _FixChromiumLangAttribute(lang) + locales.add(locale) +@@ -857,7 +857,7 @@ + src_locale = 'en-GB' + src_lang_attribute = 'lang="%s"' % src_locale + src_line = None +- for pos in xrange(start, end): ++ for pos in range(start, end): + if src_lang_attribute in grd_lines[pos]: + src_line = grd_lines[pos] + break +@@ -938,7 +938,7 @@ + These are non-localized strings, and should be ignored. This function is + used to detect them quickly. 
+ """ +- for pos in xrange(start, end): ++ for pos in range(start, end): + if not 'values/' in gn_lines[pos]: + return True + return False +@@ -950,7 +950,7 @@ + + errors = [] + locales = set() +- for pos in xrange(start, end): ++ for pos in range(start, end): + line = gn_lines[pos] + android_locale = _GetAndroidGnOutputLocale(line) + assert android_locale != None +@@ -991,7 +991,7 @@ + continue + + locales = set() +- for pos in xrange(start, end): ++ for pos in range(start, end): + lang = _GetAndroidGnOutputLocale(gn_lines[pos]) + locale = resource_utils.ToChromiumLocaleName(lang) + locales.add(locale) +@@ -1003,7 +1003,7 @@ + src_locale = 'bg' + src_values = 'values-%s/' % resource_utils.ToAndroidLocaleName(src_locale) + src_line = None +- for pos in xrange(start, end): ++ for pos in range(start, end): + if src_values in gn_lines[pos]: + src_line = gn_lines[pos] + break +@@ -1310,7 +1310,7 @@ + help='Output as JSON list.') + group.add_argument( + '--type', +- choices=tuple(self.TYPE_MAP.viewkeys()), ++ choices=tuple(self.TYPE_MAP.keys()), + default='all', + help='Select type of locale list to print.') + +--- a/src/3rdparty/chromium/build/mac_toolchain.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/mac_toolchain.py 2025-01-16 02:26:08.514430851 +0800 +@@ -17,7 +17,7 @@ + the full revision, e.g. 9A235. + """ + +-from __future__ import print_function ++ + + import argparse + import os +--- a/src/3rdparty/chromium/build/print_python_deps.py 2025-01-14 21:29:17.869478871 +0800 ++++ b/src/3rdparty/chromium/build/print_python_deps.py 2025-01-16 02:26:08.514430851 +0800 +@@ -28,7 +28,7 @@ + A path is assumed to be a "system" import if it is outside of chromium's + src/. The paths will be relative to the current directory. + """ +- module_paths = (m.__file__ for m in sys.modules.values() ++ module_paths = (m.__file__ for m in list(sys.modules.values()) + if m and hasattr(m, '__file__')) + + src_paths = set() +--- a/src/3rdparty/chromium/build/protoc_java.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/protoc_java.py 2025-01-16 02:26:08.514430851 +0800 +@@ -15,7 +15,7 @@ + 4. Creates a new stamp file. + """ + +-from __future__ import print_function ++ + + import argparse + import os +--- a/src/3rdparty/chromium/build/redirect_stdout.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/redirect_stdout.py 2025-01-16 02:26:08.514430851 +0800 +@@ -2,7 +2,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import subprocess + import sys +--- a/src/3rdparty/chromium/build/rm.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/rm.py 2025-01-16 02:26:08.514430851 +0800 +@@ -8,7 +8,7 @@ + This module works much like the rm posix command. 
+ """ + +-from __future__ import print_function ++ + + import argparse + import os +--- a/src/3rdparty/chromium/build/run_swarming_xcode_install.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/run_swarming_xcode_install.py 2025-01-16 02:26:08.514430851 +0800 +@@ -15,7 +15,7 @@ + --isolate-server touch-isolate.appspot.com + """ + +-from __future__ import print_function ++ + + import argparse + import os +--- a/src/3rdparty/chromium/build/swarming_xcode_install.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/swarming_xcode_install.py 2025-01-16 02:26:08.514430851 +0800 +@@ -7,7 +7,7 @@ + Script used to install Xcode on the swarming bots. + """ + +-from __future__ import print_function ++ + + import os + import shutil +--- a/src/3rdparty/chromium/build/vs_toolchain.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/vs_toolchain.py 2025-01-16 02:26:08.514430851 +0800 +@@ -3,7 +3,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import collections + import glob +@@ -120,12 +120,12 @@ + contents of the registry key's value, or None on failure. Throws + ImportError if _winreg is unavailable. + """ +- import _winreg ++ import winreg + try: + root, subkey = key.split('\\', 1) + assert root == 'HKLM' # Only need HKLM for now. +- with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey: +- return _winreg.QueryValueEx(hkey, value)[0] ++ with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, subkey) as hkey: ++ return winreg.QueryValueEx(hkey, value)[0] + except WindowsError: + return None + +@@ -148,7 +148,7 @@ + + # VS installed in system for external developers + supported_versions_str = ', '.join('{} ({})'.format(v,k) +- for k,v in MSVS_VERSIONS.items()) ++ for k,v in list(MSVS_VERSIONS.items())) + available_versions = [] + for version in supported_versions: + # Checking vs%s_install environment variables. +--- a/src/3rdparty/chromium/build/android/adb_command_line.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/adb_command_line.py 2025-01-16 02:26:08.514430851 +0800 +@@ -5,7 +5,7 @@ + + """Utility for reading / writing command-line flag files on device(s).""" + +-from __future__ import print_function ++ + + import argparse + import logging +--- a/src/3rdparty/chromium/build/android/adb_logcat_monitor.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/adb_logcat_monitor.py 2025-01-16 02:26:08.514430851 +0800 +@@ -16,7 +16,7 @@ + early enough to not miss anything. 
+ """ + +-from __future__ import print_function ++ + + import logging + import os +@@ -141,7 +141,7 @@ + except: # pylint: disable=bare-except + logging.exception('Unexpected exception in main.') + finally: +- for process, _ in devices.itervalues(): ++ for process, _ in devices.values(): + if process: + try: + process.terminate() +--- a/src/3rdparty/chromium/build/android/adb_logcat_printer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/adb_logcat_printer.py 2025-01-16 02:26:08.514430851 +0800 +@@ -19,7 +19,7 @@ + """ + # pylint: disable=W0702 + +-import cStringIO ++import io + import logging + import optparse + import os +@@ -108,7 +108,7 @@ + """ + device_logs = [] + +- for device, device_files in log_filenames.iteritems(): ++ for device, device_files in log_filenames.items(): + logger.debug('%s: %s', device, str(device_files)) + device_file_lines = [] + for cur_file in device_files: +@@ -160,7 +160,7 @@ + parser.error('Wrong number of unparsed args') + base_dir = args[0] + +- log_stringio = cStringIO.StringIO() ++ log_stringio = io.StringIO() + logger = logging.getLogger('LogcatPrinter') + logger.setLevel(LOG_LEVEL) + sh = logging.StreamHandler(log_stringio) +--- a/src/3rdparty/chromium/build/android/adb_reverse_forwarder.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/adb_reverse_forwarder.py 2025-01-16 02:26:08.515514166 +0800 +@@ -62,7 +62,7 @@ + if len(args.ports) < 2 or len(args.ports) % 2: + parser.error('Need even number of port pairs') + +- port_pairs = zip(args.ports[::2], args.ports[1::2]) ++ port_pairs = list(zip(args.ports[::2], args.ports[1::2])) + + if args.build_type: + constants.SetBuildType(args.build_type) +--- a/src/3rdparty/chromium/build/android/apk_operations.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/apk_operations.py 2025-01-16 02:26:08.515514166 +0800 +@@ -6,7 +6,7 @@ + # Using colorama.Fore/Back/Style members + # pylint: disable=no-member + +-from __future__ import print_function ++ + + import argparse + import collections +@@ -531,8 +531,8 @@ + compilation_filter) + + def print_sizes(desc, sizes): +- print('%s: %d KiB' % (desc, sum(sizes.itervalues()))) +- for path, size in sorted(sizes.iteritems()): ++ print('%s: %d KiB' % (desc, sum(sizes.values()))) ++ for path, size in sorted(sizes.items()): + print(' %s: %s KiB' % (path, size)) + + parallel_devices = device_utils.DeviceUtils.parallel(devices) +@@ -544,7 +544,7 @@ + + (data_dir_sizes, code_cache_sizes, apk_sizes, lib_sizes, odex_sizes, + compilation_filter) = result +- total = sum(sum(sizes.itervalues()) for sizes in result[:-1]) ++ total = sum(sum(sizes.values()) for sizes in result[:-1]) + + print_sizes('Apk', apk_sizes) + print_sizes('App Data (non-code cache)', data_dir_sizes) +--- a/src/3rdparty/chromium/build/android/asan_symbolize.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/asan_symbolize.py 2025-01-16 02:26:08.515514166 +0800 +@@ -4,7 +4,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import collections + import optparse +@@ -98,7 +98,7 @@ + # Maps library -> { address -> [(symbol, location, obj_sym_with_offset)...] 
} + all_symbols = collections.defaultdict(dict) + +- for library, items in libraries.iteritems(): ++ for library, items in libraries.items(): + libname = _TranslateLibPath(library, asan_libs) + lib_relative_addrs = set([i.rel_address for i in items]) + # pylint: disable=no-member +--- a/src/3rdparty/chromium/build/android/convert_dex_profile_tests.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/convert_dex_profile_tests.py 2025-01-16 02:26:08.515514166 +0800 +@@ -167,14 +167,14 @@ + dex = cp.ProcessDex(DEX_DUMP.splitlines()) + self.assertIsNotNone(dex['a']) + +- self.assertEquals(len(dex['a'].FindMethodsAtLine('', 311, 313)), 1) +- self.assertEquals(len(dex['a'].FindMethodsAtLine('', 309, 315)), 1) ++ self.assertEqual(len(dex['a'].FindMethodsAtLine('', 311, 313)), 1) ++ self.assertEqual(len(dex['a'].FindMethodsAtLine('', 309, 315)), 1) + clinit = dex['a'].FindMethodsAtLine('', 311, 313)[0] +- self.assertEquals(clinit.name, '') +- self.assertEquals(clinit.return_type, 'V') +- self.assertEquals(clinit.param_types, 'Ljava/lang/String;') ++ self.assertEqual(clinit.name, '') ++ self.assertEqual(clinit.return_type, 'V') ++ self.assertEqual(clinit.param_types, 'Ljava/lang/String;') + +- self.assertEquals(len(dex['a'].FindMethodsAtLine('a', 8, None)), 2) ++ self.assertEqual(len(dex['a'].FindMethodsAtLine('a', 8, None)), 2) + self.assertIsNone(dex['a'].FindMethodsAtLine('a', 100, None)) + + # pylint: disable=protected-access +@@ -183,7 +183,7 @@ + mapping, reverse = cp.ProcessProguardMapping( + PROGUARD_MAPPING.splitlines(), dex) + +- self.assertEquals('La;', reverse.GetClassMapping('Lorg/chromium/Original;')) ++ self.assertEqual('La;', reverse.GetClassMapping('Lorg/chromium/Original;')) + + getInstance = cp.Method( + 'getInstance', 'Lorg/chromium/Original;', '', 'Lorg/chromium/Original;') +@@ -196,7 +196,7 @@ + + mapped = mapping.GetMethodMapping( + cp.Method('a', 'La;', 'Ljava/lang/String;', 'I')) +- self.assertEquals(len(mapped), 2) ++ self.assertEqual(len(mapped), 2) + self.assertIn(getInstance, mapped) + self.assertNotIn(subclassInit, mapped) + self.assertNotIn( +@@ -205,18 +205,18 @@ + + mapped = mapping.GetMethodMapping( + cp.Method('a', 'La;', 'Ljava/lang/Object;', 'I')) +- self.assertEquals(len(mapped), 1) ++ self.assertEqual(len(mapped), 1) + self.assertIn(getInstance, mapped) + + mapped = mapping.GetMethodMapping(cp.Method('b', 'La;', '', 'La;')) +- self.assertEquals(len(mapped), 1) ++ self.assertEqual(len(mapped), 1) + self.assertIn(another, mapped) + +- for from_method, to_methods in mapping._method_mapping.iteritems(): ++ for from_method, to_methods in mapping._method_mapping.items(): + for to_method in to_methods: + self.assertIn(from_method, reverse.GetMethodMapping(to_method)) +- for from_class, to_class in mapping._class_mapping.iteritems(): +- self.assertEquals(from_class, reverse.GetClassMapping(to_class)) ++ for from_class, to_class in mapping._class_mapping.items(): ++ self.assertEqual(from_class, reverse.GetClassMapping(to_class)) + + def testProcessProfile(self): + dex = cp.ProcessDex(DEX_DUMP.splitlines()) +@@ -234,9 +234,9 @@ + self.assertIn(initialize, profile._methods) + self.assertIn(another, profile._methods) + +- self.assertEquals(profile._methods[getInstance], set(['H', 'S', 'P'])) +- self.assertEquals(profile._methods[initialize], set(['H', 'P'])) +- self.assertEquals(profile._methods[another], set(['P'])) ++ self.assertEqual(profile._methods[getInstance], set(['H', 'S', 'P'])) ++ self.assertEqual(profile._methods[initialize], 
set(['H', 'P'])) ++ self.assertEqual(profile._methods[another], set(['P'])) + + def testEndToEnd(self): + dex = cp.ProcessDex(DEX_DUMP.splitlines()) +@@ -247,7 +247,7 @@ + profile.WriteToFile(temp.name) + with open(temp.name, 'r') as f: + for a, b in zip(sorted(f), sorted(UNOBFUSCATED_PROFILE.splitlines())): +- self.assertEquals(a.strip(), b.strip()) ++ self.assertEqual(a.strip(), b.strip()) + + def testObfuscateProfile(self): + with build_utils.TempDir() as temp_dir: +@@ -269,7 +269,7 @@ + obfuscated_profile = sorted(obfuscated_file.readlines()) + for a, b in zip( + sorted(OBFUSCATED_PROFILE_2.splitlines()), obfuscated_profile): +- self.assertEquals(a.strip(), b.strip()) ++ self.assertEqual(a.strip(), b.strip()) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/build/android/devil_chromium.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/devil_chromium.py 2025-01-16 02:26:08.515514166 +0800 +@@ -152,7 +152,7 @@ + for dep_config in dep_configs + } + } +- for dep_name, dep_configs in _DEVIL_BUILD_PRODUCT_DEPS.iteritems() ++ for dep_name, dep_configs in _DEVIL_BUILD_PRODUCT_DEPS.items() + } + if custom_deps: + devil_dynamic_config['dependencies'].update(custom_deps) +--- a/src/3rdparty/chromium/build/android/diff_resource_sizes.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/diff_resource_sizes.py 2025-01-16 02:26:08.515514166 +0800 +@@ -5,7 +5,7 @@ + + """Runs resource_sizes.py on two apks and outputs the diff.""" + +-from __future__ import print_function ++ + + import argparse + import json +@@ -49,8 +49,8 @@ + base_results: The chartjson-formatted size results of the base APK. + diff_results: The chartjson-formatted size results of the diff APK. + """ +- for graph_title, graph in base_results['charts'].iteritems(): +- for trace_title, trace in graph.iteritems(): ++ for graph_title, graph in base_results['charts'].items(): ++ for trace_title, trace in graph.items(): + perf_tests_results_helper.ReportPerfResult( + chartjson, graph_title, trace_title, + diff_results['charts'][graph_title][trace_title]['value'] +@@ -67,8 +67,8 @@ + base_results: The chartjson-formatted size results of the base APK. + diff_results: The chartjson-formatted size results of the diff APK. 
+ """ +- for graph_title, graph in base_results['charts'].iteritems(): +- for trace_title, trace in graph.iteritems(): ++ for graph_title, graph in base_results['charts'].items(): ++ for trace_title, trace in graph.items(): + perf_tests_results_helper.ReportPerfResult( + chartjson, graph_title + '_base_apk', trace_title, + trace['value'], trace['units'], trace['improvement_direction'], +@@ -76,8 +76,8 @@ + + # Both base_results and diff_results should have the same charts/traces, but + # loop over them separately in case they don't +- for graph_title, graph in diff_results['charts'].iteritems(): +- for trace_title, trace in graph.iteritems(): ++ for graph_title, graph in diff_results['charts'].items(): ++ for trace_title, trace in graph.items(): + perf_tests_results_helper.ReportPerfResult( + chartjson, graph_title + '_diff_apk', trace_title, + trace['value'], trace['units'], trace['improvement_direction'], +--- a/src/3rdparty/chromium/build/android/dump_apk_resource_strings.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/dump_apk_resource_strings.py 2025-01-16 02:26:08.515514166 +0800 +@@ -6,7 +6,7 @@ + + """A script to parse and dump localized strings in resource.arsc files.""" + +-from __future__ import print_function ++ + + import argparse + import collections +@@ -220,7 +220,7 @@ + def ToStringList(self, res_id): + """Convert entry to string list for human-friendly output.""" + values = sorted( +- [(str(config), value) for config, value in self.res_values.iteritems()]) ++ [(str(config), value) for config, value in self.res_values.items()]) + if res_id is None: + # res_id will be None when the resource ID should not be part + # of the output. +@@ -256,7 +256,7 @@ + + def RemapResourceNames(self, id_name_map): + """Rename all entries according to a given {res_id -> res_name} map.""" +- for res_id, res_name in id_name_map.iteritems(): ++ for res_id, res_name in id_name_map.items(): + if res_id in self._res_map: + self._res_map[res_id].res_name = res_name + +@@ -286,7 +286,7 @@ + result = cmp(a[0], b[0]) + return result + +- for res_id, _ in sorted(res_map.iteritems(), cmp=cmp_id_name): ++ for res_id, _ in sorted(iter(res_map.items()), cmp=cmp_id_name): + result += res_map[res_id].ToStringList(None if omit_ids else res_id) + result.append('} # Resource strings') + return result +@@ -386,7 +386,7 @@ + _RE_BUNDLE_STRING_LOCALIZED_VALUE = re.compile( + r'^\s+locale: "([0-9a-zA-Z-]+)" - \[STR\] "(.*)"$') + assert _RE_BUNDLE_STRING_LOCALIZED_VALUE.match( +- u' locale: "ar" - [STR] "گزینه\u200cهای بیشتر"'.encode('utf-8')) ++ ' locale: "ar" - [STR] "گزینه\u200cهای بیشتر"'.encode('utf-8')) + + + def ParseBundleResources(bundle_tool_jar_path, bundle_path): +--- a/src/3rdparty/chromium/build/android/emma_coverage_stats.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/emma_coverage_stats.py 2025-01-16 02:26:08.515514166 +0800 +@@ -183,7 +183,7 @@ + } + + package_to_emma = {} +- for package_emma_file_path, package_name in package_links.iteritems(): ++ for package_emma_file_path, package_name in package_links.items(): + # These elements contain each class name in the current package and + # the path of the file where the coverage info is stored for each class. + coverage_file_link_elements = self._FindElements( +@@ -257,7 +257,7 @@ + |lines_for_coverage|. 
+ """ + file_coverage = {} +- for file_path, line_numbers in lines_for_coverage.iteritems(): ++ for file_path, line_numbers in lines_for_coverage.items(): + file_coverage_dict = self.GetCoverageDictForFile(file_path, line_numbers) + if file_coverage_dict: + file_coverage[file_path] = file_coverage_dict +@@ -265,7 +265,7 @@ + logging.warning( + 'No code coverage data for %s, skipping.', file_path) + +- covered_statuses = [s['incremental'] for s in file_coverage.itervalues()] ++ covered_statuses = [s['incremental'] for s in file_coverage.values()] + num_covered_lines = sum(s['covered'] for s in covered_statuses) + num_total_lines = sum(s['total'] for s in covered_statuses) + return { +@@ -382,7 +382,7 @@ + # Finally, we have a dict mapping Java file paths to EMMA report files. + # Example: /usr/code/file.java -> out/coverage/1a.html. + source_to_emma = {source: package_to_emma[package] +- for source, package in source_to_package.iteritems() ++ for source, package in source_to_package.items() + if package in package_to_emma} + return source_to_emma + +@@ -442,12 +442,12 @@ + potential_files_for_coverage = json.load(f) + + files_for_coverage = {f: lines +- for f, lines in potential_files_for_coverage.iteritems() ++ for f, lines in potential_files_for_coverage.items() + if _EmmaCoverageStats.NeedsCoverage(f)} + + coverage_results = {} + if files_for_coverage: +- code_coverage = _EmmaCoverageStats(coverage_dir, files_for_coverage.keys()) ++ code_coverage = _EmmaCoverageStats(coverage_dir, list(files_for_coverage.keys())) + coverage_results = code_coverage.GetCoverageDict(files_for_coverage) + else: + logging.info('No Java files requiring coverage were included in %s.', +--- a/src/3rdparty/chromium/build/android/emma_coverage_stats_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/emma_coverage_stats_test.py 2025-01-16 02:26:08.515514166 +0800 +@@ -182,7 +182,7 @@ + read_values = ['
<div>' + multiple_trs + '</div>
'] + found, _ = MockOpenForFunction(self.parser._FindElements, read_values, + file_path='fake', xpath_selector='.//TR') +- self.assertEquals(2, len(found)) ++ self.assertEqual(2, len(found)) + + def testFindElements_noMatch(self): + read_values = [self.simple_html] +@@ -377,7 +377,7 @@ + return_value=package_to_emma) + coverage_stats.GetPackageNameFromFile = lambda x: package_names[x] + result_dict = coverage_stats._GetSourceFileToEmmaFileDict( +- package_names.keys()) ++ list(package_names.keys())) + self.assertDictEqual(result_dict, self.good_source_to_emma) + + def testGetCoverageDictForFile(self): +--- a/src/3rdparty/chromium/build/android/generate_jacoco_report.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/generate_jacoco_report.py 2025-01-16 02:26:08.516597481 +0800 +@@ -6,7 +6,7 @@ + + """Aggregates Jacoco coverage files to produce output.""" + +-from __future__ import print_function ++ + + import argparse + import fnmatch +--- a/src/3rdparty/chromium/build/android/lighttpd_server.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/lighttpd_server.py 2025-01-16 02:26:08.516597481 +0800 +@@ -10,11 +10,11 @@ + lighttpd_server PATH_TO_DOC_ROOT + """ + +-from __future__ import print_function ++ + + import codecs + import contextlib +-import httplib ++import http.client + import os + import random + import shutil +@@ -122,10 +122,10 @@ + def _TestServerConnection(self): + # Wait for server to start + server_msg = '' +- for timeout in xrange(1, 5): ++ for timeout in range(1, 5): + client_error = None + try: +- with contextlib.closing(httplib.HTTPConnection( ++ with contextlib.closing(http.client.HTTPConnection( + '127.0.0.1', self.port, timeout=timeout)) as http: + http.set_debuglevel(timeout > 3) + http.request('HEAD', '/') +@@ -137,7 +137,7 @@ + client_error = ('Bad response: %s %s version %s\n ' % + (r.status, r.reason, r.version) + + '\n '.join([': '.join(h) for h in r.getheaders()])) +- except (httplib.HTTPException, socket.error) as client_error: ++ except (http.client.HTTPException, socket.error) as client_error: + pass # Probably too quick connecting: try again + # Check for server startup error messages + # pylint: disable=no-member +@@ -248,7 +248,7 @@ + server = LighttpdServer(*argv[1:]) + try: + if server.StartupHttpServer(): +- raw_input('Server running at http://127.0.0.1:%s -' ++ input('Server running at http://127.0.0.1:%s -' + ' press Enter to exit it.' % server.port) + else: + print('Server exit code:', server.process.exitstatus) +--- a/src/3rdparty/chromium/build/android/list_class_verification_failures.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/list_class_verification_failures.py 2025-01-16 02:26:08.516597481 +0800 +@@ -9,7 +9,7 @@ + and accommodating API-level-specific details, such as file paths. 
+ """ + +-from __future__ import print_function ++ + + import argparse + import exceptions +--- a/src/3rdparty/chromium/build/android/method_count.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/method_count.py 2025-01-16 02:26:08.516597481 +0800 +@@ -27,10 +27,10 @@ + + def _ExtractSizesFromDexFile(dexfile): + count_by_item = {} +- for item_name, readable_name in _CONTRIBUTORS_TO_DEX_CACHE.iteritems(): ++ for item_name, readable_name in _CONTRIBUTORS_TO_DEX_CACHE.items(): + count_by_item[readable_name] = getattr(dexfile.header, item_name) + return count_by_item, sum( +- count_by_item[x] for x in _CONTRIBUTORS_TO_DEX_CACHE.itervalues()) * 4 ++ count_by_item[x] for x in _CONTRIBUTORS_TO_DEX_CACHE.values()) * 4 + + + def ExtractSizesFromZip(path): +@@ -44,11 +44,11 @@ + dexfile_name = os.path.basename(subpath) + dexfiles[dexfile_name] = dex_parser.DexFile(bytearray(z.read(subpath))) + +- for dexfile_name, dexfile in dexfiles.iteritems(): ++ for dexfile_name, dexfile in dexfiles.items(): + cur_dex_counts, cur_dexcache_size = _ExtractSizesFromDexFile(dexfile) + dex_counts_by_file[dexfile_name] = cur_dex_counts + dexcache_size += cur_dexcache_size +- num_unique_methods = dex_parser.CountUniqueDexMethods(dexfiles.values()) ++ num_unique_methods = dex_parser.CountUniqueDexMethods(list(dexfiles.values())) + return dex_counts_by_file, dexcache_size, num_unique_methods + + +@@ -68,8 +68,8 @@ + num_unique_methods = single_set_of_sizes['methods'] + + file_basename = os.path.basename(args.filename) +- for classes_dex_file, classes_dex_sizes in sizes.iteritems(): +- for readable_name in _CONTRIBUTORS_TO_DEX_CACHE.itervalues(): ++ for classes_dex_file, classes_dex_sizes in sizes.items(): ++ for readable_name in _CONTRIBUTORS_TO_DEX_CACHE.values(): + if readable_name in classes_dex_sizes: + perf_tests_results_helper.PrintPerfResult( + '%s_%s_%s' % (file_basename, classes_dex_file, readable_name), +--- a/src/3rdparty/chromium/build/android/resource_sizes.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/resource_sizes.py 2025-01-16 02:26:08.516597481 +0800 +@@ -8,7 +8,7 @@ + More information at //docs/speed/binary_size/metrics.md. + """ + +-from __future__ import print_function ++ + + import argparse + import collections +@@ -140,17 +140,17 @@ + grouped_section_sizes = collections.defaultdict(int) + no_bits_section_sizes, section_sizes = _CreateSectionNameSizeMap( + extracted_lib_path, tool_prefix) +- for group_name, section_names in _READELF_SIZES_METRICS.iteritems(): ++ for group_name, section_names in _READELF_SIZES_METRICS.items(): + for section_name in section_names: + if section_name in section_sizes: + grouped_section_sizes[group_name] += section_sizes.pop(section_name) + + # Consider all NOBITS sections as .bss. + grouped_section_sizes['bss'] = sum( +- v for v in no_bits_section_sizes.itervalues()) ++ v for v in no_bits_section_sizes.values()) + + # Group any unknown section headers into the "other" group. 
+- for section_header, section_size in section_sizes.iteritems(): ++ for section_header, section_size in section_sizes.items(): + sys.stderr.write('Unknown elf section header: %s\n' % section_header) + grouped_section_sizes['other'] += section_size + +@@ -222,7 +222,7 @@ + config_count = num_translations - 2 + + size = 0 +- for res_id, string_val in en_strings.iteritems(): ++ for res_id, string_val in en_strings.items(): + if string_val == fr_strings[res_id]: + string_size = len(string_val) + # 7 bytes is the per-entry overhead (not specific to any string). See +@@ -263,7 +263,7 @@ + if module_name != 'base' and config_name[:-4] in ('master', 'hi'): + sizes[module_name] += info.file_size + +- for module_name, size in sorted(sizes.iteritems()): ++ for module_name, size in sorted(sizes.items()): + report_func('DFM_' + module_name, 'Size with hindi', size, 'bytes') + + +@@ -489,7 +489,7 @@ + section_sizes = _ExtractLibSectionSizesFromApk( + apk_filename, lib_info.filename, tool_prefix) + native_code_unaligned_size += sum( +- v for k, v in section_sizes.iteritems() if k != 'bss') ++ v for k, v in section_sizes.items() if k != 'bss') + # Size of main .so vs remaining. + if lib_info == main_lib_info: + main_lib_size = lib_info.file_size +@@ -497,7 +497,7 @@ + secondary_size = native_code.ComputeUncompressedSize() - main_lib_size + report_func('Specifics', 'other lib size', secondary_size, 'bytes') + +- for metric_name, size in section_sizes.iteritems(): ++ for metric_name, size in section_sizes.items(): + report_func('MainLibInfo', metric_name, size, 'bytes') + + # Main metric that we want to monitor for jumps. +@@ -598,10 +598,10 @@ + sizes, total_size, num_unique_methods = method_count.ExtractSizesFromZip( + apk_filename) + cumulative_sizes = collections.defaultdict(int) +- for classes_dex_sizes in sizes.itervalues(): +- for count_type, count in classes_dex_sizes.iteritems(): ++ for classes_dex_sizes in sizes.values(): ++ for count_type, count in classes_dex_sizes.items(): + cumulative_sizes[count_type] += count +- for count_type, count in cumulative_sizes.iteritems(): ++ for count_type, count in cumulative_sizes.items(): + report_func('Dex', count_type, count, 'entries') + + report_func('Dex', 'unique methods', num_unique_methods, 'entries') +@@ -675,7 +675,7 @@ + value, units) + + def SynthesizeTotals(self): +- for tup, value in sorted(self._combined_metrics.iteritems()): ++ for tup, value in sorted(self._combined_metrics.items()): + graph_title, trace_title, units = tup + perf_tests_results_helper.ReportPerfResult( + self._chartjson, graph_title, 'Combined_' + trace_title, value, units) +--- a/src/3rdparty/chromium/build/android/test_runner.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/test_runner.py 2025-01-16 02:26:08.516597481 +0800 +@@ -694,7 +694,7 @@ + + parser.add_argument( + '--browser', +- required=True, choices=constants.PACKAGE_INFO.keys(), ++ required=True, choices=list(constants.PACKAGE_INFO.keys()), + metavar='BROWSER', help='Browser under test.') + parser.add_argument( + '--category', +@@ -723,7 +723,7 @@ + parser.add_argument( + '-s', '--suite', + dest='suite_name', metavar='SUITE_NAME', +- choices=constants.PYTHON_UNIT_TEST_SUITES.keys(), ++ choices=list(constants.PYTHON_UNIT_TEST_SUITES.keys()), + help='Name of the test suite to run.') + + +@@ -907,7 +907,7 @@ + with out_manager, json_finalizer(): + with json_writer(), logcats_uploader, env, test_instance, test_run: + +- repetitions = (xrange(args.repeat + 1) if args.repeat >= 0 ++ 
repetitions = (range(args.repeat + 1) if args.repeat >= 0 + else itertools.count()) + result_counts = collections.defaultdict( + lambda: collections.defaultdict(int)) +--- a/src/3rdparty/chromium/build/android/update_verification.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/update_verification.py 2025-01-16 02:26:08.516597481 +0800 +@@ -38,7 +38,7 @@ + + def CreateAppData(device, old_apk, app_data, package_name): + device.Install(old_apk) +- raw_input('Set the application state. Once ready, press enter and ' ++ input('Set the application state. Once ready, press enter and ' + 'select "Backup my data" on the device.') + device.adb.Backup(app_data, packages=[package_name]) + logging.critical('Application data saved to %s', app_data) +@@ -47,7 +47,7 @@ + device.Install(old_apk) + device.adb.Restore(app_data) + # Restore command is not synchronous +- raw_input('Select "Restore my data" on the device. Then press enter to ' ++ input('Select "Restore my data" on the device. Then press enter to ' + 'continue.') + if not device.IsApplicationInstalled(package_name): + raise Exception('Expected package %s to already be installed. ' +--- a/src/3rdparty/chromium/build/android/gradle/generate_gradle.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gradle/generate_gradle.py 2025-01-16 02:26:08.516597481 +0800 +@@ -83,7 +83,7 @@ + """ + if path_or_list is None: + return [] +- if not isinstance(path_or_list, basestring): ++ if not isinstance(path_or_list, str): + return [_RebasePath(p, new_cwd, old_cwd) for p in path_or_list] + if old_cwd is None: + old_cwd = constants.GetOutDirectory() +@@ -435,10 +435,10 @@ + if java_files: + java_files = _RebasePath(java_files) + computed_dirs = _ComputeJavaSourceDirs(java_files) +- java_dirs = computed_dirs.keys() ++ java_dirs = list(computed_dirs.keys()) + all_found_java_files = set() + +- for directory, files in computed_dirs.iteritems(): ++ for directory, files in computed_dirs.items(): + found_java_files = build_utils.FindInDirectory(directory, '*.java') + all_found_java_files.update(found_java_files) + unwanted_java_files = set(found_java_files) - set(files) +@@ -570,7 +570,7 @@ + test_entry = generator.Generate(e) + test_entry['android_manifest'] = generator.GenerateManifest(e) + variables['android_test'].append(test_entry) +- for key, value in test_entry.iteritems(): ++ for key, value in test_entry.items(): + if isinstance(value, list): + test_entry[key] = sorted(set(value) - set(variables['main'][key])) + +@@ -725,7 +725,7 @@ + entry.android_test_entries = android_test_entries[target_name] + del android_test_entries[target_name] + # Add unmatched test entries as individual targets. +- combined_entries.extend(e for l in android_test_entries.values() for e in l) ++ combined_entries.extend(e for l in list(android_test_entries.values()) for e in l) + return combined_entries + + +--- a/src/3rdparty/chromium/build/android/gradle/gn_to_cmake.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gradle/gn_to_cmake.py 2025-01-16 02:26:08.516597481 +0800 +@@ -16,7 +16,7 @@ + The first is recommended, as it will auto-update. 
+ """ + +-from __future__ import print_function ++ + + import functools + import json +@@ -267,7 +267,7 @@ + out.write('\n') + + out.write(' DEPENDS ') +- for sources_type_name in sources.values(): ++ for sources_type_name in list(sources.values()): + WriteVariable(out, sources_type_name, ' ') + out.write('\n') + +@@ -384,7 +384,7 @@ + out.write('"\n') + + out.write(' DEPENDS ') +- for sources_type_name in sources.values(): ++ for sources_type_name in list(sources.values()): + WriteVariable(out, sources_type_name, ' ') + out.write('\n') + +@@ -502,7 +502,7 @@ + source_types['obj_target'].append(obj_target_sources) + + sources = {} +- for source_type, sources_of_type in source_types.items(): ++ for source_type, sources_of_type in list(source_types.items()): + if sources_of_type: + sources[source_type] = '${target}__' + source_type + '_srcs' + SetVariableList(out, sources[source_type], sources_of_type) +@@ -536,7 +536,7 @@ + if target.cmake_type.modifier is not None: + out.write(' ') + out.write(target.cmake_type.modifier) +- for sources_type_name in sources.values(): ++ for sources_type_name in list(sources.values()): + WriteVariable(out, sources_type_name, ' ') + if synthetic_dependencies: + out.write(' DEPENDS') +@@ -667,7 +667,7 @@ + out.write(' configure_file(${gn_dep} "CMakeLists.devnull" COPYONLY)\n') + out.write('endforeach("gn_dep")\n') + +- for target_name in project.targets.keys(): ++ for target_name in list(project.targets.keys()): + out.write('\n') + WriteTarget(out, Target(target_name, project), project) + +--- a/src/3rdparty/chromium/build/android/gyp/allot_native_libraries.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/allot_native_libraries.py 2025-01-16 02:26:08.516597481 +0800 +@@ -114,7 +114,7 @@ + Exception if some libraries can only be allotted to the None root. + """ + allotment_map = collections.defaultdict(set) +- for library, modules in libraries_map.items(): ++ for library, modules in list(libraries_map.items()): + ancestor = _ClosestCommonAncestor(module_tree, modules) + if not ancestor: + raise Exception('Cannot allot libraries for given dependency tree') +@@ -175,7 +175,7 @@ + with open(options.output, 'w') as f: + # Write native libraries config and ensure the output is deterministic. 
+ json.dump({m: sorted(l) +- for m, l in allotment_map.items()}, ++ for m, l in list(allotment_map.items())}, + f, + sort_keys=True, + indent=2) +--- a/src/3rdparty/chromium/build/android/gyp/assert_static_initializers.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/assert_static_initializers.py 2025-01-16 02:26:08.516597481 +0800 +@@ -5,7 +5,7 @@ + + """Checks the number of static initializers in an APK's library.""" + +-from __future__ import print_function ++ + + import argparse + import os +--- a/src/3rdparty/chromium/build/android/gyp/compile_java.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/compile_java.py 2025-01-16 02:26:08.516597481 +0800 +@@ -233,7 +233,7 @@ + line = Colorize(line, marker_re, marker_color) + return line + +- return '\n'.join(map(ApplyColors, filter(ApplyFilters, output.split('\n')))) ++ return '\n'.join(map(ApplyColors, list(filter(ApplyFilters, output.split('\n'))))) + + + def CheckErrorproneStderrWarning(jar_path, expected_warning_regex, +--- a/src/3rdparty/chromium/build/android/gyp/copy_ex.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/copy_ex.py 2025-01-16 02:26:08.516597481 +0800 +@@ -6,7 +6,7 @@ + + """Copies files to a directory.""" + +-from __future__ import print_function ++ + + import filecmp + import itertools +@@ -74,7 +74,7 @@ + print('Renaming source and destination files not match.') + sys.exit(-1) + +- for src, dest in itertools.izip(src_files, dest_files): ++ for src, dest in zip(src_files, dest_files): + if os.path.isdir(src): + print('renaming diretory is not supported.') + sys.exit(-1) +--- a/src/3rdparty/chromium/build/android/gyp/create_apk_operations_script.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/create_apk_operations_script.py 2025-01-16 02:26:08.516597481 +0800 +@@ -87,7 +87,7 @@ + 'TARGET_CPU': repr(args.target_cpu), + } + script.write(SCRIPT_TEMPLATE.substitute(script_dict)) +- os.chmod(args.script_output_path, 0750) ++ os.chmod(args.script_output_path, 0o750) + return 0 + + +--- a/src/3rdparty/chromium/build/android/gyp/create_app_bundle.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/create_app_bundle.py 2025-01-16 02:26:08.517680796 +0800 +@@ -331,7 +331,7 @@ + """ + ids_map = resource_utils.GenerateStringResourcesAllowList( + base_module_rtxt_path, base_allowlist_rtxt_path) +- return ids_map.keys() ++ return list(ids_map.keys()) + + + def _ConcatTextFiles(in_paths, out_path): +@@ -386,7 +386,7 @@ + if not os.path.exists(module_pathmap_path): + continue + module_pathmap = _LoadPathmap(module_pathmap_path) +- for short_path, long_path in module_pathmap.iteritems(): ++ for short_path, long_path in module_pathmap.items(): + rebased_long_path = '{}/{}'.format(module_name, long_path) + rebased_short_path = '{}/{}'.format(module_name, short_path) + line = '{} -> {}\n'.format(rebased_long_path, rebased_short_path) +@@ -432,11 +432,11 @@ + classes = set() + base_package_name = manifest_utils.GetPackage(base_manifest) + for package in dexdump.Dump(base_zip): +- for name, package_dict in package.items(): ++ for name, package_dict in list(package.items()): + if not name: + name = base_package_name + classes.update('%s.%s' % (name, c) +- for c in package_dict['classes'].keys()) ++ for c in list(package_dict['classes'].keys())) + + # Ensure all services are present in base module. 
+ for service_name in service_names: +--- a/src/3rdparty/chromium/build/android/gyp/create_bundle_wrapper_script.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/create_bundle_wrapper_script.py 2025-01-16 02:26:08.517680796 +0800 +@@ -119,7 +119,7 @@ + repr(args.default_modules), + } + script.write(SCRIPT_TEMPLATE.substitute(script_dict)) +- os.chmod(args.script_output_path, 0750) ++ os.chmod(args.script_output_path, 0o750) + return 0 + + +--- a/src/3rdparty/chromium/build/android/gyp/create_java_binary_script.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/create_java_binary_script.py 2025-01-16 02:26:08.517680796 +0800 +@@ -103,7 +103,7 @@ + extra_program_args=repr(extra_program_args), + noverify_flag=noverify_flag)) + +- os.chmod(options.output, 0750) ++ os.chmod(options.output, 0o750) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/build/android/gyp/dex.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/dex.py 2025-01-16 02:26:08.517680796 +0800 +@@ -290,7 +290,7 @@ + if not ordered_files: + raise Exception('Could not find classes.dex multidex file in %s', + dex_files) +- for dex_idx in xrange(2, len(dex_files) + 1): ++ for dex_idx in range(2, len(dex_files) + 1): + archive_name = 'classes%d.dex' % dex_idx + for f in dex_files: + if f.endswith(archive_name): +--- a/src/3rdparty/chromium/build/android/gyp/dexsplitter.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/dexsplitter.py 2025-01-16 02:26:08.517680796 +0800 +@@ -81,7 +81,7 @@ + options = _ParseOptions(args) + + input_paths = [options.input_dex_zip] +- for feature_jars in options.features.itervalues(): ++ for feature_jars in options.features.values(): + for feature_jar in feature_jars: + input_paths.append(feature_jar) + +--- a/src/3rdparty/chromium/build/android/gyp/extract_unwind_tables.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/extract_unwind_tables.py 2025-01-16 02:26:08.517680796 +0800 +@@ -197,7 +197,7 @@ + # Store mapping between the functions to the index. + func_addr_to_index = {} + previous_func_end = 0 +- for addr, function in sorted(cfi_data.iteritems()): ++ for addr, function in sorted(cfi_data.items()): + # Add an empty function entry when functions CFIs are missing between 2 + # functions. + if previous_func_end != 0 and addr - previous_func_end > 4: +@@ -243,7 +243,7 @@ + _Write4Bytes(out_file, len(func_addr_to_index)) + + # Write the UNW_INDEX table. First list of addresses and then indices. 
+- sorted_unw_index = sorted(func_addr_to_index.iteritems()) ++ sorted_unw_index = sorted(func_addr_to_index.items()) + for addr, index in sorted_unw_index: + _Write4Bytes(out_file, addr) + for addr, index in sorted_unw_index: +--- a/src/3rdparty/chromium/build/android/gyp/extract_unwind_tables_tests.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/extract_unwind_tables_tests.py 2025-01-16 02:26:08.517680796 +0800 +@@ -109,7 +109,7 @@ + + func_start = index + 1 + func_end = func_start + unw_data[index] * 2 +- self.assertEquals( ++ self.assertEqual( + len(expected_cfi_data[func_addr]), func_end - func_start) + func_cfi = unw_data[func_start : func_end] + self.assertEqual(expected_cfi_data[func_addr], func_cfi) +--- a/src/3rdparty/chromium/build/android/gyp/find.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/find.py 2025-01-16 02:26:08.517680796 +0800 +@@ -7,7 +7,7 @@ + """Finds files in directories. + """ + +-from __future__ import print_function ++ + + import fnmatch + import optparse +--- a/src/3rdparty/chromium/build/android/gyp/gcc_preprocess.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/gcc_preprocess.py 2025-01-16 02:26:08.517680796 +0800 +@@ -15,7 +15,7 @@ + + gcc_cmd = [ 'gcc' ] # invoke host gcc. + if options.defines: +- gcc_cmd.extend(sum(map(lambda w: ['-D', w], options.defines), [])) ++ gcc_cmd.extend(sum([['-D', w] for w in options.defines], [])) + + with build_utils.AtomicOutput(options.output) as f: + gcc_cmd.extend([ +--- a/src/3rdparty/chromium/build/android/gyp/jacoco_instr.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/jacoco_instr.py 2025-01-16 02:26:08.517680796 +0800 +@@ -13,7 +13,7 @@ + + """ + +-from __future__ import print_function ++ + + import argparse + import json +--- a/src/3rdparty/chromium/build/android/gyp/java_cpp_enum.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/java_cpp_enum.py 2025-01-16 02:26:08.517680796 +0800 +@@ -71,7 +71,7 @@ + # Enums, if given no value, are given the value of the previous enum + 1. + if not all(self.entries.values()): + prev_enum_value = -1 +- for key, value in self.entries.items(): ++ for key, value in list(self.entries.items()): + if not value: + self.entries[key] = prev_enum_value + 1 + elif value in self.entries: +@@ -96,7 +96,7 @@ + 'k' + self.original_enum_name] + + for prefix in prefixes: +- if all([w.startswith(prefix) for w in self.entries.keys()]): ++ if all([w.startswith(prefix) for w in list(self.entries.keys())]): + prefix_to_strip = prefix + break + else: +@@ -104,7 +104,7 @@ + + def StripEntries(entries): + ret = collections.OrderedDict() +- for k, v in entries.items(): ++ for k, v in list(entries.items()): + stripped_key = k.replace(prefix_to_strip, '', 1) + if isinstance(v, str): + stripped_value = v.replace(prefix_to_strip, '') +@@ -126,7 +126,7 @@ + """Normalize keys in |d| and update references to old keys in |d| values.""" + keys_map = {k: func(k) for k in d} + ret = collections.OrderedDict() +- for k, v in d.items(): ++ for k, v in list(d.items()): + # Need to transform values as well when the entry value was explicitly set + # (since it could contain references to other enum entry values). 
+     if isinstance(v, str):
+@@ -135,7 +135,7 @@
+       if v in d:
+         v = keys_map[v]
+       else:
+-        for old_key, new_key in keys_map.items():
++        for old_key, new_key in list(keys_map.items()):
+           v = v.replace(old_key, new_key)
+     ret[keys_map[k]] = v
+   return ret
+@@ -375,7 +375,7 @@
+   enum_template = Template('  int ${NAME} = ${VALUE};')
+   enum_entries_string = []
+   enum_names = []
+-  for enum_name, enum_value in enum_definition.entries.items():
++  for enum_name, enum_value in list(enum_definition.entries.items()):
+     values = {
+         'NAME': enum_name,
+         'VALUE': enum_value,
+--- a/src/3rdparty/chromium/build/android/gyp/java_google_api_keys.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/build/android/gyp/java_google_api_keys.py	2025-01-16 02:26:08.517680796 +0800
+@@ -48,7 +48,7 @@
+   constant_template = string.Template(
+       '  public static final String ${NAME} = "${VALUE}";')
+   constant_entries_list = []
+-  for constant_name, constant_value in constant_definitions.iteritems():
++  for constant_name, constant_value in constant_definitions.items():
+     values = {
+         'NAME': constant_name,
+         'VALUE': constant_value,
+--- a/src/3rdparty/chromium/build/android/gyp/jetify_jar.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/build/android/gyp/jetify_jar.py	2025-01-16 02:26:08.517680796 +0800
+@@ -4,7 +4,7 @@
+ # Use of this source code is governed by a BSD-style license that can be
+ # found in the LICENSE file.
+ 
+-from __future__ import print_function
++
+ 
+ import argparse
+ import os
+--- a/src/3rdparty/chromium/build/android/gyp/lint.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/build/android/gyp/lint.py	2025-01-16 02:26:08.517680796 +0800
+@@ -5,7 +5,7 @@
+ # found in the LICENSE file.
+ """Runs Android's lint tool."""
+ 
+-from __future__ import print_function
++
+ 
+ import argparse
+ import functools
+--- a/src/3rdparty/chromium/build/android/gyp/write_build_config.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/build/android/gyp/write_build_config.py	2025-01-16 02:26:08.517680796 +0800
+@@ -573,7 +573,7 @@
+ --------------- END_MARKDOWN ---------------------------------------------------
+ """
+ 
+-from __future__ import print_function
++
+ 
+ import collections
+ import itertools
+@@ -755,7 +755,7 @@
+     dest_map = uncompressed if disable_compression else compressed
+     other_map = compressed if disable_compression else uncompressed
+     outputs = entry.get('outputs', [])
+-    for src, dest in itertools.izip_longest(entry['sources'], outputs):
++    for src, dest in itertools.zip_longest(entry['sources'], outputs):
+       if not dest:
+         dest = os.path.basename(src)
+       # Merge so that each path shows up in only one of the lists, and that
+@@ -766,7 +766,7 @@
+       locale_paks.add(dest)
+ 
+   def create_list(asset_map):
+-    ret = ['%s:%s' % (src, dest) for dest, src in asset_map.iteritems()]
++    ret = ['%s:%s' % (src, dest) for dest, src in asset_map.items()]
+     # Sort to ensure deterministic ordering.
+     ret.sort()
+     return ret
+@@ -1170,7 +1170,7 @@
+   if any(getattr(options, x) for x in lib_options):
+     for attr in lib_options:
+       if not getattr(options, attr):
+-        raise('Expected %s to be set.' % attr)
++        raise Exception('Expected %s to be set.' % attr)
+ + if options.requires_android and not options.supports_android: + raise Exception( +@@ -1664,7 +1664,7 @@ + configs_by_classpath_entry = collections.defaultdict(list) + static_lib_jar_paths = {} + for config_path, dep_config in (sorted( +- static_library_dependent_configs_by_path.iteritems())): ++ static_library_dependent_configs_by_path.items())): + # For bundles, only the jar path and jni sources of the base module + # are relevant for proguard. Should be updated when bundle feature + # modules support JNI. +@@ -1690,7 +1690,7 @@ + for cp_entry in device_classpath: + configs_by_classpath_entry[cp_entry].append(options.build_config) + +- for cp_entry, candidate_configs in configs_by_classpath_entry.iteritems(): ++ for cp_entry, candidate_configs in configs_by_classpath_entry.items(): + config_path = (candidate_configs[0] + if len(candidate_configs) == 1 else options.build_config) + classpath_entries_by_owning_config[config_path].append(cp_entry) +@@ -1700,11 +1700,11 @@ + + deps_info['static_library_proguard_mapping_output_paths'] = sorted([ + d['proguard_mapping_path'] +- for d in static_library_dependent_configs_by_path.itervalues() ++ for d in static_library_dependent_configs_by_path.values() + ]) + deps_info['static_library_dependent_classpath_configs'] = { + path: sorted(set(classpath)) +- for path, classpath in classpath_entries_by_owning_config.iteritems() ++ for path, classpath in classpath_entries_by_owning_config.items() + } + deps_info['extra_main_r_text_files'] = sorted(extra_main_r_text_files) + +@@ -1981,7 +1981,7 @@ + _AddJarMapping(jar_to_target, [base_module_build_config['deps_info']]) + if options.tested_apk_config: + _AddJarMapping(jar_to_target, [tested_apk_config]) +- for jar, target in itertools.izip( ++ for jar, target in zip( + tested_apk_config['javac_full_classpath'], + tested_apk_config['javac_full_classpath_targets']): + jar_to_target[jar] = target +--- a/src/3rdparty/chromium/build/android/gyp/proto/Resources_pb2.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/proto/Resources_pb2.py 2025-01-16 02:26:08.518764111 +0800 +@@ -13,7 +13,7 @@ + _sym_db = _symbol_database.Default() + + +-import Configuration_pb2 as frameworks_dot_base_dot_tools_dot_aapt2_dot_Configuration__pb2 ++from . 
import Configuration_pb2 as frameworks_dot_base_dot_tools_dot_aapt2_dot_Configuration__pb2 + + + DESCRIPTOR = _descriptor.FileDescriptor( +--- a/src/3rdparty/chromium/build/android/gyp/util/build_utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/util/build_utils.py 2025-01-16 02:26:08.518764111 +0800 +@@ -39,7 +39,7 @@ + 'java_8', 'jre', 'lib', 'rt.jar') + + try: +- string_types = basestring ++ string_types = str + except NameError: + string_types = (str, bytes) + +--- a/src/3rdparty/chromium/build/android/gyp/util/build_utils_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/util/build_utils_test.py 2025-01-16 02:26:08.518764111 +0800 +@@ -26,7 +26,7 @@ + + class BuildUtilsTest(unittest.TestCase): + def testGetSortedTransitiveDependencies_all(self): +- TOP = _DEPS.keys() ++ TOP = list(_DEPS.keys()) + EXPECTED = ['a', 'b', 'c', 'd', 'f', 'e', 'g', 'h', 'i'] + actual = build_utils.GetSortedTransitiveDependencies(TOP, _DEPS.get) + self.assertEqual(EXPECTED, actual) +--- a/src/3rdparty/chromium/build/android/gyp/util/jar_info_utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/util/jar_info_utils.py 2025-01-16 02:26:08.518764111 +0800 +@@ -50,7 +50,7 @@ + path of Java source files that where extracted from an .srcjar into a + temporary location. + """ +- for fully_qualified_name, path in sorted(info_data.iteritems()): ++ for fully_qualified_name, path in sorted(info_data.items()): + if source_file_map and path in source_file_map: + path = source_file_map[path] + assert not path.startswith('/tmp'), ( +--- a/src/3rdparty/chromium/build/android/gyp/util/manifest_utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/util/manifest_utils.py 2025-01-16 02:26:08.518764111 +0800 +@@ -264,14 +264,14 @@ + if app_node is not None: + for node in app_node.getchildren(): + if (node.tag in ['uses-static-library', 'static-library'] +- and '{%s}version' % ANDROID_NAMESPACE in node.keys() +- and '{%s}name' % ANDROID_NAMESPACE in node.keys()): ++ and '{%s}version' % ANDROID_NAMESPACE in list(node.keys()) ++ and '{%s}name' % ANDROID_NAMESPACE in list(node.keys())): + node.set('{%s}version' % ANDROID_NAMESPACE, '$VERSION_NUMBER') + + # We also remove the exact package name (except the one at the root level) + # to avoid noise during manifest comparison. + def blur_package_name(node): +- for key in node.keys(): ++ for key in list(node.keys()): + node.set(key, node.get(key).replace(package, '$PACKAGE')) + + for child in node.getchildren(): +--- a/src/3rdparty/chromium/build/android/gyp/util/md5_check.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/util/md5_check.py 2025-01-16 02:26:08.518764111 +0800 +@@ -2,7 +2,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ +-from __future__ import print_function ++ + + import difflib + import hashlib +--- a/src/3rdparty/chromium/build/android/gyp/util/parallel.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/util/parallel.py 2025-01-16 02:26:08.518764111 +0800 +@@ -205,7 +205,7 @@ + pool = _MakeProcessPool(arg_tuples, **kwargs) + wrapped_func = _FuncWrapper(func) + try: +- for result in pool.imap(wrapped_func, xrange(len(arg_tuples))): ++ for result in pool.imap(wrapped_func, range(len(arg_tuples))): + _CheckForException(result) + yield result + finally: +--- a/src/3rdparty/chromium/build/android/gyp/util/resource_utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/gyp/util/resource_utils.py 2025-01-16 02:26:08.518764111 +0800 +@@ -305,7 +305,7 @@ + """ + entries = self._ApplyRenames() + lines = [] +- for archive_path, source_path in entries.iteritems(): ++ for archive_path, source_path in entries.items(): + lines.append('{}\t{}\n'.format(archive_path, source_path)) + with open(info_file_path, 'w') as info_file: + info_file.writelines(sorted(lines)) +@@ -648,7 +648,7 @@ + """Render an R.java source file. See _CreateRJaveSourceFile for args info.""" + final_resources_by_type = collections.defaultdict(list) + non_final_resources_by_type = collections.defaultdict(list) +- for res_type, resources in all_resources_by_type.iteritems(): ++ for res_type, resources in all_resources_by_type.items(): + for entry in resources: + # Entries in stylable that are not int[] are not actually resource ids + # but constants. +@@ -1021,13 +1021,13 @@ + result = '\n' + result += ' (1 ms)', + ] + actual = gtest_test_instance.ParseGTestOutput(raw_output, None, None) +- self.assertEquals(1, len(actual)) +- self.assertEquals('Baz/FooTest.Bar/0', actual[0].GetName()) +- self.assertEquals(1, actual[0].GetDuration()) +- self.assertEquals(base_test_result.ResultType.FAIL, actual[0].GetType()) ++ self.assertEqual(1, len(actual)) ++ self.assertEqual('Baz/FooTest.Bar/0', actual[0].GetName()) ++ self.assertEqual(1, actual[0].GetDuration()) ++ self.assertEqual(base_test_result.ResultType.FAIL, actual[0].GetType()) + + def testParseGTestOutput_typeAndValueParameterized(self): + raw_output = [ +@@ -207,18 +207,18 @@ + ' where TypeParam = and GetParam() = (1 ms)', + ] + actual = gtest_test_instance.ParseGTestOutput(raw_output, None, None) +- self.assertEquals(1, len(actual)) +- self.assertEquals('Baz/FooTest.Bar/0', actual[0].GetName()) +- self.assertEquals(1, actual[0].GetDuration()) +- self.assertEquals(base_test_result.ResultType.FAIL, actual[0].GetType()) ++ self.assertEqual(1, len(actual)) ++ self.assertEqual('Baz/FooTest.Bar/0', actual[0].GetName()) ++ self.assertEqual(1, actual[0].GetDuration()) ++ self.assertEqual(base_test_result.ResultType.FAIL, actual[0].GetType()) + + def testParseGTestXML_none(self): + actual = gtest_test_instance.ParseGTestXML(None) +- self.assertEquals([], actual) ++ self.assertEqual([], actual) + + def testParseGTestJSON_none(self): + actual = gtest_test_instance.ParseGTestJSON(None) +- self.assertEquals([], actual) ++ self.assertEqual([], actual) + + def testParseGTestJSON_example(self): + raw_json = """ +@@ -253,10 +253,10 @@ + } + }""" + actual = gtest_test_instance.ParseGTestJSON(raw_json) +- self.assertEquals(1, len(actual)) +- self.assertEquals('mojom_tests.parse.ast_unittest.ASTTest.testNodeBase', ++ self.assertEqual(1, len(actual)) ++ self.assertEqual('mojom_tests.parse.ast_unittest.ASTTest.testNodeBase', + actual[0].GetName()) +- 
self.assertEquals(base_test_result.ResultType.PASS, actual[0].GetType()) ++ self.assertEqual(base_test_result.ResultType.PASS, actual[0].GetType()) + + def testTestNameWithoutDisabledPrefix_disabled(self): + test_name_list = [ +@@ -268,7 +268,7 @@ + actual = gtest_test_instance \ + .TestNameWithoutDisabledPrefix(test_name) + expected = 'A.B' +- self.assertEquals(expected, actual) ++ self.assertEqual(expected, actual) + + def testTestNameWithoutDisabledPrefix_flaky(self): + test_name_list = [ +@@ -280,14 +280,14 @@ + actual = gtest_test_instance \ + .TestNameWithoutDisabledPrefix(test_name) + expected = 'A.B' +- self.assertEquals(expected, actual) ++ self.assertEqual(expected, actual) + + def testTestNameWithoutDisabledPrefix_notDisabledOrFlaky(self): + test_name = 'A.B' + actual = gtest_test_instance \ + .TestNameWithoutDisabledPrefix(test_name) + expected = 'A.B' +- self.assertEquals(expected, actual) ++ self.assertEqual(expected, actual) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/build/android/pylib/instrumentation/instrumentation_test_instance.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/instrumentation/instrumentation_test_instance.py 2025-01-16 02:26:08.518764111 +0800 +@@ -199,7 +199,7 @@ + if current_result.GetType() == base_test_result.ResultType.UNKNOWN: + crashed = (result_code == _ACTIVITY_RESULT_CANCELED + and any(_NATIVE_CRASH_RE.search(l) +- for l in result_bundle.itervalues())) ++ for l in result_bundle.values())) + if crashed: + current_result.SetType(base_test_result.ResultType.CRASH) + +@@ -402,8 +402,8 @@ + } for m in methods if m.startswith('test')] + + for dump in dex_dumps: +- for package_name, package_info in dump.iteritems(): +- for class_name, class_info in package_info['classes'].iteritems(): ++ for package_name, package_info in dump.items(): ++ for class_name, class_info in package_info['classes'].items(): + if class_name.endswith('Test'): + tests.append({ + 'class': '%s.%s' % (package_name, class_name), +@@ -664,7 +664,7 @@ + self._package_info = None + if self._apk_under_test: + package_under_test = self._apk_under_test.GetPackageName() +- for package_info in constants.PACKAGE_INFO.itervalues(): ++ for package_info in constants.PACKAGE_INFO.values(): + if package_under_test == package_info.package: + self._package_info = package_info + break +@@ -1021,7 +1021,7 @@ + elif clazz == _PARAMETERIZED_COMMAND_LINE_FLAGS: + list_of_switches = [] + for annotation in methods['value']: +- for clazz, methods in annotation.iteritems(): ++ for clazz, methods in annotation.items(): + list_of_switches += _annotationToSwitches(clazz, methods) + return list_of_switches + else: +@@ -1039,7 +1039,7 @@ + list_of_switches = [] + _checkParameterization(annotations) + if _SKIP_PARAMETERIZATION not in annotations: +- for clazz, methods in annotations.iteritems(): ++ for clazz, methods in annotations.items(): + list_of_switches += _annotationToSwitches(clazz, methods) + if list_of_switches: + _setTestFlags(t, _switchesToFlags(list_of_switches[0])) +--- a/src/3rdparty/chromium/build/android/pylib/instrumentation/instrumentation_test_instance_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/instrumentation/instrumentation_test_instance_test.py 2025-01-16 02:26:08.518764111 +0800 +@@ -60,7 +60,7 @@ + o = self.createTestInstance() + args = self.createFlagAttributesArgs(command_line_flags=['--foo', '--bar']) + o._initializeFlagAttributes(args) +- self.assertEquals(o._flags, 
['--enable-test-intents', '--foo', '--bar']) ++ self.assertEqual(o._flags, ['--enable-test-intents', '--foo', '--bar']) + + def test_initializeFlagAttributes_deviceFlagsFile(self): + o = self.createTestInstance() +@@ -70,26 +70,26 @@ + + args = self.createFlagAttributesArgs(device_flags_file=flags_file.name) + o._initializeFlagAttributes(args) +- self.assertEquals(o._flags, ['--enable-test-intents', '--foo', '--bar']) ++ self.assertEqual(o._flags, ['--enable-test-intents', '--foo', '--bar']) + + def test_initializeFlagAttributes_strictModeOn(self): + o = self.createTestInstance() + args = self.createFlagAttributesArgs(strict_mode='on') + o._initializeFlagAttributes(args) +- self.assertEquals(o._flags, ['--enable-test-intents', '--strict-mode=on']) ++ self.assertEqual(o._flags, ['--enable-test-intents', '--strict-mode=on']) + + def test_initializeFlagAttributes_strictModeOn_coverageOn(self): + o = self.createTestInstance() + args = self.createFlagAttributesArgs( + strict_mode='on', coverage_dir='/coverage/dir') + o._initializeFlagAttributes(args) +- self.assertEquals(o._flags, ['--enable-test-intents']) ++ self.assertEqual(o._flags, ['--enable-test-intents']) + + def test_initializeFlagAttributes_strictModeOff(self): + o = self.createTestInstance() + args = self.createFlagAttributesArgs(strict_mode='off') + o._initializeFlagAttributes(args) +- self.assertEquals(o._flags, ['--enable-test-intents']) ++ self.assertEqual(o._flags, ['--enable-test-intents']) + + def testGetTests_noFilter(self): + o = self.createTestInstance() +@@ -156,7 +156,7 @@ + o._junit4_runner_class = 'J4Runner' + actual_tests = o.ProcessRawTests(raw_tests) + +- self.assertEquals(actual_tests, expected_tests) ++ self.assertEqual(actual_tests, expected_tests) + + def testGetTests_simpleGtestFilter(self): + o = self.createTestInstance() +@@ -195,7 +195,7 @@ + o._junit4_runner_class = 'J4Runner' + actual_tests = o.ProcessRawTests(raw_tests) + +- self.assertEquals(actual_tests, expected_tests) ++ self.assertEqual(actual_tests, expected_tests) + + def testGetTests_simpleGtestUnqualifiedNameFilter(self): + o = self.createTestInstance() +@@ -234,7 +234,7 @@ + o._junit4_runner_class = 'J4Runner' + actual_tests = o.ProcessRawTests(raw_tests) + +- self.assertEquals(actual_tests, expected_tests) ++ self.assertEqual(actual_tests, expected_tests) + + def testGetTests_parameterizedTestGtestFilter(self): + o = self.createTestInstance() +@@ -293,7 +293,7 @@ + o._test_filter = 'org.chromium.test.SampleTest.testMethod1' + actual_tests = o.ProcessRawTests(raw_tests) + +- self.assertEquals(actual_tests, expected_tests) ++ self.assertEqual(actual_tests, expected_tests) + + def testGetTests_wildcardGtestFilter(self): + o = self.createTestInstance() +@@ -343,7 +343,7 @@ + o._junit4_runner_class = 'J4Runner' + actual_tests = o.ProcessRawTests(raw_tests) + +- self.assertEquals(actual_tests, expected_tests) ++ self.assertEqual(actual_tests, expected_tests) + + def testGetTests_negativeGtestFilter(self): + o = self.createTestInstance() +@@ -402,7 +402,7 @@ + o._junit4_runner_class = 'J4Runner' + actual_tests = o.ProcessRawTests(raw_tests) + +- self.assertEquals(actual_tests, expected_tests) ++ self.assertEqual(actual_tests, expected_tests) + + def testGetTests_annotationFilter(self): + o = self.createTestInstance() +@@ -461,7 +461,7 @@ + o._junit4_runner_class = 'J4Runner' + actual_tests = o.ProcessRawTests(raw_tests) + +- self.assertEquals(actual_tests, expected_tests) ++ self.assertEqual(actual_tests, expected_tests) + + def 
testGetTests_excludedAnnotationFilter(self): + o = self.createTestInstance() +@@ -513,7 +513,7 @@ + o._junit4_runner_class = 'J4Runner' + actual_tests = o.ProcessRawTests(raw_tests) + +- self.assertEquals(actual_tests, expected_tests) ++ self.assertEqual(actual_tests, expected_tests) + + def testGetTests_annotationSimpleValueFilter(self): + o = self.createTestInstance() +@@ -575,7 +575,7 @@ + o._junit4_runner_class = 'J4Runner' + actual_tests = o.ProcessRawTests(raw_tests) + +- self.assertEquals(actual_tests, expected_tests) ++ self.assertEqual(actual_tests, expected_tests) + + def testGetTests_annotationDictValueFilter(self): + o = self.createTestInstance() +@@ -625,7 +625,7 @@ + o._junit4_runner_class = 'J4Runner' + actual_tests = o.ProcessRawTests(raw_tests) + +- self.assertEquals(actual_tests, expected_tests) ++ self.assertEqual(actual_tests, expected_tests) + + def testGetTestName(self): + test = { +@@ -643,10 +643,10 @@ + 'method': test['method'] + } + +- self.assertEquals( ++ self.assertEqual( + instrumentation_test_instance.GetTestName(test, sep='.'), + 'org.chromium.TestA.testSimple') +- self.assertEquals( ++ self.assertEqual( + instrumentation_test_instance.GetTestName( + unqualified_class_test, sep='.'), + 'TestA.testSimple') +@@ -662,7 +662,7 @@ + 'flags': ['enable_features=abc'], + 'is_junit4': True, + 'method': 'testSimple'} +- self.assertEquals( ++ self.assertEqual( + instrumentation_test_instance.GetUniqueTestName( + test, sep='.'), + 'org.chromium.TestA.testSimple_with_enable_features=abc') +@@ -682,11 +682,11 @@ + 'class': test['class'].split('.')[-1], + 'method': test['method'] + } +- self.assertEquals( ++ self.assertEqual( + instrumentation_test_instance.GetTestNameWithoutParameterPostfix( + test, sep='.'), + 'org.chromium.TestA') +- self.assertEquals( ++ self.assertEqual( + instrumentation_test_instance.GetTestNameWithoutParameterPostfix( + unqualified_class_test, sep='.'), + 'TestA') +@@ -755,7 +755,7 @@ + o._junit4_runner_class = 'J4Runner' + actual_tests = o.ProcessRawTests(raw_tests) + +- self.assertEquals(actual_tests, expected_tests) ++ self.assertEqual(actual_tests, expected_tests) + + def testGenerateTestResults_noStatus(self): + results = instrumentation_test_instance.GenerateTestResults( +@@ -952,7 +952,7 @@ + o._test_jar = 'path/to/test.jar' + o._junit4_runner_class = 'J4Runner' + actual_tests = o.ProcessRawTests(raw_tests) +- self.assertEquals(actual_tests, expected_tests) ++ self.assertEqual(actual_tests, expected_tests) + + def testParameterizedCommandLineFlags(self): + o = self.createTestInstance() +@@ -1075,7 +1075,7 @@ + o._test_jar = 'path/to/test.jar' + o._junit4_runner_class = 'J4Runner' + actual_tests = o.ProcessRawTests(raw_tests) +- self.assertEquals(actual_tests, expected_tests) ++ self.assertEqual(actual_tests, expected_tests) + + def testDifferentCommandLineParameterizations(self): + o = self.createTestInstance() +@@ -1136,7 +1136,7 @@ + o._test_jar = 'path/to/test.jar' + o._junit4_runner_class = 'J4Runner' + actual_tests = o.ProcessRawTests(raw_tests) +- self.assertEquals(actual_tests, expected_tests) ++ self.assertEqual(actual_tests, expected_tests) + + def testMultipleCommandLineParameterizations_raises(self): + o = self.createTestInstance() +--- a/src/3rdparty/chromium/build/android/pylib/local/local_test_server_spawner.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/local/local_test_server_spawner.py 2025-01-16 02:26:08.518764111 +0800 +@@ -25,7 +25,7 @@ + Whether the provided predicate was 
satisfied once (before the timeout). + """ + sleep_time_sec = 0.025 +- for _ in xrange(1, max_attempts): ++ for _ in range(1, max_attempts): + if predicate(): + return True + time.sleep(sleep_time_sec) +--- a/src/3rdparty/chromium/build/android/pylib/local/device/local_device_gtest_run.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/local/device/local_device_gtest_run.py 2025-01-16 02:26:08.518764111 +0800 +@@ -488,11 +488,11 @@ + + batch_size = self._test_instance.test_launcher_batch_limit + +- for i in xrange(0, device_count): ++ for i in range(0, device_count): + unbounded_shard = tests[i::device_count] + shards += [ + unbounded_shard[j:j + batch_size] +- for j in xrange(0, len(unbounded_shard), batch_size) ++ for j in range(0, len(unbounded_shard), batch_size) + ] + return shards + +--- a/src/3rdparty/chromium/build/android/pylib/local/device/local_device_instrumentation_test_run.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/local/device/local_device_instrumentation_test_run.py 2025-01-16 02:26:08.518764111 +0800 +@@ -121,7 +121,7 @@ + # Dismiss any error dialogs. Limit the number in case we have an error + # loop or we are failing to dismiss. + try: +- for _ in xrange(10): ++ for _ in range(10): + package = device.DismissCrashDialogIfNeeded(timeout=10, retries=1) + if not package: + return False +@@ -492,7 +492,7 @@ + other_tests.append(test) + + all_tests = [] +- for _, tests in batched_tests.items(): ++ for _, tests in list(batched_tests.items()): + tests.sort() # Ensure a consistent ordering across external shards. + all_tests.extend([ + tests[i:i + _TEST_BATCH_MAX_GROUP_SIZE] +@@ -558,7 +558,7 @@ + i = self._GetTimeoutFromAnnotations(t['annotations'], n) + return (n, i) + +- test_names, timeouts = zip(*(name_and_timeout(t) for t in test)) ++ test_names, timeouts = list(zip(*(name_and_timeout(t) for t in test))) + + test_name = instrumentation_test_instance.GetTestName(test[0]) + '_batch' + extras['class'] = ','.join(test_names) +--- a/src/3rdparty/chromium/build/android/pylib/local/device/local_device_test_run.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/local/device/local_device_test_run.py 2025-01-16 02:26:08.518764111 +0800 +@@ -6,7 +6,7 @@ + import logging + import posixpath + import signal +-import thread ++import _thread + import threading + + from devil import base_error +@@ -65,7 +65,7 @@ + consecutive_device_errors = 0 + for test in tests: + if exit_now.isSet(): +- thread.exit() ++ _thread.exit() + + result = None + rerun = None +@@ -220,13 +220,13 @@ + if name.endswith('*'): + tests_and_results[name] = ( + test, +- [r for n, r in all_test_results.iteritems() ++ [r for n, r in all_test_results.items() + if fnmatch.fnmatch(n, name)]) + else: + tests_and_results[name] = (test, all_test_results.get(name)) + + failed_tests_and_results = ( +- (test, result) for test, result in tests_and_results.itervalues() ++ (test, result) for test, result in tests_and_results.values() + if is_failure_result(result) + ) + +--- a/src/3rdparty/chromium/build/android/pylib/local/device/local_device_test_run_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/local/device/local_device_test_run_test.py 2025-01-16 02:26:08.518764111 +0800 +@@ -16,25 +16,25 @@ + class SubstituteDeviceRootTest(unittest.TestCase): + + def testNoneDevicePath(self): +- self.assertEquals( ++ self.assertEqual( + '/fake/device/root', + 
local_device_test_run.SubstituteDeviceRoot( + None, '/fake/device/root')) + + def testStringDevicePath(self): +- self.assertEquals( ++ self.assertEqual( + '/another/fake/device/path', + local_device_test_run.SubstituteDeviceRoot( + '/another/fake/device/path', '/fake/device/root')) + + def testListWithNoneDevicePath(self): +- self.assertEquals( ++ self.assertEqual( + '/fake/device/root/subpath', + local_device_test_run.SubstituteDeviceRoot( + [None, 'subpath'], '/fake/device/root')) + + def testListWithoutNoneDevicePath(self): +- self.assertEquals( ++ self.assertEqual( + '/another/fake/device/path', + local_device_test_run.SubstituteDeviceRoot( + ['/', 'another', 'fake', 'device', 'path'], +@@ -79,7 +79,7 @@ + + test_run = TestLocalDeviceTestRun() + tests_to_retry = test_run._GetTestsToRetry(tests, try_results) +- self.assertEquals(0, len(tests_to_retry)) ++ self.assertEqual(0, len(tests_to_retry)) + + def testGetTestsToRetry_testFailed(self): + results = [ +@@ -95,7 +95,7 @@ + + test_run = TestLocalDeviceTestRun() + tests_to_retry = test_run._GetTestsToRetry(tests, try_results) +- self.assertEquals(1, len(tests_to_retry)) ++ self.assertEqual(1, len(tests_to_retry)) + self.assertIn('Test1', tests_to_retry) + + def testGetTestsToRetry_testUnknown(self): +@@ -110,7 +110,7 @@ + + test_run = TestLocalDeviceTestRun() + tests_to_retry = test_run._GetTestsToRetry(tests, try_results) +- self.assertEquals(1, len(tests_to_retry)) ++ self.assertEqual(1, len(tests_to_retry)) + self.assertIn('Test1', tests_to_retry) + + def testGetTestsToRetry_wildcardFilter_allPass(self): +@@ -127,7 +127,7 @@ + + test_run = TestLocalDeviceTestRun() + tests_to_retry = test_run._GetTestsToRetry(tests, try_results) +- self.assertEquals(0, len(tests_to_retry)) ++ self.assertEqual(0, len(tests_to_retry)) + + def testGetTestsToRetry_wildcardFilter_oneFails(self): + results = [ +@@ -143,7 +143,7 @@ + + test_run = TestLocalDeviceTestRun() + tests_to_retry = test_run._GetTestsToRetry(tests, try_results) +- self.assertEquals(1, len(tests_to_retry)) ++ self.assertEqual(1, len(tests_to_retry)) + self.assertIn('TestCase.*', tests_to_retry) + + def testGetTestsToRetry_nonStringTests(self): +@@ -163,9 +163,9 @@ + + test_run = TestLocalDeviceNonStringTestRun() + tests_to_retry = test_run._GetTestsToRetry(tests, try_results) +- self.assertEquals(1, len(tests_to_retry)) ++ self.assertEqual(1, len(tests_to_retry)) + self.assertIsInstance(tests_to_retry[0], dict) +- self.assertEquals(tests[1], tests_to_retry[0]) ++ self.assertEqual(tests[1], tests_to_retry[0]) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/build/android/pylib/local/emulator/avd.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/local/emulator/avd.py 2025-01-16 02:26:08.519847426 +0800 +@@ -374,7 +374,7 @@ + pkgs_by_dir[pkg.dest_path] = [] + pkgs_by_dir[pkg.dest_path].append(pkg) + +- for pkg_dir, pkgs in pkgs_by_dir.iteritems(): ++ for pkg_dir, pkgs in pkgs_by_dir.items(): + logging.info('Installing packages in %s', pkg_dir) + cipd_root = os.path.join(constants.DIR_SOURCE_ROOT, pkg_dir) + if not os.path.exists(cipd_root): +--- a/src/3rdparty/chromium/build/android/pylib/local/emulator/ini.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/local/emulator/ini.py 2025-01-16 02:26:08.519847426 +0800 +@@ -27,7 +27,7 @@ + + def dumps(obj): + ret = '' +- for k, v in sorted(obj.iteritems()): ++ for k, v in sorted(obj.items()): + ret += '%s = %s\n' % (k, str(v)) + return ret + +--- 
a/src/3rdparty/chromium/build/android/pylib/output/local_output_manager.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/build/android/pylib/output/local_output_manager.py 2025-01-16 02:26:08.519847426 +0800
+@@ -5,7 +5,7 @@
+ import time
+ import os
+ import shutil
+-import urllib
++import urllib.request, urllib.parse, urllib.error
+
+ from pylib.base import output_manager
+
+@@ -37,7 +37,7 @@
+ self._output_path = os.path.join(out_root, out_subdir, out_filename)
+
+ def _Link(self):
+- return 'file://%s' % urllib.quote(self._output_path)
++ return 'file://%s' % urllib.parse.quote(self._output_path)
+
+ def _Archive(self):
+ if not os.path.exists(os.path.dirname(self._output_path)):
+--- a/src/3rdparty/chromium/build/android/pylib/results/json_results.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/build/android/pylib/results/json_results.py 2025-01-16 02:26:08.519847426 +0800
+@@ -92,14 +92,14 @@
+ result_dict = {
+ 'status': r.GetType(),
+ 'elapsed_time_ms': r.GetDuration(),
+- 'output_snippet': unicode(r.GetLog(), errors='replace'),
++ 'output_snippet': r.GetLog() if isinstance(r.GetLog(), str) else str(r.GetLog(), 'utf-8', 'replace'),
+ 'losless_snippet': True,
+ 'output_snippet_base64': '',
+ 'links': r.GetLinks(),
+ }
+ iteration_data[r.GetName()].append(result_dict)
+
+- all_tests = all_tests.union(set(iteration_data.iterkeys()))
++ all_tests = all_tests.union(set(iteration_data.keys()))
+ per_iteration_data.append(iteration_data)
+
+ return {
+@@ -213,7 +213,7 @@
+ results_list = []
+ testsuite_runs = json_results['per_iteration_data']
+ for testsuite_run in testsuite_runs:
+- for test, test_runs in testsuite_run.iteritems():
++ for test, test_runs in testsuite_run.items():
+ results_list.extend(
+ [base_test_result.BaseTestResult(test,
+ string_as_status(tr['status']),
+--- a/src/3rdparty/chromium/build/android/pylib/results/json_results_test.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/build/android/pylib/results/json_results_test.py 2025-01-16 02:26:08.519847426 +0800
+@@ -19,18 +19,18 @@
+ all_results.AddResult(result)
+
+ results_dict = json_results.GenerateResultsDict([all_results])
+- self.assertEquals(
++ self.assertEqual(
+ ['test.package.TestName'],
+ results_dict['all_tests'])
+- self.assertEquals(1, len(results_dict['per_iteration_data']))
++ self.assertEqual(1, len(results_dict['per_iteration_data']))
+
+ iteration_result = results_dict['per_iteration_data'][0]
+ self.assertTrue('test.package.TestName' in iteration_result)
+- self.assertEquals(1, len(iteration_result['test.package.TestName']))
++ self.assertEqual(1, len(iteration_result['test.package.TestName']))
+
+ test_iteration_result = iteration_result['test.package.TestName'][0]
+ self.assertTrue('status' in test_iteration_result)
+- self.assertEquals('SUCCESS', test_iteration_result['status'])
++ self.assertEqual('SUCCESS', test_iteration_result['status'])
+
+ def testGenerateResultsDict_skippedResult(self):
+ result = base_test_result.BaseTestResult(
+@@ -40,18 +40,18 @@
+ all_results.AddResult(result)
+
+ results_dict = json_results.GenerateResultsDict([all_results])
+- self.assertEquals(
++ self.assertEqual(
+ ['test.package.TestName'],
+ results_dict['all_tests'])
+- self.assertEquals(1, len(results_dict['per_iteration_data']))
++ self.assertEqual(1, len(results_dict['per_iteration_data']))
+
+ iteration_result = results_dict['per_iteration_data'][0]
+ self.assertTrue('test.package.TestName' in iteration_result)
+- self.assertEquals(1, len(iteration_result['test.package.TestName']))
++ 
self.assertEqual(1, len(iteration_result['test.package.TestName'])) + + test_iteration_result = iteration_result['test.package.TestName'][0] + self.assertTrue('status' in test_iteration_result) +- self.assertEquals('SKIPPED', test_iteration_result['status']) ++ self.assertEqual('SKIPPED', test_iteration_result['status']) + + def testGenerateResultsDict_failedResult(self): + result = base_test_result.BaseTestResult( +@@ -61,18 +61,18 @@ + all_results.AddResult(result) + + results_dict = json_results.GenerateResultsDict([all_results]) +- self.assertEquals( ++ self.assertEqual( + ['test.package.TestName'], + results_dict['all_tests']) +- self.assertEquals(1, len(results_dict['per_iteration_data'])) ++ self.assertEqual(1, len(results_dict['per_iteration_data'])) + + iteration_result = results_dict['per_iteration_data'][0] + self.assertTrue('test.package.TestName' in iteration_result) +- self.assertEquals(1, len(iteration_result['test.package.TestName'])) ++ self.assertEqual(1, len(iteration_result['test.package.TestName'])) + + test_iteration_result = iteration_result['test.package.TestName'][0] + self.assertTrue('status' in test_iteration_result) +- self.assertEquals('FAILURE', test_iteration_result['status']) ++ self.assertEqual('FAILURE', test_iteration_result['status']) + + def testGenerateResultsDict_duration(self): + result = base_test_result.BaseTestResult( +@@ -82,18 +82,18 @@ + all_results.AddResult(result) + + results_dict = json_results.GenerateResultsDict([all_results]) +- self.assertEquals( ++ self.assertEqual( + ['test.package.TestName'], + results_dict['all_tests']) +- self.assertEquals(1, len(results_dict['per_iteration_data'])) ++ self.assertEqual(1, len(results_dict['per_iteration_data'])) + + iteration_result = results_dict['per_iteration_data'][0] + self.assertTrue('test.package.TestName' in iteration_result) +- self.assertEquals(1, len(iteration_result['test.package.TestName'])) ++ self.assertEqual(1, len(iteration_result['test.package.TestName'])) + + test_iteration_result = iteration_result['test.package.TestName'][0] + self.assertTrue('elapsed_time_ms' in test_iteration_result) +- self.assertEquals(123, test_iteration_result['elapsed_time_ms']) ++ self.assertEqual(123, test_iteration_result['elapsed_time_ms']) + + def testGenerateResultsDict_multipleResults(self): + result1 = base_test_result.BaseTestResult( +@@ -106,27 +106,27 @@ + all_results.AddResult(result2) + + results_dict = json_results.GenerateResultsDict([all_results]) +- self.assertEquals( ++ self.assertEqual( + ['test.package.TestName1', 'test.package.TestName2'], + results_dict['all_tests']) + + self.assertTrue('per_iteration_data' in results_dict) + iterations = results_dict['per_iteration_data'] +- self.assertEquals(1, len(iterations)) ++ self.assertEqual(1, len(iterations)) + + expected_tests = set([ + 'test.package.TestName1', + 'test.package.TestName2', + ]) + +- for test_name, iteration_result in iterations[0].iteritems(): ++ for test_name, iteration_result in iterations[0].items(): + self.assertTrue(test_name in expected_tests) + expected_tests.remove(test_name) +- self.assertEquals(1, len(iteration_result)) ++ self.assertEqual(1, len(iteration_result)) + + test_iteration_result = iteration_result[0] + self.assertTrue('status' in test_iteration_result) +- self.assertEquals('SUCCESS', test_iteration_result['status']) ++ self.assertEqual('SUCCESS', test_iteration_result['status']) + + def testGenerateResultsDict_passOnRetry(self): + raw_results = [] +@@ -144,28 +144,28 @@ + 
raw_results.append(run_results2) + + results_dict = json_results.GenerateResultsDict([raw_results]) +- self.assertEquals(['test.package.TestName1'], results_dict['all_tests']) ++ self.assertEqual(['test.package.TestName1'], results_dict['all_tests']) + + # Check that there's only one iteration. + self.assertIn('per_iteration_data', results_dict) + iterations = results_dict['per_iteration_data'] +- self.assertEquals(1, len(iterations)) ++ self.assertEqual(1, len(iterations)) + + # Check that test.package.TestName1 is the only test in the iteration. +- self.assertEquals(1, len(iterations[0])) ++ self.assertEqual(1, len(iterations[0])) + self.assertIn('test.package.TestName1', iterations[0]) + + # Check that there are two results for test.package.TestName1. + actual_test_results = iterations[0]['test.package.TestName1'] +- self.assertEquals(2, len(actual_test_results)) ++ self.assertEqual(2, len(actual_test_results)) + + # Check that the first result is a failure. + self.assertIn('status', actual_test_results[0]) +- self.assertEquals('FAILURE', actual_test_results[0]['status']) ++ self.assertEqual('FAILURE', actual_test_results[0]['status']) + + # Check that the second result is a success. + self.assertIn('status', actual_test_results[1]) +- self.assertEquals('SUCCESS', actual_test_results[1]['status']) ++ self.assertEqual('SUCCESS', actual_test_results[1]['status']) + + def testGenerateResultsDict_globalTags(self): + raw_results = [] +@@ -173,7 +173,7 @@ + + results_dict = json_results.GenerateResultsDict( + [raw_results], global_tags=global_tags) +- self.assertEquals(['UNRELIABLE_RESULTS'], results_dict['global_tags']) ++ self.assertEqual(['UNRELIABLE_RESULTS'], results_dict['global_tags']) + + def testGenerateResultsDict_loslessSnippet(self): + result = base_test_result.BaseTestResult( +@@ -185,22 +185,22 @@ + all_results.AddResult(result) + + results_dict = json_results.GenerateResultsDict([all_results]) +- self.assertEquals( ++ self.assertEqual( + ['test.package.TestName'], + results_dict['all_tests']) +- self.assertEquals(1, len(results_dict['per_iteration_data'])) ++ self.assertEqual(1, len(results_dict['per_iteration_data'])) + + iteration_result = results_dict['per_iteration_data'][0] + self.assertTrue('test.package.TestName' in iteration_result) +- self.assertEquals(1, len(iteration_result['test.package.TestName'])) ++ self.assertEqual(1, len(iteration_result['test.package.TestName'])) + + test_iteration_result = iteration_result['test.package.TestName'][0] + self.assertTrue('losless_snippet' in test_iteration_result) + self.assertTrue(test_iteration_result['losless_snippet']) + self.assertTrue('output_snippet' in test_iteration_result) +- self.assertEquals(log, test_iteration_result['output_snippet']) ++ self.assertEqual(log, test_iteration_result['output_snippet']) + self.assertTrue('output_snippet_base64' in test_iteration_result) +- self.assertEquals('', test_iteration_result['output_snippet_base64']) ++ self.assertEqual('', test_iteration_result['output_snippet_base64']) + + def testGenerateJsonTestResultFormatDict_passedResult(self): + result = base_test_result.BaseTestResult('test.package.TestName', +@@ -210,13 +210,13 @@ + all_results.AddResult(result) + + results_dict = json_results.GenerateJsonTestResultFormatDict([all_results]) +- self.assertEquals(1, len(results_dict['tests'])) +- self.assertEquals(1, len(results_dict['tests']['test'])) +- self.assertEquals(1, len(results_dict['tests']['test']['package'])) +- self.assertEquals( ++ self.assertEqual(1, 
len(results_dict['tests'])) ++ self.assertEqual(1, len(results_dict['tests']['test'])) ++ self.assertEqual(1, len(results_dict['tests']['test']['package'])) ++ self.assertEqual( + 'PASS', + results_dict['tests']['test']['package']['TestName']['expected']) +- self.assertEquals( ++ self.assertEqual( + 'PASS', results_dict['tests']['test']['package']['TestName']['actual']) + + def testGenerateJsonTestResultFormatDict_failedResult(self): +@@ -227,13 +227,13 @@ + all_results.AddResult(result) + + results_dict = json_results.GenerateJsonTestResultFormatDict([all_results]) +- self.assertEquals(1, len(results_dict['tests'])) +- self.assertEquals(1, len(results_dict['tests']['test'])) +- self.assertEquals(1, len(results_dict['tests']['test']['package'])) +- self.assertEquals( ++ self.assertEqual(1, len(results_dict['tests'])) ++ self.assertEqual(1, len(results_dict['tests']['test'])) ++ self.assertEqual(1, len(results_dict['tests']['test']['package'])) ++ self.assertEqual( + 'PASS', + results_dict['tests']['test']['package']['TestName']['expected']) +- self.assertEquals( ++ self.assertEqual( + 'FAIL', results_dict['tests']['test']['package']['TestName']['actual']) + + +--- a/src/3rdparty/chromium/build/android/pylib/results/report_results.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/results/report_results.py 2025-01-16 02:26:08.519847426 +0800 +@@ -4,7 +4,7 @@ + + """Module containing utility functions for reporting results.""" + +-from __future__ import print_function ++ + + import logging + import os +--- a/src/3rdparty/chromium/build/android/pylib/results/flakiness_dashboard/json_results_generator.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/results/flakiness_dashboard/json_results_generator.py 2025-01-16 02:26:08.519847426 +0800 +@@ -13,7 +13,7 @@ + import mimetypes + import os + import time +-import urllib2 ++import urllib.request, urllib.error, urllib.parse + + _log = logging.getLogger(__name__) + +@@ -44,7 +44,7 @@ + def ConvertTrieToFlatPaths(trie, prefix=None): + """Flattens the trie of paths, prepending a prefix to each.""" + result = {} +- for name, data in trie.iteritems(): ++ for name, data in trie.items(): + if prefix: + name = prefix + '/' + name + +@@ -95,7 +95,7 @@ + """A simple class that represents a single test result.""" + + # Test modifier constants. 
+- (NONE, FAILS, FLAKY, DISABLED) = range(4) ++ (NONE, FAILS, FLAKY, DISABLED) = list(range(4)) + + def __init__(self, test, failed=False, elapsed_time=0): + self.test_name = test +@@ -195,7 +195,7 @@ + self._results_directory = results_file_base_path + + self._test_results_map = test_results_map +- self._test_results = test_results_map.values() ++ self._test_results = list(test_results_map.values()) + + self._svn_repositories = svn_repositories + if not self._svn_repositories: +@@ -217,7 +217,7 @@ + WriteJSON(json_object, file_path) + + def GenerateTimesMSFile(self): +- times = TestTimingsTrie(self._test_results_map.values()) ++ times = TestTimingsTrie(list(self._test_results_map.values())) + file_path = os.path.join(self._results_directory, self.TIMES_MS_FILENAME) + WriteJSON(times, file_path) + +@@ -326,7 +326,7 @@ + return self.__class__.NO_DATA_RESULT + + test_result = self._test_results_map[test_name] +- if test_result.modifier in self.MODIFIER_TO_CHAR.keys(): ++ if test_result.modifier in list(self.MODIFIER_TO_CHAR.keys()): + return self.MODIFIER_TO_CHAR[test_result.modifier] + + return self.__class__.PASS_RESULT +@@ -374,23 +374,23 @@ + return {}, None + + results_file_url = (self.URL_FOR_TEST_LIST_JSON % +- (urllib2.quote(self._test_results_server), +- urllib2.quote(self._builder_name), ++ (urllib.parse.quote(self._test_results_server), ++ urllib.parse.quote(self._builder_name), + self.RESULTS_FILENAME, +- urllib2.quote(self._test_type), +- urllib2.quote(self._master_name))) ++ urllib.parse.quote(self._test_type), ++ urllib.parse.quote(self._master_name))) + + # pylint: disable=redefined-variable-type + try: + # FIXME: We should talk to the network via a Host object. +- results_file = urllib2.urlopen(results_file_url) ++ results_file = urllib.request.urlopen(results_file_url) + old_results = results_file.read() +- except urllib2.HTTPError as http_error: ++ except urllib.error.HTTPError as http_error: + # A non-4xx status code means the bot is hosed for some reason + # and we can't grab the results.json file off of it. + if http_error.code < 400 and http_error.code >= 500: + error = http_error +- except urllib2.URLError as url_error: ++ except urllib.error.URLError as url_error: + error = url_error + # pylint: enable=redefined-variable-type + +@@ -426,7 +426,7 @@ + + # Create a test modifiers (FAILS, FLAKY etc) summary dictionary. + entry = {} +- for test_name in self._test_results_map.iterkeys(): ++ for test_name in self._test_results_map.keys(): + result_char = self._GetModifierChar(test_name) + entry[result_char] = entry.get(result_char, 0) + 1 + +@@ -543,7 +543,7 @@ + + # version 3->4 + if archive_version == 3: +- for results in results_json.values(): ++ for results in list(results_json.values()): + self._ConvertTestsToTrie(results) + + results_json[self.VERSION_KEY] = self.VERSION +@@ -554,7 +554,7 @@ + + test_results = results[self.TESTS] + test_results_trie = {} +- for test in test_results.iterkeys(): ++ for test in test_results.keys(): + single_test_result = test_results[test] + AddPathToTrie(test, single_test_result, test_results_trie) + +@@ -642,10 +642,10 @@ + end = start + self._timeout_seconds + while time.time() < end: + try: +- request = urllib2.Request(self._url, data, ++ request = urllib.request.Request(self._url, data, + {'Content-Type': content_type}) +- return urllib2.urlopen(request) +- except urllib2.HTTPError as e: ++ return urllib.request.urlopen(request) ++ except urllib.error.HTTPError as e: + _log.warn("Received HTTP status %s loading \"%s\". 
" + 'Retrying in 10 seconds...', e.code, e.filename) + time.sleep(10) +@@ -678,7 +678,7 @@ + lines.append('--' + BOUNDARY) + lines.append('Content-Disposition: form-data; name="%s"' % key) + lines.append('') +- if isinstance(value, unicode): ++ if isinstance(value, str): + value = value.encode('utf-8') + lines.append(value) + +@@ -688,7 +688,7 @@ + 'filename="%s"' % (key, filename)) + lines.append('Content-Type: %s' % _GetMIMEType(filename)) + lines.append('') +- if isinstance(value, unicode): ++ if isinstance(value, str): + value = value.encode('utf-8') + lines.append(value) + +--- a/src/3rdparty/chromium/build/android/pylib/results/flakiness_dashboard/json_results_generator_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/results/flakiness_dashboard/json_results_generator_unittest.py 2025-01-16 02:26:08.519847426 +0800 +@@ -114,7 +114,7 @@ + if tests_set or DISABLED_count: + fixable = {} + for fixable_items in buildinfo[JRG.FIXABLE]: +- for (result_type, count) in fixable_items.iteritems(): ++ for (result_type, count) in fixable_items.items(): + if result_type in fixable: + fixable[result_type] = fixable[result_type] + count + else: +@@ -138,7 +138,7 @@ + + if failed_count_map: + tests = buildinfo[JRG.TESTS] +- for test_name in failed_count_map.iterkeys(): ++ for test_name in failed_count_map.keys(): + test = self._FindTestInTrie(test_name, tests) + + failed = 0 +--- a/src/3rdparty/chromium/build/android/pylib/results/presentation/standard_gtest_merge.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/results/presentation/standard_gtest_merge.py 2025-01-16 02:26:08.519847426 +0800 +@@ -4,7 +4,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import argparse + import json +@@ -43,17 +43,17 @@ + # client/swarming.py, which means the state enum is saved in its string + # name form, not in the number form. + state = result.get('state') +- if state == u'BOT_DIED': ++ if state == 'BOT_DIED': + print( + 'Shard #%d had a Swarming internal failure' % index, file=sys.stderr) +- elif state == u'EXPIRED': ++ elif state == 'EXPIRED': + print('There wasn\'t enough capacity to run your test', file=sys.stderr) +- elif state == u'TIMED_OUT': ++ elif state == 'TIMED_OUT': + print('Test runtime exceeded allocated time' + 'Either it ran for too long (hard timeout) or it didn\'t produce ' + 'I/O for an extended period of time (I/O timeout)', + file=sys.stderr) +- elif state != u'COMPLETED': ++ elif state != 'COMPLETED': + print('Invalid Swarming task state: %s' % state, file=sys.stderr) + + json_data, err_msg = load_shard_json(index, result.get('task_id'), +@@ -138,7 +138,7 @@ + def merge_list_of_dicts(left, right): + """Merges dicts left[0] with right[0], left[1] with right[1], etc.""" + output = [] +- for i in xrange(max(len(left), len(right))): ++ for i in range(max(len(left), len(right))): + left_dict = left[i] if i < len(left) else {} + right_dict = right[i] if i < len(right) else {} + merged_dict = left_dict.copy() +--- a/src/3rdparty/chromium/build/android/pylib/results/presentation/test_results_presentation.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/results/presentation/test_results_presentation.py 2025-01-16 02:26:08.519847426 +0800 +@@ -4,7 +4,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ +-from __future__ import print_function ++ + + import argparse + import collections +@@ -14,7 +14,7 @@ + import tempfile + import os + import sys +-import urllib ++import urllib.request, urllib.parse, urllib.error + + + CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) +@@ -104,7 +104,7 @@ + + + def flakiness_dashbord_link(test_name, suite_name): +- url_args = urllib.urlencode([ ++ url_args = urllib.parse.urlencode([ + ('testType', suite_name), + ('tests', test_name)]) + return ('https://test-results.appspot.com/' +@@ -156,7 +156,7 @@ + ] + + test_row_blocks = [] +- for test_name, test_results in results_dict.iteritems(): ++ for test_name, test_results in results_dict.items(): + test_runs = [] + for index, result in enumerate(test_results): + if index == 0: +@@ -215,7 +215,7 @@ + ] + + suite_row_dict = {} +- for test_name, test_results in results_dict.iteritems(): ++ for test_name, test_results in results_dict.items(): + # TODO(mikecase): This logic doesn't work if there are multiple test runs. + # That is, if 'per_iteration_data' has multiple entries. + # Since we only care about the result of the last test run. +@@ -253,7 +253,7 @@ + suite_row[TIME_INDEX]['data'] += result['elapsed_time_ms'] + footer_row[TIME_INDEX]['data'] += result['elapsed_time_ms'] + +- for suite in suite_row_dict.values(): ++ for suite in list(suite_row_dict.values()): + if suite[FAIL_COUNT_INDEX]['data'] > 0: + suite[FAIL_COUNT_INDEX]['class'] += ' failure' + else: +@@ -265,7 +265,7 @@ + footer_row[FAIL_COUNT_INDEX]['class'] += ' success' + + return (header_row, +- [[suite_row] for suite_row in suite_row_dict.values()], ++ [[suite_row] for suite_row in list(suite_row_dict.values())], + footer_row) + + +@@ -278,7 +278,7 @@ + ] + if result_details_link: + url_args.append(('comment', 'Please check out: %s' % result_details_link)) +- url_args = urllib.urlencode(url_args) ++ url_args = urllib.parse.urlencode(url_args) + # pylint: enable=redefined-variable-type + return 'https://bugs.chromium.org/p/chromium/issues/entry?%s' % url_args + +@@ -349,7 +349,7 @@ + + results_dict = collections.defaultdict(list) + for testsuite_run in json_object['per_iteration_data']: +- for test, test_runs in testsuite_run.iteritems(): ++ for test, test_runs in testsuite_run.items(): + results_dict[test].extend(test_runs) + return results_to_html(results_dict, cs_base_url, bucket, test_name, + builder_name, build_number, local_output) +@@ -376,12 +376,12 @@ + ui_screenshots = [] + # pylint: disable=too-many-nested-blocks + for testsuite_run in json_object['per_iteration_data']: +- for _, test_runs in testsuite_run.iteritems(): ++ for _, test_runs in testsuite_run.items(): + for test_run in test_runs: + if 'ui screenshot' in test_run['links']: + screenshot_link = test_run['links']['ui screenshot'] + if screenshot_link.startswith('file:'): +- with contextlib.closing(urllib.urlopen(screenshot_link)) as f: ++ with contextlib.closing(urllib.request.urlopen(screenshot_link)) as f: + test_screenshots = json.load(f) + else: + # Assume anything that isn't a file link is a google storage link +@@ -518,7 +518,7 @@ + + if ui_screenshot_set_link: + ui_catalog_url = 'https://chrome-ui-catalog.appspot.com/' +- ui_catalog_query = urllib.urlencode( ++ ui_catalog_query = urllib.parse.urlencode( + {'screenshot_source': ui_screenshot_set_link}) + ui_screenshot_link = '%s?%s' % (ui_catalog_url, ui_catalog_query) + +--- a/src/3rdparty/chromium/build/android/pylib/symbols/apk_lib_dump.py 2023-07-18 22:12:18.000000000 +0800 ++++ 
b/src/3rdparty/chromium/build/android/pylib/symbols/apk_lib_dump.py 2025-01-16 02:26:08.519847426 +0800 +@@ -22,7 +22,7 @@ + (0x, 0x, 0x, ), + """ + +-from __future__ import print_function ++ + + import argparse + import os +--- a/src/3rdparty/chromium/build/android/pylib/symbols/apk_native_libs_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/symbols/apk_native_libs_unittest.py 2025-01-16 02:26:08.519847426 +0800 +@@ -130,12 +130,12 @@ + 'foo.txt': (1024, 1024, 'FooFooFoo'), + 'lib/bar/libcode.so': (16000, 3240, 1024, '\x7fELFFFFFFFFFFFF'), + } +- for path, props in _ENTRIES.iteritems(): ++ for path, props in _ENTRIES.items(): + reader.AddTestEntry(path, props[0], props[1], props[2]) + + entries = reader.ListEntries() + self.assertEqual(len(entries), len(_ENTRIES)) +- for path, props in _ENTRIES.iteritems(): ++ for path, props in _ENTRIES.items(): + entry = reader.FindEntry(path) + self.assertEqual(entry.filename, path) + self.assertEqual(entry.file_size, props[0]) +--- a/src/3rdparty/chromium/build/android/pylib/symbols/deobfuscator.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/symbols/deobfuscator.py 2025-01-16 02:26:08.519847426 +0800 +@@ -139,7 +139,7 @@ + # out/Release/apks/ChromePublic.apk.mapping + def __init__(self, mapping_path, pool_size=4): + self._mapping_path = mapping_path +- self._pool = [Deobfuscator(mapping_path) for _ in xrange(pool_size)] ++ self._pool = [Deobfuscator(mapping_path) for _ in range(pool_size)] + # Allow only one thread to select from the pool at a time. + self._lock = threading.Lock() + self._num_restarts = 0 +--- a/src/3rdparty/chromium/build/android/pylib/symbols/elf_symbolizer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/symbols/elf_symbolizer.py 2025-01-16 02:26:08.519847426 +0800 +@@ -8,7 +8,7 @@ + import multiprocessing + import os + import posixpath +-import Queue ++import queue + import re + import subprocess + import sys +@@ -293,7 +293,7 @@ + + try: + lines = self._out_queue.get(block=True, timeout=0.25) +- except Queue.Empty: ++ except queue.Empty: + # On timeout (1/4 s.) repeat the inner loop and check if either the + # addr2line process did crash or we waited its output for too long. + continue +@@ -314,7 +314,7 @@ + while True: + try: + lines = self._out_queue.get_nowait() +- except Queue.Empty: ++ except queue.Empty: + break + self._ProcessSymbolOutput(lines) + +@@ -405,7 +405,7 @@ + # The only reason of existence of this Queue (and the corresponding + # Thread below) is the lack of a subprocess.stdout.poll_avail_lines(). + # Essentially this is a pipe able to extract a couple of lines atomically. +- self._out_queue = Queue.Queue() ++ self._out_queue = queue.Queue() + + # Start the underlying addr2line process in line buffered mode. 
+ +--- a/src/3rdparty/chromium/build/android/pylib/symbols/elf_symbolizer_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/symbols/elf_symbolizer_unittest.py 2025-01-16 02:26:08.519847426 +0800 +@@ -55,7 +55,7 @@ + inlines=True, + max_concurrent_jobs=4) + +- for addr in xrange(1000): ++ for addr in range(1000): + exp_inline = False + exp_unknown = False + +@@ -150,7 +150,7 @@ + max_concurrent_jobs=max_concurrent_jobs, + addr2line_timeout=0.5) + +- for addr in xrange(num_symbols): ++ for addr in range(num_symbols): + exp_name = 'mock_sym_for_addr_%d' % addr + exp_source_path = 'mock_src/mock_lib1.so.c' + exp_source_line = addr +@@ -160,7 +160,7 @@ + symbolizer.Join() + + # Check that all the expected callbacks have been received. +- for addr in xrange(num_symbols): ++ for addr in range(num_symbols): + self.assertIn(addr, self._resolved_addresses) + self._resolved_addresses.remove(addr) + +--- a/src/3rdparty/chromium/build/android/pylib/symbols/symbol_utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/symbols/symbol_utils.py 2025-01-16 02:26:08.519847426 +0800 +@@ -2,7 +2,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import bisect + import collections +@@ -346,7 +346,7 @@ + offset) + libraries_map[lib_path].add(lib_offset) + +- for lib_path, lib_offsets in libraries_map.iteritems(): ++ for lib_path, lib_offsets in libraries_map.items(): + self.AddLibraryOffsets(lib_path, lib_offsets) + + def FindSymbolInfo(self, device_path, offset): +--- a/src/3rdparty/chromium/build/android/pylib/symbols/symbol_utils_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/symbols/symbol_utils_unittest.py 2025-01-16 02:26:08.519847426 +0800 +@@ -772,7 +772,7 @@ + addr2line_path_for_tests=_MOCK_A2L_PATH) + resolver.SetAndroidAbi('ignored-abi') + +- for addr, expected_sym in _TEST_SYMBOL_DATA.iteritems(): ++ for addr, expected_sym in _TEST_SYMBOL_DATA.items(): + self.assertEqual(resolver.FindSymbolInfo('/some/path/libmock1.so', addr), + expected_sym) + +@@ -781,11 +781,11 @@ + addr2line_path_for_tests=_MOCK_A2L_PATH) + resolver.SetAndroidAbi('ignored-abi') + resolver.AddLibraryOffsets('/some/path/libmock1.so', +- _TEST_SYMBOL_DATA.keys()) ++ list(_TEST_SYMBOL_DATA.keys())) + + resolver.DisallowSymbolizerForTesting() + +- for addr, expected_sym in _TEST_SYMBOL_DATA.iteritems(): ++ for addr, expected_sym in _TEST_SYMBOL_DATA.items(): + sym_info = resolver.FindSymbolInfo('/some/path/libmock1.so', addr) + self.assertIsNotNone(sym_info, 'None symbol info for addr %x' % addr) + self.assertEqual( +@@ -915,7 +915,7 @@ + input_backtrace = _EXPECTED_BACKTRACE.splitlines() + expected_lib_offsets_map = _EXPECTED_BACKTRACE_OFFSETS_MAP + offset_map = backtrace_translator.FindLibraryOffsets(input_backtrace) +- for lib_path, offsets in offset_map.iteritems(): ++ for lib_path, offsets in offset_map.items(): + self.assertTrue(lib_path in expected_lib_offsets_map, + '%s is not in expected library-offsets map!' 
% lib_path) + sorted_offsets = sorted(offsets) +--- a/src/3rdparty/chromium/build/android/pylib/utils/argparse_utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/utils/argparse_utils.py 2025-01-16 02:26:08.519847426 +0800 +@@ -2,7 +2,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import argparse + +--- a/src/3rdparty/chromium/build/android/pylib/utils/chrome_proxy_utils_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/utils/chrome_proxy_utils_test.py 2025-01-16 02:26:08.519847426 +0800 +@@ -91,7 +91,7 @@ + wpr_mock.assert_called_once_with() + ts_proxy_mock.assert_called_once_with() + self.assertFalse(chrome_proxy.wpr_replay_mode) +- self.assertEquals(chrome_proxy.wpr_archive_path, os.path.abspath(__file__)) ++ self.assertEqual(chrome_proxy.wpr_archive_path, os.path.abspath(__file__)) + + def test_SetWPRRecordMode(self): + chrome_proxy = chrome_proxy_utils.ChromeProxySession(4) +@@ -108,7 +108,7 @@ + def test_SetWPRArchivePath(self): + chrome_proxy = chrome_proxy_utils.ChromeProxySession(4) + chrome_proxy._wpr_server._archive_path = 'abc' +- self.assertEquals(chrome_proxy.wpr_archive_path, 'abc') ++ self.assertEqual(chrome_proxy.wpr_archive_path, 'abc') + + def test_UseDefaultDeviceProxyPort(self): + chrome_proxy = chrome_proxy_utils.ChromeProxySession() +@@ -117,7 +117,7 @@ + 'PhrPvGIaAMmd29hj8BCZOq096yj7uMpRNHpn5PDxI6I=', + '--proxy-server=socks5://localhost:1080' + ] +- self.assertEquals(chrome_proxy.device_proxy_port, 1080) ++ self.assertEqual(chrome_proxy.device_proxy_port, 1080) + self.assertListEqual(chrome_proxy.GetFlags(), expected_flags) + + def test_UseNewDeviceProxyPort(self): +@@ -127,7 +127,7 @@ + 'PhrPvGIaAMmd29hj8BCZOq096yj7uMpRNHpn5PDxI6I=', + '--proxy-server=socks5://localhost:1' + ] +- self.assertEquals(chrome_proxy.device_proxy_port, 1) ++ self.assertEqual(chrome_proxy.device_proxy_port, 1) + self.assertListEqual(chrome_proxy.GetFlags(), expected_flags) + + +--- a/src/3rdparty/chromium/build/android/pylib/utils/decorators_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/utils/decorators_test.py 2025-01-16 02:26:08.519847426 +0800 +@@ -35,8 +35,8 @@ + def doesNotRaiseException(): + return 999 + +- self.assertEquals(raiseException(), 111) +- self.assertEquals(doesNotRaiseException(), 999) ++ self.assertEqual(raiseException(), 111) ++ self.assertEqual(doesNotRaiseException(), 999) + + + class MemoizeDecoratorTest(unittest.TestCase): +@@ -79,13 +79,13 @@ + return notMemoized.count + notMemoized.count = 0 + +- self.assertEquals(memoized(), 1) +- self.assertEquals(memoized(), 1) +- self.assertEquals(memoized(), 1) +- +- self.assertEquals(notMemoized(), 1) +- self.assertEquals(notMemoized(), 2) +- self.assertEquals(notMemoized(), 3) ++ self.assertEqual(memoized(), 1) ++ self.assertEqual(memoized(), 1) ++ self.assertEqual(memoized(), 1) ++ ++ self.assertEqual(notMemoized(), 1) ++ self.assertEqual(notMemoized(), 2) ++ self.assertEqual(notMemoized(), 3) + + def testFunctionMemoizedBasedOnArgs(self): + """Tests that |Memoize| caches results based on args and kwargs.""" +@@ -94,10 +94,10 @@ + def returnValueBasedOnArgsKwargs(a, k=0): + return a + k + +- self.assertEquals(returnValueBasedOnArgsKwargs(1, 1), 2) +- self.assertEquals(returnValueBasedOnArgsKwargs(1, 2), 3) +- self.assertEquals(returnValueBasedOnArgsKwargs(2, 1), 3) +- 
self.assertEquals(returnValueBasedOnArgsKwargs(3, 3), 6) ++ self.assertEqual(returnValueBasedOnArgsKwargs(1, 1), 2) ++ self.assertEqual(returnValueBasedOnArgsKwargs(1, 2), 3) ++ self.assertEqual(returnValueBasedOnArgsKwargs(2, 1), 3) ++ self.assertEqual(returnValueBasedOnArgsKwargs(3, 3), 6) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/build/android/pylib/utils/device_dependencies_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/utils/device_dependencies_test.py 2025-01-16 02:26:08.519847426 +0800 +@@ -16,7 +16,7 @@ + test_path = os.path.join(constants.DIR_SOURCE_ROOT, 'foo', 'bar', 'baz.txt') + output_directory = os.path.join( + constants.DIR_SOURCE_ROOT, 'out-foo', 'Release') +- self.assertEquals( ++ self.assertEqual( + [None, 'foo', 'bar', 'baz.txt'], + device_dependencies.DevicePathComponentsFor( + test_path, output_directory)) +@@ -26,7 +26,7 @@ + 'icudtl.dat') + output_directory = os.path.join( + constants.DIR_SOURCE_ROOT, 'out-foo', 'Release') +- self.assertEquals( ++ self.assertEqual( + [None, 'icudtl.dat'], + device_dependencies.DevicePathComponentsFor( + test_path, output_directory)) +@@ -36,7 +36,7 @@ + 'test_dir', 'icudtl.dat') + output_directory = os.path.join( + constants.DIR_SOURCE_ROOT, 'out-foo', 'Release') +- self.assertEquals( ++ self.assertEqual( + [None, 'test_dir', 'icudtl.dat'], + device_dependencies.DevicePathComponentsFor( + test_path, output_directory)) +@@ -46,7 +46,7 @@ + 'foo.pak') + output_directory = os.path.join( + constants.DIR_SOURCE_ROOT, 'out-foo', 'Release') +- self.assertEquals( ++ self.assertEqual( + [None, 'paks', 'foo.pak'], + device_dependencies.DevicePathComponentsFor( + test_path, output_directory)) +--- a/src/3rdparty/chromium/build/android/pylib/utils/dexdump.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/utils/dexdump.py 2025-01-16 02:26:08.519847426 +0800 +@@ -48,10 +48,10 @@ + # re-encode it (as etree expects a byte string as input so it can figure + # out the encoding itself from the XML declaration) + BAD_XML_CHARS = re.compile( +- u'[\x00-\x08\x0b-\x0c\x0e-\x1f\x7f-\x84\x86-\x9f' + +- u'\ud800-\udfff\ufdd0-\ufddf\ufffe-\uffff]') ++ '[\x00-\x08\x0b-\x0c\x0e-\x1f\x7f-\x84\x86-\x9f' + ++ '\ud800-\udfff\ufdd0-\ufddf\ufffe-\uffff]') + decoded_xml = output_xml.decode('utf-8', 'replace') +- clean_xml = BAD_XML_CHARS.sub(u'\ufffd', decoded_xml) ++ clean_xml = BAD_XML_CHARS.sub('\ufffd', decoded_xml) + parsed_dex_files.append( + _ParseRootNode(ElementTree.fromstring(clean_xml.encode('utf-8')))) + return parsed_dex_files +--- a/src/3rdparty/chromium/build/android/pylib/utils/dexdump_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/utils/dexdump_test.py 2025-01-16 02:26:08.520930740 +0800 +@@ -89,7 +89,7 @@ + 'com.foo.bar2' : {'classes': {}}, + 'com.foo.bar3' : {'classes': {}}, + } +- self.assertEquals(expected, actual) ++ self.assertEqual(expected, actual) + + def testParsePackageNode(self): + example_xml_string = ( +@@ -116,7 +116,7 @@ + }, + }, + } +- self.assertEquals(expected, actual) ++ self.assertEqual(expected, actual) + + def testParseClassNode(self): + example_xml_string = ( +@@ -134,7 +134,7 @@ + 'methods': ['method1', 'method2'], + 'superclass': 'java.lang.Object', + } +- self.assertEquals(expected, actual) ++ self.assertEqual(expected, actual) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/build/android/pylib/utils/google_storage_helper.py 2023-07-18 22:12:18.000000000 +0800 
++++ b/src/3rdparty/chromium/build/android/pylib/utils/google_storage_helper.py 2025-01-16 02:26:08.520930740 +0800 +@@ -13,7 +13,7 @@ + import os + import sys + import time +-import urlparse ++import urllib.parse + + from pylib.constants import host_paths + from pylib.utils import decorators +@@ -67,7 +67,7 @@ + def read_from_link(link): + # Note that urlparse returns the path with an initial '/', so we only need to + # add one more after the 'gs;' +- gs_path = 'gs:/%s' % urlparse.urlparse(link).path ++ gs_path = 'gs:/%s' % urllib.parse.urlparse(link).path + cmd = [_GSUTIL_PATH, '-q', 'cat', gs_path] + return cmd_helper.GetCmdOutput(cmd) + +--- a/src/3rdparty/chromium/build/android/pylib/utils/logging_utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/utils/logging_utils.py 2025-01-16 02:26:08.520930740 +0800 +@@ -110,7 +110,7 @@ + try: + yield + finally: +- for formatter, prev_color in prev_colors.iteritems(): ++ for formatter, prev_color in prev_colors.items(): + formatter.color_map[level] = prev_color + + +--- a/src/3rdparty/chromium/build/android/pylib/utils/proguard_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/pylib/utils/proguard_test.py 2025-01-16 02:26:08.520930740 +0800 +@@ -26,7 +26,7 @@ + } + ] + } +- self.assertEquals(expected, actual) ++ self.assertEqual(expected, actual) + + def testMethod(self): + actual = proguard.Parse( +@@ -48,7 +48,7 @@ + } + ] + } +- self.assertEquals(expected, actual) ++ self.assertEqual(expected, actual) + + def testClassAnnotation(self): + actual = proguard.Parse( +@@ -77,7 +77,7 @@ + } + ] + } +- self.assertEquals(expected, actual) ++ self.assertEqual(expected, actual) + + def testClassAnnotationWithArrays(self): + actual = proguard.Parse( +@@ -109,7 +109,7 @@ + } + ] + } +- self.assertEquals(expected, actual) ++ self.assertEqual(expected, actual) + + def testNestedClassAnnotations(self): + actual = proguard.Parse( +@@ -157,7 +157,7 @@ + } + ] + } +- self.assertEquals(expected, actual) ++ self.assertEqual(expected, actual) + + def testClassArraysOfAnnotations(self): + actual = proguard.Parse( +@@ -216,7 +216,7 @@ + } + ] + } +- self.assertEquals(expected, actual) ++ self.assertEqual(expected, actual) + + def testReadFullClassFileAttributes(self): + actual = proguard.Parse( +@@ -248,7 +248,7 @@ + } + ] + } +- self.assertEquals(expected, actual) ++ self.assertEqual(expected, actual) + + def testMethodAnnotation(self): + actual = proguard.Parse( +@@ -283,7 +283,7 @@ + } + ] + } +- self.assertEquals(expected, actual) ++ self.assertEqual(expected, actual) + + def testMethodAnnotationWithArrays(self): + actual = proguard.Parse( +@@ -321,7 +321,7 @@ + } + ] + } +- self.assertEquals(expected, actual) ++ self.assertEqual(expected, actual) + + def testMethodAnnotationWithPrimitivesAndArrays(self): + actual = proguard.Parse( +@@ -369,7 +369,7 @@ + } + ] + } +- self.assertEquals(expected, actual) ++ self.assertEqual(expected, actual) + + def testNestedMethodAnnotations(self): + actual = proguard.Parse( +@@ -423,7 +423,7 @@ + } + ] + } +- self.assertEquals(expected, actual) ++ self.assertEqual(expected, actual) + + def testMethodArraysOfAnnotations(self): + actual = proguard.Parse( +@@ -488,7 +488,7 @@ + } + ] + } +- self.assertEquals(expected, actual) ++ self.assertEqual(expected, actual) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/build/android/pylib/utils/shared_preference_utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ 
b/src/3rdparty/chromium/build/android/pylib/utils/shared_preference_utils.py 2025-01-16 02:26:08.520930740 +0800
+@@ -20,10 +20,10 @@
+ """
+ if isinstance(data, dict):
+ return {UnicodeToStr(key): UnicodeToStr(value)
+- for key, value in data.iteritems()}
++ for key, value in data.items()}
+ elif isinstance(data, list):
+ return [UnicodeToStr(element) for element in data]
+- elif isinstance(data, unicode):
++ elif isinstance(data, str):
+ return data.encode('utf-8')
+ return data
+
+@@ -80,12 +80,12 @@
+ shared_pref.Remove(key)
+ except KeyError:
+ logging.warning("Attempted to remove non-existent key %s", key)
+- for key, value in setting.get('set', {}).iteritems():
++ for key, value in setting.get('set', {}).items():
+ if isinstance(value, bool):
+ shared_pref.SetBoolean(key, value)
+- elif isinstance(value, basestring):
++ elif isinstance(value, str):
+ shared_pref.SetString(key, value)
+- elif isinstance(value, long) or isinstance(value, int):
++ elif isinstance(value, int):
+ shared_pref.SetLong(key, value)
+ elif isinstance(value, list):
+ shared_pref.SetStringSet(key, value)
+--- a/src/3rdparty/chromium/build/android/pylib/utils/test_filter_test.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/build/android/pylib/utils/test_filter_test.py 2025-01-16 02:26:08.520930740 +0800
+@@ -22,7 +22,7 @@
+ ]
+ actual = test_filter.ParseFilterFile(input_lines)
+ expected = ['positive1', 'positive2', 'positive3'], []
+- self.assertEquals(expected, actual)
++ self.assertEqual(expected, actual)
+
+ def testParseFilterFile_onlyPositive(self):
+ input_lines = [
+@@ -31,7 +31,7 @@
+ ]
+ actual = test_filter.ParseFilterFile(input_lines)
+ expected = ['positive1', 'positive2'], []
+- self.assertEquals(expected, actual)
++ self.assertEqual(expected, actual)
+
+ def testParseFilterFile_onlyNegative(self):
+ input_lines = [
+@@ -40,7 +40,7 @@
+ ]
+ actual = test_filter.ParseFilterFile(input_lines)
+ expected = [], ['negative1', 'negative2']
+- self.assertEquals(expected, actual)
++ self.assertEqual(expected, actual)
+
+ def testParseFilterFile_positiveAndNegative(self):
+ input_lines = [
+@@ -51,7 +51,7 @@
+ ]
+ actual = test_filter.ParseFilterFile(input_lines)
+ expected = ['positive1', 'positive2'], ['negative1', 'negative2']
+- self.assertEquals(expected, actual)
++ self.assertEqual(expected, actual)
+
+
+ class InitializeFilterFromArgsTest(unittest.TestCase):
+@@ -64,7 +64,7 @@
+ 'FooTest.testFoo:BarTest.testBar'])
+ expected = 'FooTest.testFoo:BarTest.testBar'
+ actual = test_filter.InitializeFilterFromArgs(args)
+- self.assertEquals(actual, expected)
++ self.assertEqual(actual, expected)
+
+ def testInitializeJavaStyleFilter(self):
+ parser = argparse.ArgumentParser()
+@@ -74,7 +74,7 @@
+ 'FooTest#testFoo:BarTest#testBar'])
+ expected = 'FooTest.testFoo:BarTest.testBar'
+ actual = test_filter.InitializeFilterFromArgs(args)
+- self.assertEquals(actual, expected)
++ self.assertEqual(actual, expected)
+
+ def testInitializeBasicIsolatedScript(self):
+ parser = argparse.ArgumentParser()
+@@ -84,7 +84,7 @@
+ 'FooTest.testFoo::BarTest.testBar'])
+ expected = 'FooTest.testFoo:BarTest.testBar'
+ actual = test_filter.InitializeFilterFromArgs(args)
+- self.assertEquals(actual, expected)
++ self.assertEqual(actual, expected)
+
+ def testFilterArgWithPositiveFilterInFilterFile(self):
+ parser = argparse.ArgumentParser()
+@@ -98,7 +98,7 @@
+ tmp_file.name])
+ expected = 'positive1:positive2-negative1:negative2:negative3'
+ actual = 
test_filter.InitializeFilterFromArgs(args) +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + + def testFilterFileWithPositiveFilterInFilterArg(self): + parser = argparse.ArgumentParser() +@@ -113,7 +113,7 @@ + tmp_file.name]) + expected = 'positive1:positive2-negative1:negative2:negative3' + actual = test_filter.InitializeFilterFromArgs(args) +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + + def testPositiveFilterInBothFileAndArg(self): + parser = argparse.ArgumentParser() +@@ -141,59 +141,59 @@ + tmp_file.name]) + expected = '-negative1:negative2:negative3:negative4' + actual = test_filter.InitializeFilterFromArgs(args) +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + + + class AppendPatternsToFilter(unittest.TestCase): + def testAllEmpty(self): + expected = '' + actual = test_filter.AppendPatternsToFilter('', [], []) +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + def testAppendOnlyPositiveToEmptyFilter(self): + expected = 'positive' + actual = test_filter.AppendPatternsToFilter('', ['positive']) +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + def testAppendOnlyNegativeToEmptyFilter(self): + expected = '-negative' + actual = test_filter.AppendPatternsToFilter('', + negative_patterns=['negative']) +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + def testAppendToEmptyFilter(self): + expected = 'positive-negative' + actual = test_filter.AppendPatternsToFilter('', ['positive'], ['negative']) +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + def testAppendToPositiveOnlyFilter(self): + expected = 'positive1:positive2-negative' + actual = test_filter.AppendPatternsToFilter('positive1', ['positive2'], + ['negative']) +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + def testAppendToNegativeOnlyFilter(self): + expected = 'positive-negative1:negative2' + actual = test_filter.AppendPatternsToFilter('-negative1', ['positive'], + ['negative2']) +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + def testAppendPositiveToFilter(self): + expected = 'positive1:positive2-negative1' + actual = test_filter.AppendPatternsToFilter('positive1-negative1', + ['positive2']) +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + def testAppendNegativeToFilter(self): + expected = 'positive1-negative1:negative2' + actual = test_filter.AppendPatternsToFilter('positive1-negative1', + negative_patterns=['negative2']) +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + def testAppendBothToFilter(self): + expected = 'positive1:positive2-negative1:negative2' + actual = test_filter.AppendPatternsToFilter('positive1-negative1', + positive_patterns=['positive2'], + negative_patterns=['negative2']) +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + def testAppendMultipleToFilter(self): + expected = 'positive1:positive2:positive3-negative1:negative2:negative3' + actual = test_filter.AppendPatternsToFilter('positive1-negative1', + ['positive2', 'positive3'], + ['negative2', 'negative3']) +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + def testRepeatedAppendToFilter(self): + expected = 'positive1:positive2:positive3-negative1:negative2:negative3' + filter_string = test_filter.AppendPatternsToFilter('positive1-negative1', +@@ -201,32 +201,32 @@ + 
['negative2']) + actual = test_filter.AppendPatternsToFilter(filter_string, ['positive3'], + ['negative3']) +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + def testAppendHashSeparatedPatternsToFilter(self): + expected = 'positive.test1:positive.test2-negative.test1:negative.test2' + actual = test_filter.AppendPatternsToFilter('positive#test1-negative#test1', + ['positive#test2'], + ['negative#test2']) +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + + + class HasPositivePatterns(unittest.TestCase): + def testEmpty(self): + expected = False + actual = test_filter.HasPositivePatterns('') +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + def testHasOnlyPositive(self): + expected = True + actual = test_filter.HasPositivePatterns('positive') +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + def testHasOnlyNegative(self): + expected = False + actual = test_filter.HasPositivePatterns('-negative') +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + def testHasBoth(self): + expected = True + actual = test_filter.HasPositivePatterns('positive-negative') +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/build/android/stacktrace/stackwalker.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/android/stacktrace/stackwalker.py 2025-01-16 02:26:08.520930740 +0800 +@@ -4,7 +4,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import argparse + import os +--- a/src/3rdparty/chromium/build/apple/tweak_info_plist.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/apple/tweak_info_plist.py 2025-01-16 02:26:08.520930740 +0800 +@@ -20,7 +20,7 @@ + # by the time the app target is done, the info.plist is correct. 
+ # + +-from __future__ import print_function ++ + + import optparse + import os +@@ -115,7 +115,7 @@ + if len(groups) != 4 or not all(element.isdigit() for element in groups): + print('Invalid version string specified: "%s"' % version, file=sys.stderr) + return False +- values = dict(zip(('MAJOR', 'MINOR', 'BUILD', 'PATCH'), groups)) ++ values = dict(list(zip(('MAJOR', 'MINOR', 'BUILD', 'PATCH'), groups))) + + for key in version_format_for_key: + plist[key] = _GetVersion(version_format_for_key[key], values, overrides) +--- a/src/3rdparty/chromium/build/chromeos/create_test_runner_script.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/chromeos/create_test_runner_script.py 2025-01-16 02:26:08.520930740 +0800 +@@ -114,7 +114,7 @@ + vm_test_args=str(vm_test_args), + vm_test_path_args=str(vm_test_path_args))) + +- os.chmod(args.script_output_path, 0750) ++ os.chmod(args.script_output_path, 0o750) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/build/chromeos/test_runner.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/chromeos/test_runner.py 2025-01-16 02:26:08.520930740 +0800 +@@ -157,7 +157,7 @@ + logging.info('Running the following command on the device:') + logging.info('\n' + '\n'.join(script_contents)) + fd, tmp_path = tempfile.mkstemp(suffix='.sh', dir=self._path_to_outdir) +- os.fchmod(fd, 0755) ++ os.fchmod(fd, 0o755) + with os.fdopen(fd, 'wb') as f: + f.write('\n'.join(script_contents) + '\n') + return tmp_path +@@ -182,7 +182,7 @@ + + signal.signal(signal.SIGTERM, _kill_child_procs) + +- for i in xrange(self._retries + 1): ++ for i in range(self._retries + 1): + logging.info('########################################') + logging.info('Test attempt #%d', i) + logging.info('########################################') +--- a/src/3rdparty/chromium/build/config/get_host_byteorder.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/config/get_host_byteorder.py 2025-01-16 02:26:08.520930740 +0800 +@@ -5,7 +5,7 @@ + + """Get Byteorder of host architecture""" + +-from __future__ import print_function ++ + + import sys + +--- a/src/3rdparty/chromium/build/config/merge_for_jumbo.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/config/merge_for_jumbo.py 2025-01-16 02:26:08.520930740 +0800 +@@ -9,8 +9,8 @@ + + """ + +-from __future__ import print_function +-from __future__ import unicode_literals ++ ++ + + import argparse + import hashlib +@@ -22,7 +22,7 @@ + # non-overlapping ranges. The total range is inclusive of the first index + # and exclusive of the last index from the given sequence. + for start, stop in zip(boundaries, boundaries[1:]): +- yield range(start, stop) ++ yield list(range(start, stop)) + + + def generate_chunk_stops(inputs, output_count, smart_merge=True): +--- a/src/3rdparty/chromium/build/config/ios/codesign.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/config/ios/codesign.py 2025-01-16 02:26:08.522014055 +0800 +@@ -2,7 +2,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import argparse + import codecs +@@ -18,7 +18,7 @@ + import tempfile + + if sys.version_info.major < 3: +- basestring_compat = basestring ++ basestring_compat = str + else: + basestring_compat = str + +@@ -97,7 +97,7 @@ + error message. The dictionary will be empty if there are no errors. 
+ """ + errors = {} +- for key, expected_value in expected_mappings.items(): ++ for key, expected_value in list(expected_mappings.items()): + if key in self._data: + value = self._data[key] + if value != expected_value: +@@ -181,12 +181,12 @@ + + def _ExpandVariables(self, data, substitutions): + if isinstance(data, basestring_compat): +- for key, substitution in substitutions.items(): ++ for key, substitution in list(substitutions.items()): + data = data.replace('$(%s)' % (key,), substitution) + return data + + if isinstance(data, dict): +- for key, value in data.items(): ++ for key, value in list(data.items()): + data[key] = self._ExpandVariables(value, substitutions) + return data + +@@ -197,7 +197,7 @@ + return data + + def LoadDefaults(self, defaults): +- for key, value in defaults.items(): ++ for key, value in list(defaults.items()): + if key not in self._data: + self._data[key] = value + +--- a/src/3rdparty/chromium/build/config/ios/compile_ib_files.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/config/ios/compile_ib_files.py 2025-01-16 02:26:08.522014055 +0800 +@@ -2,7 +2,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import argparse + import logging +--- a/src/3rdparty/chromium/build/config/ios/compile_xcassets_unittests.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/config/ios/compile_xcassets_unittests.py 2025-01-16 02:26:08.522014055 +0800 +@@ -16,7 +16,7 @@ + } + + def testNoError(self): +- self.assertEquals( ++ self.assertEqual( + '', + compile_xcassets.FilterCompilerOutput( + '/* com.apple.actool.compilation-results */\n' +@@ -24,7 +24,7 @@ + self.relative_paths)) + + def testNoErrorRandomMessages(self): +- self.assertEquals( ++ self.assertEqual( + '', + compile_xcassets.FilterCompilerOutput( + '2017-07-04 04:59:19.460 ibtoold[23487:41214] CoreSimulator is att' +@@ -37,7 +37,7 @@ + self.relative_paths)) + + def testWarning(self): +- self.assertEquals( ++ self.assertEqual( + '/* com.apple.actool.document.warnings */\n' + '../../Chromium.xcassets:./image1.imageset/[universal][][][1x][][][][' + '][][]: warning: The file "image1.png" for the image set "image1"' +@@ -52,7 +52,7 @@ + self.relative_paths)) + + def testError(self): +- self.assertEquals( ++ self.assertEqual( + '/* com.apple.actool.errors */\n' + '../../Chromium.xcassets: error: The output directory "/Users/janedoe/' + 'chromium/src/out/Default/Chromium.app" does not exist.\n', +@@ -65,7 +65,7 @@ + self.relative_paths)) + + def testSpurious(self): +- self.assertEquals( ++ self.assertEqual( + '/* com.apple.actool.document.warnings */\n' + '../../Chromium.xcassets:./AppIcon.appiconset: warning: A 1024x1024 ap' + 'p store icon is required for iOS apps\n', +@@ -80,7 +80,7 @@ + self.relative_paths)) + + def testComplexError(self): +- self.assertEquals( ++ self.assertEqual( + '/* com.apple.actool.errors */\n' + ': error: Failed to find a suitable device for the type SimDeviceType ' + ': com.apple.dt.Xcode.IBSimDeviceType.iPad-2x with runtime SimRunt' +--- a/src/3rdparty/chromium/build/config/ios/find_signing_identity.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/config/ios/find_signing_identity.py 2025-01-16 02:26:08.522014055 +0800 +@@ -2,7 +2,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+
+-from __future__ import print_function
++
+
+ import argparse
+ import os
+--- a/src/3rdparty/chromium/build/config/ios/write_framework_hmap.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/build/config/ios/write_framework_hmap.py 2025-01-16 02:26:08.522014055 +0800
+@@ -2,7 +2,7 @@
+ # Use of this source code is governed by a BSD-style license that can be
+ # found in the LICENSE file.
+
+-from __future__ import print_function
++
+
+ import os
+ import struct
+@@ -18,7 +18,7 @@
+ (out, framework, all_headers) = args[1], args[2], args[3:]
+
+ framework_name = os.path.basename(framework).split('.')[0]
+- all_headers = map(os.path.abspath, all_headers)
++ all_headers = list(map(os.path.abspath, all_headers))
+ filelist = {}
+ for header in all_headers:
+ filename = os.path.basename(header)
+@@ -50,7 +50,7 @@
+ count = len(filelist)
+ capacity = NextGreaterPowerOf2(count)
+ strings_offset = 24 + (12 * capacity)
+- max_value_length = len(max(filelist.values(), key=lambda v: len(v)))
++ max_value_length = len(max(list(filelist.values()), key=lambda v: len(v)))
+
+ out = open(output_name, 'wb')
+ out.write(struct.pack('= timeout:
+--- a/src/3rdparty/chromium/build/fuchsia/boot_data.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/build/fuchsia/boot_data.py 2025-01-16 02:26:08.523097370 +0800
+@@ -4,7 +4,7 @@
+
+ """Functions used to provision Fuchsia boot images."""
+
+-import common
++from . import common
+ import logging
+ import os
+ import subprocess
+--- a/src/3rdparty/chromium/build/fuchsia/common_args.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/build/fuchsia/common_args.py 2025-01-16 02:26:08.523097370 +0800
+@@ -8,7 +8,7 @@
+ import os
+ import sys
+
+-from common import GetHostArchFromPlatform
++from .common import GetHostArchFromPlatform
+
+
+ def _AddTargetSpecificationArgs(arg_parser):
+--- a/src/3rdparty/chromium/build/fuchsia/deploy_to_amber_repo.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/build/fuchsia/deploy_to_amber_repo.py 2025-01-16 02:26:08.523097370 +0800
+@@ -7,7 +7,7 @@
+ """Deploys Fuchsia packages to an Amber repository in a Fuchsia
+ build output directory."""
+
+-import amber_repo
++from . import amber_repo
+ import argparse
+ import os
+ import sys
+@@ -55,7 +55,7 @@
+ fuchsia_out_dir = os.path.expanduser(args.fuchsia_out_dir.pop())
+ repo = amber_repo.ExternalAmberRepo(
+ os.path.join(fuchsia_out_dir, 'amber-files'))
+- print('Installing packages and symbols in Amber repo %s...' % repo.GetPath())
++ print('Installing packages and symbols in Amber repo %s...' % repo.GetPath())
+
+ for package in args.package:
+ repo.PublishPackage(package)
+--- a/src/3rdparty/chromium/build/fuchsia/device_target.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/build/fuchsia/device_target.py 2025-01-16 02:26:08.523097370 +0800
+@@ -4,22 +4,22 @@
+
+ """Implements commands for running and interacting with Fuchsia on devices."""
+
+-from __future__ import print_function
+
+-import amber_repo
+-import boot_data
++
++from . import amber_repo
++from . import boot_data
+ import filecmp
+ import logging
+ import os
+ import re
+ import subprocess
+ import sys
+-import target
++from . 
import target + import tempfile + import time + import uuid + +-from common import SDK_ROOT, EnsurePathExists, GetHostToolPathFromPlatform ++from .common import SDK_ROOT, EnsurePathExists, GetHostToolPathFromPlatform + + # The maximum times to attempt mDNS resolution when connecting to a freshly + # booted Fuchsia instance before aborting. +@@ -258,7 +258,7 @@ + # Repeatdly query mDNS until we find the device, or we hit the timeout of + # DISCOVERY_TIMEOUT_SECS. + logging.info('Waiting for device to join network.') +- for _ in xrange(_BOOT_DISCOVERY_ATTEMPTS): ++ for _ in range(_BOOT_DISCOVERY_ATTEMPTS): + if self.__Discover(): + break + +--- a/src/3rdparty/chromium/build/fuchsia/emu_target.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/fuchsia/emu_target.py 2025-01-16 02:26:08.523097370 +0800 +@@ -4,14 +4,14 @@ + + """Implements commands for running/interacting with Fuchsia on an emulator.""" + +-import amber_repo +-import boot_data ++from . import amber_repo ++from . import boot_data + import logging + import os +-import runner_logs ++from . import runner_logs + import subprocess + import sys +-import target ++from . import target + import tempfile + + +--- a/src/3rdparty/chromium/build/fuchsia/generic_x64_target.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/fuchsia/generic_x64_target.py 2025-01-16 02:26:08.523097370 +0800 +@@ -4,12 +4,12 @@ + """Implements commands for running and interacting with Fuchsia generic + build on devices.""" + +-import boot_data +-import device_target ++from . import boot_data ++from . import device_target + import logging + import os + +-from common import SDK_ROOT, EnsurePathExists, \ ++from .common import SDK_ROOT, EnsurePathExists, \ + GetHostToolPathFromPlatform, SubprocessCallWithTimeout + + +@@ -83,7 +83,7 @@ + # Repeatdly query mDNS until we find the device, or we hit + # BOOT_DISCOVERY_ATTEMPTS + logging.info('Waiting for device to join network.') +- for _ in xrange(device_target.BOOT_DISCOVERY_ATTEMPTS): ++ for _ in range(device_target.BOOT_DISCOVERY_ATTEMPTS): + if self.__Discover(): + break + +--- a/src/3rdparty/chromium/build/fuchsia/net_test_server.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/fuchsia/net_test_server.py 2025-01-16 02:26:08.523097370 +0800 +@@ -2,7 +2,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-import common ++from . import common + import json + import logging + import os +@@ -37,7 +37,7 @@ + return self._port_mapping[host_port] + + def Unmap(self, device_port): +- for host_port, entry in self._port_mapping.iteritems(): ++ for host_port, entry in self._port_mapping.items(): + if entry == device_port: + forwarding_args = [ + '-NT', '-O', 'cancel', '-R', '0:localhost:%d' % host_port] +--- a/src/3rdparty/chromium/build/fuchsia/qemu_target.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/fuchsia/qemu_target.py 2025-01-16 02:26:08.523097370 +0800 +@@ -4,23 +4,23 @@ + + """Implements commands for running and interacting with Fuchsia on QEMU.""" + +-import boot_data +-import common +-import emu_target ++from . import boot_data ++from . import common ++from . import emu_target + import logging + import md5 + import os + import platform +-import qemu_image ++from . 
import qemu_image + import shutil + import subprocess + import sys + import tempfile + +-from common import GetHostArchFromPlatform, GetEmuRootForPlatform +-from common import EnsurePathExists +-from qemu_image import ExecQemuImgWithRetry +-from target import FuchsiaTargetException ++from .common import GetHostArchFromPlatform, GetEmuRootForPlatform ++from .common import EnsurePathExists ++from .qemu_image import ExecQemuImgWithRetry ++from .target import FuchsiaTargetException + + + # Virtual networking configuration data for QEMU. +--- a/src/3rdparty/chromium/build/fuchsia/qemu_target_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/fuchsia/qemu_target_test.py 2025-01-16 02:26:08.523097370 +0800 +@@ -3,7 +3,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-import qemu_target ++from . import qemu_target + import shutil + import subprocess + import tempfile +--- a/src/3rdparty/chromium/build/fuchsia/remote_cmd.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/fuchsia/remote_cmd.py 2025-01-16 02:26:08.523097370 +0800 +@@ -7,7 +7,7 @@ + import subprocess + import threading + +-from common import SubprocessCallWithTimeout ++from .common import SubprocessCallWithTimeout + + _SSH = ['ssh'] + _SCP = ['scp', '-C'] # Use gzip compression. +--- a/src/3rdparty/chromium/build/fuchsia/run_package.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/fuchsia/run_package.py 2025-01-16 02:26:08.523097370 +0800 +@@ -5,9 +5,9 @@ + """Contains a helper function for deploying and executing a packaged + executable on a Target.""" + +-from __future__ import print_function + +-import common ++ ++from . import common + import hashlib + import logging + import multiprocessing +@@ -19,7 +19,7 @@ + import threading + import uuid + +-from symbolizer import BuildIdsPaths, RunSymbolizer, SymbolizerFilter ++from .symbolizer import BuildIdsPaths, RunSymbolizer, SymbolizerFilter + + FAR = common.GetHostToolPathFromPlatform('far') + +--- a/src/3rdparty/chromium/build/fuchsia/runner_exceptions.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/fuchsia/runner_exceptions.py 2025-01-16 02:26:08.523097370 +0800 +@@ -15,13 +15,13 @@ + import sys + import traceback + +-from target import FuchsiaTargetException ++from .target import FuchsiaTargetException + + def _PrintException(value, trace): + """Prints stack trace and error message for the current exception.""" + + traceback.print_tb(trace) +- print(str(value)) ++ print((str(value))) + + + def IsStdoutBlocking(): +@@ -68,7 +68,7 @@ + return 72 + elif type is subprocess.CalledProcessError: + if os.path.basename(value.cmd[0]) == 'scp': +- print('Error: scp operation failed - %s' % str(value)) ++ print(('Error: scp operation failed - %s' % str(value))) + return 81 + if os.path.basename(value.cmd[0]) == 'qemu-img': + print('Error: qemu-img fuchsia image generation failed.') +--- a/src/3rdparty/chromium/build/fuchsia/runner_logs.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/fuchsia/runner_logs.py 2025-01-16 02:26:08.523097370 +0800 +@@ -12,7 +12,7 @@ + import multiprocessing + import os + +-from symbolizer import RunSymbolizer ++from .symbolizer import RunSymbolizer + + SYMBOLIZED_SUFFIX = '.symbolized' + +--- a/src/3rdparty/chromium/build/fuchsia/symbolizer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/fuchsia/symbolizer.py 2025-01-16 02:26:08.523097370 +0800 +@@ 
-6,9 +6,9 @@ + import os + import subprocess + +-from common import SDK_ROOT +-from common import GetHostArchFromPlatform +-from common import GetHostToolPathFromPlatform ++from .common import SDK_ROOT ++from .common import GetHostArchFromPlatform ++from .common import GetHostToolPathFromPlatform + + # TODO(crbug.com/1131647): Change 'llvm-3.8' to 'llvm' after docker image is + # updated. +@@ -17,10 +17,8 @@ + + def BuildIdsPaths(package_paths): + """Generate build ids paths for symbolizer processes.""" +- build_ids_paths = map( +- lambda package_path: os.path.join( +- os.path.dirname(package_path), 'ids.txt'), +- package_paths) ++ build_ids_paths = [os.path.join( ++ os.path.dirname(package_path), 'ids.txt') for package_path in package_paths] + return build_ids_paths + + +--- a/src/3rdparty/chromium/build/fuchsia/target.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/fuchsia/target.py 2025-01-16 02:26:08.524180685 +0800 +@@ -2,12 +2,12 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-import common ++from . import common + import json + import logging + import os +-import remote_cmd +-import runner_logs ++from . import remote_cmd ++from . import runner_logs + import subprocess + import time + +@@ -203,7 +203,7 @@ + assert type(sources) is tuple or type(sources) is list + self._AssertIsStarted() + if for_package: +- sources = map(_MapIsolatedPathsForPackage(for_package, 0), sources) ++ sources = list(map(_MapIsolatedPathsForPackage(for_package, 0), sources)) + logging.debug('copy remote:%s => local:%s' % (sources, dest)) + return self.GetCommandRunner().RunScp(sources, dest, + remote_cmd.COPY_FROM_TARGET) +--- a/src/3rdparty/chromium/build/fuchsia/test_runner.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/fuchsia/test_runner.py 2025-01-16 02:26:08.524180685 +0800 +@@ -8,15 +8,15 @@ + + import argparse + import os +-import runner_logs ++from . import runner_logs + import sys + +-from common_args import AddCommonArgs, ConfigureLogging, GetDeploymentTargetForArgs +-from net_test_server import SetupTestServer +-from run_package import RunPackage, RunPackageArgs, SystemLogReader +-from runner_exceptions import HandleExceptionAndReturnExitCode +-from runner_logs import RunnerLogManager +-from symbolizer import BuildIdsPaths ++from .common_args import AddCommonArgs, ConfigureLogging, GetDeploymentTargetForArgs ++from .net_test_server import SetupTestServer ++from .run_package import RunPackage, RunPackageArgs, SystemLogReader ++from .runner_exceptions import HandleExceptionAndReturnExitCode ++from .runner_logs import RunnerLogManager ++from .symbolizer import BuildIdsPaths + + DEFAULT_TEST_SERVER_CONCURRENCY = 4 + +--- a/src/3rdparty/chromium/build/fuchsia/update_sdk.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/fuchsia/update_sdk.py 2025-01-16 02:26:08.524180685 +0800 +@@ -16,7 +16,7 @@ + import sys + import tarfile + +-from common import GetHostOsFromPlatform, GetHostArchFromPlatform, \ ++from .common import GetHostOsFromPlatform, GetHostArchFromPlatform, \ + DIR_SOURCE_ROOT, SDK_ROOT, IMAGES_ROOT + + sys.path.append(os.path.join(DIR_SOURCE_ROOT, 'build')) +--- a/src/3rdparty/chromium/build/linux/dump_app_syms.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/linux/dump_app_syms.py 2025-01-16 02:26:08.524180685 +0800 +@@ -5,7 +5,7 @@ + # Helper script to run dump_syms on Chrome Linux executables and strip + # them if needed. 
+ +-from __future__ import print_function ++ + + import os + import subprocess +--- a/src/3rdparty/chromium/build/linux/install-chromeos-fonts.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/linux/install-chromeos-fonts.py 2025-01-16 02:26:08.524180685 +0800 +@@ -7,7 +7,7 @@ + # This script can be run manually (as root), but is also run as part + # install-build-deps.sh. + +-from __future__ import print_function ++ + + import os + import shutil +@@ -59,7 +59,7 @@ + if os.path.isdir(dest_dir): + shutil.rmtree(dest_dir) + os.mkdir(dest_dir) +- os.chmod(dest_dir, 0755) ++ os.chmod(dest_dir, 0o755) + + print("Installing Chrome OS fonts to %s." % dest_dir) + for url in URLS: +@@ -80,9 +80,9 @@ + + for base, dirs, files in os.walk(dest_dir): + for dir in dirs: +- os.chmod(os.path.join(base, dir), 0755) ++ os.chmod(os.path.join(base, dir), 0o755) + for file in files: +- os.chmod(os.path.join(base, file), 0644) ++ os.chmod(os.path.join(base, file), 0o644) + + print("""\ + +--- a/src/3rdparty/chromium/build/linux/rewrite_dirs.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/linux/rewrite_dirs.py 2025-01-16 02:26:08.524180685 +0800 +@@ -5,7 +5,7 @@ + + """Rewrites paths in -I, -L and other option to be relative to a sysroot.""" + +-from __future__ import print_function ++ + + import sys + import os +--- a/src/3rdparty/chromium/build/linux/sysroot_scripts/build_and_upload.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/linux/sysroot_scripts/build_and_upload.py 2025-01-16 02:26:08.524180685 +0800 +@@ -7,7 +7,7 @@ + UploadSysroot for each supported arch of each sysroot creator. + """ + +-from __future__ import print_function ++ + + import glob + import hashlib +--- a/src/3rdparty/chromium/build/linux/sysroot_scripts/find_incompatible_glibc_symbols.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/linux/sysroot_scripts/find_incompatible_glibc_symbols.py 2025-01-16 02:26:08.524180685 +0800 +@@ -6,7 +6,7 @@ + """Find incompatible symbols in glibc and output a list of replacements. + """ + +-from __future__ import print_function ++ + + import re + import sys +@@ -31,7 +31,7 @@ + symbols[symbol] = set([version]) + + replacements = [] +- for symbol, versions in symbols.iteritems(): ++ for symbol, versions in symbols.items(): + if len(versions) <= 1: + continue + versions_parsed = [[ +--- a/src/3rdparty/chromium/build/linux/sysroot_scripts/find_incompatible_glibc_symbols_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/linux/sysroot_scripts/find_incompatible_glibc_symbols_unittest.py 2025-01-16 02:26:08.524180685 +0800 +@@ -3,7 +3,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-import cStringIO ++import io + import find_incompatible_glibc_symbols + + NM_DATA = """\ +@@ -27,7 +27,7 @@ + '__asm__(".symver foo2, foo2@GLIBC_2.3");', + ] + +-nm_file = cStringIO.StringIO() ++nm_file = io.StringIO() + nm_file.write(NM_DATA) + nm_file.seek(0) + +--- a/src/3rdparty/chromium/build/linux/sysroot_scripts/install-sysroot.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/linux/sysroot_scripts/install-sysroot.py 2025-01-16 02:26:08.524180685 +0800 +@@ -18,7 +18,7 @@ + # time chrome's build dependencies are changed but should also be updated + # periodically to include upstream security fixes from Debian. 
+ +-from __future__ import print_function ++ + + import hashlib + import json +@@ -34,7 +34,7 @@ + from urllib.request import urlopen + except ImportError: + # Fall back to Python 2's urllib2 +- from urllib2 import urlopen ++ from urllib.request import urlopen + + SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) + +--- a/src/3rdparty/chromium/build/linux/sysroot_scripts/merge-package-lists.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/linux/sysroot_scripts/merge-package-lists.py 2025-01-16 02:26:08.524180685 +0800 +@@ -22,7 +22,7 @@ + lines = file.readlines() + if len(lines) % 3 != 0: + exit(1) +- for i in xrange(0, len(lines), 3): ++ for i in range(0, len(lines), 3): + packages[lines[i]] = (lines[i + 1], lines[i + 2]) + + AddPackagesFromFile(open(sys.argv[1], 'r')) +@@ -30,5 +30,5 @@ + + output_file = open(sys.argv[1], 'w') + +-for (package, (filename, sha256)) in packages.iteritems(): ++for (package, (filename, sha256)) in packages.items(): + output_file.write(package + filename + sha256) +--- a/src/3rdparty/chromium/build/linux/unbundle/remove_bundled_libraries.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/linux/unbundle/remove_bundled_libraries.py 2025-01-16 02:26:08.524180685 +0800 +@@ -9,7 +9,7 @@ + See README for more details. + """ + +-from __future__ import print_function ++ + + import optparse + import os.path +@@ -91,7 +91,7 @@ + + # Fail if exclusion list contains stale entries - this helps keep it + # up to date. +- for exclusion, used in exclusion_used.iteritems(): ++ for exclusion, used in exclusion_used.items(): + if not used: + print('%s does not exist' % exclusion) + exit_code = 1 +--- a/src/3rdparty/chromium/build/linux/unbundle/replace_gn_files.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/linux/unbundle/replace_gn_files.py 2025-01-16 02:26:08.524180685 +0800 +@@ -8,7 +8,7 @@ + make the build use system libraries. 
+ """ + +-from __future__ import print_function ++ + + import argparse + import os +@@ -51,7 +51,7 @@ + args = parser.parse_args(argv) + + handled_libraries = set() +- for lib, path in REPLACEMENTS.items(): ++ for lib, path in list(REPLACEMENTS.items()): + if lib not in args.system_libraries: + continue + handled_libraries.add(lib) +--- a/src/3rdparty/chromium/build/mac/find_sdk.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/mac/find_sdk.py 2025-01-16 02:26:08.524180685 +0800 +@@ -21,7 +21,7 @@ + 10.14 + """ + +-from __future__ import print_function ++ + + import os + import re +--- a/src/3rdparty/chromium/build/mac/should_use_hermetic_xcode.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/mac/should_use_hermetic_xcode.py 2025-01-16 02:26:08.524180685 +0800 +@@ -14,7 +14,7 @@ + python should_use_hermetic_xcode.py + """ + +-from __future__ import print_function ++ + + import argparse + import os +--- a/src/3rdparty/chromium/build/toolchain/clang_code_coverage_wrapper.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/toolchain/clang_code_coverage_wrapper.py 2025-01-16 02:26:08.524180685 +0800 +@@ -46,7 +46,7 @@ + --files-to-instrument=coverage_instrumentation_input.txt + """ + +-from __future__ import print_function ++ + + import argparse + import os +--- a/src/3rdparty/chromium/build/toolchain/get_concurrent_links.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/toolchain/get_concurrent_links.py 2025-01-16 02:26:08.524180685 +0800 +@@ -6,7 +6,7 @@ + # This script computs the number of concurrent links we want to run in the build + # as a function of machine spec. It's based on GetDefaultConcurrentLinks in GYP. + +-from __future__ import print_function ++ + + import argparse + import multiprocessing +--- a/src/3rdparty/chromium/build/toolchain/get_cpu_count.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/toolchain/get_cpu_count.py 2025-01-16 02:26:08.524180685 +0800 +@@ -4,7 +4,7 @@ + + # This script shows cpu count to specify capacity of action pool. + +-from __future__ import print_function ++ + + import multiprocessing + import sys +--- a/src/3rdparty/chromium/build/toolchain/mac/filter_libtool.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/toolchain/mac/filter_libtool.py 2025-01-16 02:26:08.525264000 +0800 +@@ -2,7 +2,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import os + import re +--- a/src/3rdparty/chromium/build/toolchain/mac/get_tool_mtime.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/toolchain/mac/get_tool_mtime.py 2025-01-16 02:26:08.525264000 +0800 +@@ -2,7 +2,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import os + import sys +--- a/src/3rdparty/chromium/build/toolchain/mac/linker_driver.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/toolchain/mac/linker_driver.py 2025-01-16 02:26:08.525264000 +0800 +@@ -104,7 +104,7 @@ + except: + # If a linker driver action failed, remove all the outputs to make the + # build step atomic. +- map(_RemovePath, linker_driver_outputs) ++ list(map(_RemovePath, linker_driver_outputs)) + + # Re-report the original failure. 
+ raise +--- a/src/3rdparty/chromium/build/toolchain/win/midl.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/toolchain/win/midl.py 2025-01-16 02:26:08.525264000 +0800 +@@ -2,8 +2,8 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import division +-from __future__ import print_function ++ ++ + + import array + import difflib +--- a/src/3rdparty/chromium/build/toolchain/win/ml.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/toolchain/win/ml.py 2025-01-16 02:26:08.525264000 +0800 +@@ -36,7 +36,7 @@ + + def Subtract(nt, **kwargs): + """Subtract(nt, f=2) returns a new namedtuple with 2 subtracted from nt.f""" +- return nt._replace(**{k: getattr(nt, k) - v for k, v in kwargs.iteritems()}) ++ return nt._replace(**{k: getattr(nt, k) - v for k, v in kwargs.items()}) + + + def MakeDeterministic(objdata): +--- a/src/3rdparty/chromium/build/toolchain/win/setup_toolchain.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/toolchain/win/setup_toolchain.py 2025-01-16 02:26:08.525264000 +0800 +@@ -10,7 +10,7 @@ + # win tool. The script assumes that the root build directory is the current dir + # and the files will be written to the current directory. + +-from __future__ import print_function ++ + + import errno + import json +@@ -184,7 +184,7 @@ + CreateProcess documentation for more details.""" + block = '' + nul = '\0' +- for key, value in envvar_dict.items(): ++ for key, value in list(envvar_dict.items()): + block += key + '=' + value + nul + block += nul + return block +@@ -279,7 +279,7 @@ + lib = [p.replace('"', r'\"') for p in env['LIB'].split(';') if p] + # Make lib path relative to builddir when cwd and sdk in same drive. + try: +- lib = map(os.path.relpath, lib) ++ lib = list(map(os.path.relpath, lib)) + except ValueError: + pass + +--- a/src/3rdparty/chromium/build/toolchain/win/tool_wrapper.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/toolchain/win/tool_wrapper.py 2025-01-16 02:26:08.525264000 +0800 +@@ -8,7 +8,7 @@ + is used to set up calls to tools used by the build that need wrappers. + """ + +-from __future__ import print_function ++ + + import os + import re +@@ -185,7 +185,7 @@ + env = self._GetEnv(arch) + # TODO(scottmg): This is a temporary hack to get some specific variables + # through to actions that are set after GN-time. http://crbug.com/333738. +- for k, v in os.environ.items(): ++ for k, v in list(os.environ.items()): + if k not in env: + env[k] = v + args = open(rspfile).read() +--- a/src/3rdparty/chromium/build/toolchain/win/rc/rc.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/toolchain/win/rc/rc.py 2025-01-16 02:26:08.525264000 +0800 +@@ -15,7 +15,7 @@ + /nologo Ignored (rc.py doesn't print a logo by default). + /showIncludes Print referenced header and resource files.""" + +-from __future__ import print_function ++ + from collections import namedtuple + import codecs + import os +--- a/src/3rdparty/chromium/build/util/android_chrome_version.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/util/android_chrome_version.py 2025-01-16 02:26:08.525264000 +0800 +@@ -106,7 +106,7 @@ + } + + # Expose the available choices to other scripts. 
+-ARCH_CHOICES = _ARCH_TO_MFG_AND_BITNESS.keys()
++ARCH_CHOICES = list(_ARCH_TO_MFG_AND_BITNESS.keys())
+ """
+ The architecture preference is encoded into the version_code for devices
+ that support multiple architectures. (exploiting play store logic that pushes
+--- a/src/3rdparty/chromium/build/util/generate_wrapper.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/build/util/generate_wrapper.py 2025-01-16 02:26:08.525264000 +0800
+@@ -140,7 +140,7 @@
+ help='Path to the output directory.')
+ parser.add_argument(
+ '--script-language',
+- choices=SCRIPT_TEMPLATES.keys(),
++ choices=list(SCRIPT_TEMPLATES.keys()),
+ help='Language in which the wrapper script will be written.')
+ parser.add_argument(
+ 'executable_args', nargs='*',
+--- a/src/3rdparty/chromium/build/util/lastchange.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/build/util/lastchange.py 2025-01-16 02:26:08.525264000 +0800
+@@ -6,7 +6,7 @@
+ """
+ lastchange.py -- Chromium revision fetching utility.
+ """
+-from __future__ import print_function
++ 
+ 
+ import argparse
+ import collections
+--- a/src/3rdparty/chromium/build/util/version.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/build/util/version.py 2025-01-16 02:26:08.525264000 +0800
+@@ -7,7 +7,7 @@
+ version.py -- Chromium version string substitution utility.
+ """
+ 
+-from __future__ import print_function
++ 
+ 
+ import argparse
+ import os
+@@ -70,7 +70,7 @@
+ contains any @KEYWORD@ strings expecting them to be recursively
+ substituted, okay?
+ """
+- for key, val in values.items():
++ for key, val in list(values.items()):
+ try:
+ contents = contents.replace('@' + key + '@', val)
+ except TypeError:
+@@ -189,12 +189,12 @@
+ """
+ values = FetchValues(options.file, options.official)
+ 
+- for key, val in evals.items():
++ for key, val in list(evals.items()):
+ values[key] = str(eval(val, globals(), values))
+ 
+ if options.os == 'android':
+ android_chrome_version_codes = android_chrome_version.GenerateVersionCodes(
+- values, options.arch, options.next)
++ values, options.arch, options.next)  # argparse attribute from --next, not an iterator; 2to3's "__next__" rewrite was wrong
+ values.update(android_chrome_version_codes)
+ 
+ return values
+--- a/src/3rdparty/chromium/build/util/version_test.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/build/util/version_test.py 2025-01-16 02:26:08.525264000 +0800
+@@ -95,7 +95,7 @@
+ result = {}
+ version.FetchValuesFromFile(result, self._CHROME_VERSION_FILE)
+ 
+- for key, val in result.iteritems():
++ for key, val in result.items():
+ self.assertIsInstance(key, str)
+ self.assertIsInstance(val, str)
+ 
+@@ -105,15 +105,15 @@
+ get_new_args=lambda args: self._EXAMPLE_ANDROID_ARGS)
+ contents = output['contents']
+ 
+- self.assertRegexpMatches(contents, r'\bchrome_version_code = "\d+"\s')
+- self.assertRegexpMatches(contents,
++ self.assertRegex(contents, r'\bchrome_version_code = "\d+"\s')
++ self.assertRegex(contents,
+ r'\bchrome_modern_version_code = "\d+"\s')
+- self.assertRegexpMatches(contents, r'\bmonochrome_version_code = "\d+"\s')
+- self.assertRegexpMatches(contents, r'\btrichrome_version_code = "\d+"\s')
+- self.assertRegexpMatches(contents,
++ self.assertRegex(contents, r'\bmonochrome_version_code = "\d+"\s')
++ self.assertRegex(contents, r'\btrichrome_version_code = "\d+"\s')
++ self.assertRegex(contents,
+ r'\bwebview_stable_version_code = "\d+"\s')
+- self.assertRegexpMatches(contents, r'\bwebview_beta_version_code = "\d+"\s')
+- self.assertRegexpMatches(contents, r'\bwebview_dev_version_code = "\d+"\s')
++ 
self.assertRegex(contents, r'\bwebview_beta_version_code = "\d+"\s') ++ self.assertRegex(contents, r'\bwebview_dev_version_code = "\d+"\s') + + def testBuildOutputAndroidArchVariantsArm64(self): + """Assert 64-bit-specific version codes""" +@@ -129,13 +129,13 @@ + output = self._RunBuildOutput(get_new_args=lambda args: new_args) + contents = output['contents'] + +- self.assertRegexpMatches(contents, ++ self.assertRegex(contents, + r'\bmonochrome_64_32_version_code = "\d+"\s') +- self.assertRegexpMatches(contents, ++ self.assertRegex(contents, + r'\bmonochrome_64_version_code = "\d+"\s') +- self.assertRegexpMatches(contents, ++ self.assertRegex(contents, + r'\btrichrome_64_32_version_code = "\d+"\s') +- self.assertRegexpMatches(contents, ++ self.assertRegex(contents, + r'\btrichrome_64_version_code = "\d+"\s') + + def testBuildOutputAndroidArchVariantsX64(self): +@@ -152,13 +152,13 @@ + output = self._RunBuildOutput(get_new_args=lambda args: new_args) + contents = output['contents'] + +- self.assertRegexpMatches(contents, ++ self.assertRegex(contents, + r'\bmonochrome_64_32_version_code = "\d+"\s') +- self.assertRegexpMatches(contents, ++ self.assertRegex(contents, + r'\bmonochrome_64_version_code = "\d+"\s') +- self.assertRegexpMatches(contents, ++ self.assertRegex(contents, + r'\btrichrome_64_32_version_code = "\d+"\s') +- self.assertRegexpMatches(contents, ++ self.assertRegex(contents, + r'\btrichrome_64_version_code = "\d+"\s') + + def testBuildOutputAndroidChromeArchInput(self): +--- a/src/3rdparty/chromium/build/util/lib/common/chrome_test_server_spawner.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/util/lib/common/chrome_test_server_spawner.py 2025-01-16 02:26:08.525264000 +0800 +@@ -9,7 +9,7 @@ + """ + # pylint: disable=W0702 + +-import BaseHTTPServer ++import http.server + import json + import logging + import os +@@ -19,7 +19,7 @@ + import sys + import threading + import time +-import urlparse ++import urllib.parse + + + SERVER_TYPES = { +@@ -202,7 +202,7 @@ + self.command_line.append('--startup-pipe=%d' % self.pipe_out) + + # Pass the remaining arguments as-is. +- for key, values in args_copy.iteritems(): ++ for key, values in args_copy.items(): + if not isinstance(values, list): + values = [values] + for value in values: +@@ -215,7 +215,7 @@ + # This is required to avoid subtle deadlocks that could be caused by the + # test server child process inheriting undesirable file descriptors such as + # file lock file descriptors. 
+- for fd in xrange(0, 1024): ++ for fd in range(0, 1024): + if fd != self.pipe_out: + try: + os.close(fd) +@@ -296,7 +296,7 @@ + self.wait_event.wait() + + +-class SpawningServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): ++class SpawningServerRequestHandler(http.server.BaseHTTPRequestHandler): + """A handler used to process http GET/POST request.""" + + def _SendResponse(self, response_code, response_reason, additional_headers, +@@ -404,7 +404,7 @@ + pass + + def do_POST(self): +- parsed_path = urlparse.urlparse(self.path) ++ parsed_path = urllib.parse.urlparse(self.path) + action = parsed_path.path + _logger.info('Action for POST method is: %s.', action) + if action == '/start': +@@ -414,9 +414,9 @@ + _logger.info('Encounter unknown request: %s.', action) + + def do_GET(self): +- parsed_path = urlparse.urlparse(self.path) ++ parsed_path = urllib.parse.urlparse(self.path) + action = parsed_path.path +- params = urlparse.parse_qs(parsed_path.query, keep_blank_values=1) ++ params = urllib.parse.parse_qs(parsed_path.query, keep_blank_values=1) + _logger.info('Action for GET method is: %s.', action) + for param in params: + _logger.info('%s=%s', param, params[param][0]) +@@ -437,7 +437,7 @@ + """The class used to start/stop a http server.""" + + def __init__(self, test_server_spawner_port, port_forwarder, max_instances): +- self.server = BaseHTTPServer.HTTPServer(('', test_server_spawner_port), ++ self.server = http.server.HTTPServer(('', test_server_spawner_port), + SpawningServerRequestHandler) + self.server_port = self.server.server_port + _logger.info('Started test server spawner on port: %d.', self.server_port) +--- a/src/3rdparty/chromium/build/util/lib/common/perf_tests_results_helper.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/util/lib/common/perf_tests_results_helper.py 2025-01-16 02:26:08.525264000 +0800 +@@ -2,7 +2,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import re + import sys +@@ -11,7 +11,7 @@ + import logging + import math + +-import perf_result_data_type ++from . import perf_result_data_type + + + # Mapping from result type to test output +--- a/src/3rdparty/chromium/build/util/lib/common/unittest_util.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/util/lib/common/unittest_util.py 2025-01-16 02:26:08.525264000 +0800 +@@ -82,7 +82,7 @@ + + def GetTestNamesFromSuite(suite): + """Returns a list of every test name in the given suite.""" +- return map(lambda x: GetTestName(x), GetTestsFromSuite(suite)) ++ return [GetTestName(x) for x in GetTestsFromSuite(suite)] + + + def GetTestName(test): +--- a/src/3rdparty/chromium/build/util/lib/common/unittest_util_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/util/lib/common/unittest_util_test.py 2025-01-16 02:26:08.525264000 +0800 +@@ -8,7 +8,7 @@ + import logging + import sys + import unittest +-import unittest_util ++from . 
import unittest_util + + + class FilterTestNamesTest(unittest.TestCase): +@@ -25,19 +25,19 @@ + + def testMatchAll(self): + x = unittest_util.FilterTestNames(self.possible_list, "*") +- self.assertEquals(x, self.possible_list) ++ self.assertEqual(x, self.possible_list) + + def testMatchPartial(self): + x = unittest_util.FilterTestNames(self.possible_list, "Foo.*") +- self.assertEquals(x, ["Foo.One", "Foo.Two", "Foo.Three"]) ++ self.assertEqual(x, ["Foo.One", "Foo.Two", "Foo.Three"]) + + def testMatchFull(self): + x = unittest_util.FilterTestNames(self.possible_list, "Foo.Two") +- self.assertEquals(x, ["Foo.Two"]) ++ self.assertEqual(x, ["Foo.Two"]) + + def testMatchTwo(self): + x = unittest_util.FilterTestNames(self.possible_list, "Bar.*:Foo.*") +- self.assertEquals(x, ["Bar.One", ++ self.assertEqual(x, ["Bar.One", + "Bar.Two", + "Bar.Three", + "Foo.One", +@@ -46,14 +46,14 @@ + + def testMatchWithNegative(self): + x = unittest_util.FilterTestNames(self.possible_list, "Bar.*:Foo.*-*.Three") +- self.assertEquals(x, ["Bar.One", ++ self.assertEqual(x, ["Bar.One", + "Bar.Two", + "Foo.One", + "Foo.Two"]) + + def testMatchOverlapping(self): + x = unittest_util.FilterTestNames(self.possible_list, "Bar.*:*.Two") +- self.assertEquals(x, ["Bar.One", ++ self.assertEqual(x, ["Bar.One", + "Bar.Two", + "Bar.Three", + "Foo.Two", +--- a/src/3rdparty/chromium/build/util/lib/common/util.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/util/lib/common/util.py 2025-01-16 02:26:08.525264000 +0800 +@@ -5,14 +5,14 @@ + """Generic utilities for all python scripts.""" + + import atexit +-import httplib ++import http.client + import os + import signal + import stat + import subprocess + import sys + import tempfile +-import urlparse ++import urllib.parse + + + def GetPlatformName(): +@@ -136,9 +136,9 @@ + Returns: + True if url exists, otherwise False. + """ +- parsed = urlparse.urlparse(url) ++ parsed = urllib.parse.urlparse(url) + try: +- conn = httplib.HTTPConnection(parsed.netloc) ++ conn = http.client.HTTPConnection(parsed.netloc) + conn.request('HEAD', parsed.path) + response = conn.getresponse() + except (socket.gaierror, socket.error): +--- a/src/3rdparty/chromium/build/win/copy_cdb_to_output.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/win/copy_cdb_to_output.py 2025-01-16 02:26:08.525264000 +0800 +@@ -3,7 +3,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import glob + import hashlib +--- a/src/3rdparty/chromium/build/win/gn_meta_sln.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/win/gn_meta_sln.py 2025-01-16 02:26:08.525264000 +0800 +@@ -6,7 +6,7 @@ + # Helper utility to combine GN-generated Visual Studio projects into + # a single meta-solution. + +-from __future__ import print_function ++ + + import os + import glob +@@ -34,7 +34,7 @@ + if " [*] + +-from __future__ import print_function ++ + + import difflib + import distutils.dir_util +--- a/src/3rdparty/chromium/build/win/use_ansi_codes.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/build/win/use_ansi_codes.py 2025-01-16 02:26:08.525264000 +0800 +@@ -4,7 +4,7 @@ + # found in the LICENSE file. 
+ """Prints if the the terminal is likely to understand ANSI codes.""" + +-from __future__ import print_function ++ + + import os + +--- a/src/3rdparty/chromium/buildtools/ensure_gn_version.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/buildtools/ensure_gn_version.py 2025-01-16 02:26:08.525264000 +0800 +@@ -15,7 +15,7 @@ + TODO(crbug.com/944667): remove this script when it is no longer needed. + """ + +-from __future__ import print_function ++ + + import argparse + import errno +@@ -97,7 +97,7 @@ + url = 'https://chrome-infra-packages.appspot.com/dl/gn/gn/%s/+/%s' % ( + platform, args.version) + try: +- zipdata = urllib.urlopen(url).read() ++ zipdata = urllib.request.urlopen(url).read() + except urllib.HTTPError as e: + print('Failed to download the package from %s: %d %s' % ( + url, e.code, e.reason)) +--- a/src/3rdparty/chromium/buildtools/checkdeps/builddeps.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/buildtools/checkdeps/builddeps.py 2025-01-16 02:26:08.526347315 +0800 +@@ -180,7 +180,7 @@ + if self._ignore_specific_rules: + return rules + +- for regexp, specific_rules in specific_includes.iteritems(): ++ for regexp, specific_rules in specific_includes.items(): + for rule_str in specific_rules: + ApplyOneRule(rule_str, regexp) + +@@ -210,7 +210,7 @@ + + # Check the DEPS file in this directory. + if self.verbose: +- print 'Applying rules from', dir_path_local_abs ++ print('Applying rules from', dir_path_local_abs) + def FromImpl(*_): + pass # NOP function so "From" doesn't fail. + +@@ -248,9 +248,9 @@ + if os.path.isfile(deps_file_path) and not ( + self._under_test and + os.path.basename(dir_path_local_abs) == 'checkdeps'): +- execfile(deps_file_path, global_scope, local_scope) ++ exec(compile(open(deps_file_path, "rb").read(), deps_file_path, 'exec'), global_scope, local_scope) + elif self.verbose: +- print ' No deps file found in', dir_path_local_abs ++ print(' No deps file found in', dir_path_local_abs) + + # Even if a DEPS file does not exist we still invoke ApplyRules + # to apply the implicit "allow" rule for the current directory +--- a/src/3rdparty/chromium/buildtools/checkdeps/checkdeps.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/buildtools/checkdeps/checkdeps.py 2025-01-16 02:26:08.526347315 +0800 +@@ -68,7 +68,7 @@ + if self.results_formatter.GetResults(): + self.results_formatter.PrintResults() + return 1 +- print '\nSUCCESS\n' ++ print('\nSUCCESS\n') + return 0 + + def CheckDirectory(self, start_dir): +@@ -185,7 +185,7 @@ + verbose=self.verbose, root_dir=self.base_directory)) + + def PrintUsage(): +- print """Usage: python checkdeps.py [--root ] [tocheck] ++ print("""Usage: python checkdeps.py [--root ] [tocheck] + + --root ROOT Specifies the repository root. This defaults to "../../.." + relative to the script file. 
This will be correct given the +@@ -198,7 +198,7 @@ + + Examples: + python checkdeps.py +- python checkdeps.py --root c:\\source chrome""" ++ python checkdeps.py --root c:\\source chrome""") + + + def main(): +@@ -266,12 +266,12 @@ + return 1 + + if not start_dir.startswith(deps_checker.base_directory): +- print 'Directory to check must be a subdirectory of the base directory,' +- print 'but %s is not a subdirectory of %s' % (start_dir, base_directory) ++ print('Directory to check must be a subdirectory of the base directory,') ++ print('but %s is not a subdirectory of %s' % (start_dir, base_directory)) + return 1 + +- print 'Using base directory:', base_directory +- print 'Checking:', start_dir ++ print('Using base directory:', base_directory) ++ print('Checking:', start_dir) + + if options.generate_temp_rules: + deps_checker.results_formatter = results.TemporaryRulesFormatter() +--- a/src/3rdparty/chromium/buildtools/checkdeps/checkdeps_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/buildtools/checkdeps/checkdeps_test.py 2025-01-16 02:26:08.526347315 +0800 +@@ -31,9 +31,9 @@ + + problems = self.deps_checker.results_formatter.GetResults() + if skip_tests: +- self.failUnlessEqual(4, len(problems)) ++ self.assertEqual(4, len(problems)) + else: +- self.failUnlessEqual(5, len(problems)) ++ self.assertEqual(5, len(problems)) + + def VerifySubstringsInProblems(key_path, substrings_in_sequence): + """Finds the problem in |problems| that contains |key_path|, +@@ -48,7 +48,7 @@ + if index != -1: + for substring in substrings_in_sequence: + index = problem.find(substring, index + 1) +- self.failUnless(index != -1, '%s in %s' % (substring, problem)) ++ self.assertTrue(index != -1, '%s in %s' % (substring, problem)) + found = True + break + if not found: +@@ -103,16 +103,16 @@ + return self.deps_checker.results_formatter.GetResults() + + def testCountViolations(self): +- self.failUnlessEqual('11', self.CountViolations(False)) ++ self.assertEqual('11', self.CountViolations(False)) + + def testCountViolationsIgnoringTempRules(self): +- self.failUnlessEqual('12', self.CountViolations(True)) ++ self.assertEqual('12', self.CountViolations(True)) + + def testCountViolationsWithRelativePath(self): + self.deps_checker.results_formatter = results.CountViolationsFormatter() + self.deps_checker.CheckDirectory( + os.path.join('buildtools', 'checkdeps', 'testdata', 'allowed')) +- self.failUnlessEqual('4', self.deps_checker.results_formatter.GetResults()) ++ self.assertEqual('4', self.deps_checker.results_formatter.GetResults()) + + def testTempRulesGenerator(self): + self.deps_checker.results_formatter = results.TemporaryRulesFormatter() +@@ -120,11 +120,11 @@ + os.path.join(self.deps_checker.base_directory, + 'buildtools/checkdeps/testdata/allowed')) + temp_rules = self.deps_checker.results_formatter.GetResults() +- expected = [u' "!buildtools/checkdeps/testdata/disallowed/bad.h",', +- u' "!buildtools/checkdeps/testdata/disallowed/teststuff/bad.h",', +- u' "!third_party/explicitly_disallowed/bad.h",', +- u' "!third_party/no_rule/bad.h",'] +- self.failUnlessEqual(expected, temp_rules) ++ expected = [' "!buildtools/checkdeps/testdata/disallowed/bad.h",', ++ ' "!buildtools/checkdeps/testdata/disallowed/teststuff/bad.h",', ++ ' "!third_party/explicitly_disallowed/bad.h",', ++ ' "!third_party/no_rule/bad.h",'] ++ self.assertEqual(expected, temp_rules) + + def testBadBaseDirectoryNotCheckoutRoot(self): + # This assumes git. It's not a valid test if buildtools is fetched via svn. 
+@@ -138,34 +138,34 @@ + ['#include "buildtools/checkdeps/testdata/allowed/good.h"', + '#include "buildtools/checkdeps/testdata/disallowed/allowed/good.h"'] + ]]) +- self.failIf(problems) ++ self.assertFalse(problems) + + def testCheckAddedIncludesManyGarbageLines(self): + garbage_lines = ["My name is Sam%d\n" % num for num in range(50)] + problems = self.deps_checker.CheckAddedCppIncludes( + [['buildtools/checkdeps/testdata/allowed/test.cc', garbage_lines]]) +- self.failIf(problems) ++ self.assertFalse(problems) + + def testCheckAddedIncludesNoRule(self): + problems = self.deps_checker.CheckAddedCppIncludes( + [['buildtools/checkdeps/testdata/allowed/test.cc', + ['#include "no_rule_for_this/nogood.h"'] + ]]) +- self.failUnless(problems) ++ self.assertTrue(problems) + + def testCheckAddedIncludesSkippedDirectory(self): + problems = self.deps_checker.CheckAddedCppIncludes( + [['buildtools/checkdeps/testdata/disallowed/allowed/skipped/test.cc', + ['#include "whatever/whocares.h"'] + ]]) +- self.failIf(problems) ++ self.assertFalse(problems) + + def testCheckAddedIncludesTempAllowed(self): + problems = self.deps_checker.CheckAddedCppIncludes( + [['buildtools/checkdeps/testdata/allowed/test.cc', + ['#include "buildtools/checkdeps/testdata/disallowed/temporarily_allowed.h"'] + ]]) +- self.failUnless(problems) ++ self.assertTrue(problems) + + def testCopyIsDeep(self): + # Regression test for a bug where we were making shallow copies of +@@ -187,26 +187,26 @@ + ]]) + # With the bug in place, there would be two problems reported, and + # the second would be for foo_unittest.cc. +- self.failUnless(len(problems) == 1) +- self.failUnless(problems[0][0].endswith('/test.cc')) ++ self.assertTrue(len(problems) == 1) ++ self.assertTrue(problems[0][0].endswith('/test.cc')) + + def testTraversalIsOrdered(self): + dirs_traversed = [] + for rules, filenames in self.deps_checker.GetAllRulesAndFiles(dir_name='buildtools'): +- self.failUnlessEqual(type(filenames), list) +- self.failUnlessEqual(filenames, sorted(filenames)) ++ self.assertEqual(type(filenames), list) ++ self.assertEqual(filenames, sorted(filenames)) + if filenames: + dir_names = set(os.path.dirname(file) for file in filenames) +- self.failUnlessEqual(1, len(dir_names)) ++ self.assertEqual(1, len(dir_names)) + dirs_traversed.append(dir_names.pop()) +- self.failUnlessEqual(dirs_traversed, sorted(dirs_traversed)) ++ self.assertEqual(dirs_traversed, sorted(dirs_traversed)) + + def testCheckPartialImportsAreAllowed(self): + problems = self.deps_checker.CheckAddedProtoImports( + [['buildtools/checkdeps/testdata/test.proto', + ['import "no_rule_for_this/nogood.proto"'] + ]]) +- self.failIf(problems) ++ self.assertFalse(problems) + + def testCheckAddedFullPathImportsAllowed(self): + problems = self.deps_checker.CheckAddedProtoImports( +@@ -214,28 +214,28 @@ + ['import "buildtools/checkdeps/testdata/allowed/good.proto"', + 'import "buildtools/checkdeps/testdata/disallowed/sub_folder/good.proto"'] + ]]) +- self.failIf(problems) ++ self.assertFalse(problems) + + def testCheckAddedFullPathImportsDisallowed(self): + problems = self.deps_checker.CheckAddedProtoImports( + [['buildtools/checkdeps/testdata/test.proto', + ['import "buildtools/checkdeps/testdata/disallowed/bad.proto"'] + ]]) +- self.failUnless(problems) ++ self.assertTrue(problems) + + def testCheckAddedFullPathImportsManyGarbageLines(self): + garbage_lines = ["My name is Sam%d\n" % num for num in range(50)] + problems = self.deps_checker.CheckAddedProtoImports( + 
[['buildtools/checkdeps/testdata/test.proto',
+ garbage_lines]])
+- self.failIf(problems)
++ self.assertFalse(problems)
+ 
+ def testCheckAddedIncludesNoRuleFullPath(self):
+ problems = self.deps_checker.CheckAddedProtoImports(
+ [['buildtools/checkdeps/testdata/test.proto',
+ ['import "tools/some.proto"']
+ ]])
+- self.failUnless(problems)
++ self.assertTrue(problems)
+ 
+ if __name__ == '__main__':
+ unittest.main()
+--- a/src/3rdparty/chromium/buildtools/checkdeps/cpp_checker.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/buildtools/checkdeps/cpp_checker.py 2025-01-16 02:26:08.526347315 +0800
+@@ -64,7 +64,7 @@
+ # Don't fail when no directory is specified. We may want to be more
+ # strict about this in the future.
+ if self._verbose:
+- print ' WARNING: include specified with no directory: ' + include_path
++ print(' WARNING: include specified with no directory: ' + include_path)
+ return True, None
+ 
+ if self._resolve_dotdot and '../' in include_path:
+@@ -80,7 +80,7 @@
+ 
+ def CheckFile(self, rules, filepath):
+ if self._verbose:
+- print 'Checking: ' + filepath
++ print('Checking: ' + filepath)
+ 
+ dependee_status = results.DependeeStatus(filepath)
+ ret_val = '' # We'll collect the error messages in here
+--- a/src/3rdparty/chromium/buildtools/checkdeps/graphdeps.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/buildtools/checkdeps/graphdeps.py 2025-01-16 02:26:08.526347315 +0800
+@@ -187,7 +187,7 @@
+ # Edges and nodes are emphasized with color and line/border weight depending
+ # on how many of incl/excl/hilite_fanins/hilite_fanouts filters they hit,
+ # and in what way.
+- for src in deps_graph.keys():
++ for src in list(deps_graph.keys()):
+ for (dst, allow) in deps_graph[src]:
+ if allow == Rule.DISALLOW and self.hide_disallowed_deps:
+ continue
+@@ -233,7 +233,7 @@
+ 
+ # Reformat the computed raw node attributes into a final DOT representation.
+ nodes = []
+- for (node, attrs) in node_props.iteritems():
++ for (node, attrs) in node_props.items():
+ attr_strs = []
+ if attrs['hilite']:
+ attr_strs.append('style=filled,fillcolor=%s' % attrs['hilite'])
+@@ -253,7 +253,7 @@
+ 
+ 
+ def PrintUsage():
+- print """Usage: python graphdeps.py [--root <root>]
++ print("""Usage: python graphdeps.py [--root <root>]
+ 
+ --root ROOT Specifies the repository root. This defaults to "../../.."
+ relative to the script file. 
This will be correct given the +@@ -280,7 +280,7 @@ + --excl='.*->third_party' \ + --fanin='^(apps|content/browser/renderer_host)$' \ + --ignore-specific-rules \ +- --ignore-temp-rules""" ++ --ignore-temp-rules""") + + + def main(): +@@ -392,11 +392,11 @@ + PrintUsage() + return 1 + +- print 'Using base directory: ', deps_grapher.base_directory +- print 'include nodes : ', options.incl +- print 'exclude nodes : ', options.excl +- print 'highlight fanins of : ', options.hilite_fanins +- print 'highlight fanouts of: ', options.hilite_fanouts ++ print('Using base directory: ', deps_grapher.base_directory) ++ print('include nodes : ', options.incl) ++ print('exclude nodes : ', options.excl) ++ print('highlight fanins of : ', options.hilite_fanins) ++ print('highlight fanouts of: ', options.hilite_fanouts) + + deps_grapher.DumpDependencies() + return 0 +--- a/src/3rdparty/chromium/buildtools/checkdeps/java_checker.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/buildtools/checkdeps/java_checker.py 2025-01-16 02:26:08.526347315 +0800 +@@ -112,21 +112,21 @@ + + def _PrescanFile(self, filepath, added_classset): + if self._verbose: +- print 'Prescanning: ' + filepath ++ print('Prescanning: ' + filepath) + full_class_name = self._GetClassFullName(filepath) + if full_class_name: + if full_class_name in self._classmap: + if self._verbose or full_class_name in added_classset: + if not any(re.match(i, filepath) for i in + self._allow_multiple_definitions): +- print 'WARNING: multiple definitions of %s:' % full_class_name +- print ' ' + filepath +- print ' ' + self._classmap[full_class_name] +- print ++ print('WARNING: multiple definitions of %s:' % full_class_name) ++ print(' ' + filepath) ++ print(' ' + self._classmap[full_class_name]) ++ print() + else: + self._classmap[full_class_name] = filepath + elif self._verbose: +- print 'WARNING: no package definition found in %s' % filepath ++ print('WARNING: no package definition found in %s' % filepath) + + def CheckLine(self, rules, line, filepath, fail_on_temp_allow=False): + """Checks the given line with the given rule set. +@@ -157,7 +157,7 @@ + + def CheckFile(self, rules, filepath): + if self._verbose: +- print 'Checking: ' + filepath ++ print('Checking: ' + filepath) + + dependee_status = results.DependeeStatus(filepath) + with codecs.open(filepath, encoding='utf-8') as f: +--- a/src/3rdparty/chromium/buildtools/checkdeps/proto_checker.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/buildtools/checkdeps/proto_checker.py 2025-01-16 02:26:08.526347315 +0800 +@@ -67,7 +67,7 @@ + # Don't fail when no directory is specified. We may want to be more + # strict about this in the future. 
+ if self._verbose: +- print ' WARNING: import specified with no directory: ' + import_path ++ print(' WARNING: import specified with no directory: ' + import_path) + return True, None + + if self._resolve_dotdot and '../' in import_path: +@@ -87,7 +87,7 @@ + + def CheckFile(self, rules, filepath): + if self._verbose: +- print 'Checking: ' + filepath ++ print('Checking: ' + filepath) + + dependee_status = results.DependeeStatus(filepath) + last_import = 0 +--- a/src/3rdparty/chromium/buildtools/checkdeps/results.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/buildtools/checkdeps/results.py 2025-01-16 02:26:08.526347315 +0800 +@@ -96,9 +96,9 @@ + + def PrintResults(self): + for result in self.results: +- print result ++ print(result) + if self.results: +- print '\nFAILED\n' ++ print('\nFAILED\n') + + + class JSONResultsFormatter(ResultsFormatter): +@@ -133,7 +133,7 @@ + self.wrapped_formatter.PrintResults() + return + +- print self.results ++ print(self.results) + + + class TemporaryRulesFormatter(ResultsFormatter): +@@ -154,7 +154,7 @@ + + def PrintResults(self): + for result in self.GetResults(): +- print result ++ print(result) + + + class CountViolationsFormatter(ResultsFormatter): +@@ -175,4 +175,4 @@ + return '%d' % self.count + + def PrintResults(self): +- print self.count ++ print(self.count) +--- a/src/3rdparty/chromium/buildtools/checkdeps/rules.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/buildtools/checkdeps/rules.py 2025-01-16 02:26:08.526347315 +0800 +@@ -112,7 +112,7 @@ + def __str__(self): + result = ['Rules = {\n (apply to all files): [\n%s\n ],' % '\n'.join( + ' %s' % x for x in self._general_rules)] +- for regexp, rules in self._specific_rules.iteritems(): ++ for regexp, rules in self._specific_rules.items(): + result.append(' (limited to files matching %s): [\n%s\n ]' % ( + regexp, '\n'.join(' %s' % x for x in rules))) + result.append(' }') +@@ -132,7 +132,7 @@ + if include_general_rules: + AddDependencyTuplesImpl(deps, self._general_rules) + if include_specific_rules: +- for regexp, rules in self._specific_rules.iteritems(): ++ for regexp, rules in self._specific_rules.items(): + AddDependencyTuplesImpl(deps, rules, "/" + regexp) + return deps + +@@ -175,7 +175,7 @@ + file located at |dependee_path|. + """ + dependee_filename = os.path.basename(dependee_path) +- for regexp, specific_rules in self._specific_rules.iteritems(): ++ for regexp, specific_rules in self._specific_rules.items(): + if re.match(regexp, dependee_filename): + for rule in specific_rules: + if rule.ChildOrMatch(include_path): +--- a/src/3rdparty/chromium/chrome/browser/resources/optimize_webui.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/chrome/browser/resources/optimize_webui.py 2025-01-16 02:26:08.526347315 +0800 +@@ -108,8 +108,7 @@ + _URL_MAPPINGS.append(('//' + redirect_url, file_path)) + + +-_VULCANIZE_REDIRECT_ARGS = list(itertools.chain.from_iterable(map( +- lambda m: ['--redirect', '%s|%s' % (m[0], m[1])], _URL_MAPPINGS))) ++_VULCANIZE_REDIRECT_ARGS = list(itertools.chain.from_iterable([['--redirect', '%s|%s' % (m[0], m[1])] for m in _URL_MAPPINGS])) + + + def _undo_mapping(mappings, url): +@@ -134,9 +133,7 @@ + + # Add a slash in front of every dependency that is not a chrome:// URL, so + # that we can map it to the correct source file path below. 
+- request_list = map( +- lambda dep: '/' + dep if not (dep.startswith('chrome://') or dep.startswith('//')) else dep, +- request_list) ++ request_list = ['/' + dep if not (dep.startswith('chrome://') or dep.startswith('//')) else dep for dep in request_list] + + # Undo the URL mappings applied by vulcanize to get file paths relative to + # current working directory. +@@ -146,7 +143,7 @@ + ] + + deps = [_undo_mapping(url_mappings, u) for u in request_list] +- deps = map(os.path.normpath, deps) ++ deps = list(map(os.path.normpath, deps)) + + # If the input was a folder holding an unpacked .pak file, the generated + # depfile should not list files already in the .pak file. +--- a/src/3rdparty/chromium/chrome/browser/resources/unpack_pak.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/chrome/browser/resources/unpack_pak.py 2025-01-16 02:26:08.526347315 +0800 +@@ -77,7 +77,7 @@ + + root_dir = pak_base_dir if pak_base_dir else pak_dir + # Extract packed files, while preserving directory structure. +- for (resource_id, text) in data.resources.iteritems(): ++ for (resource_id, text) in data.resources.items(): + UnpackResource(root_dir, out_path, excludes or [], + resource_filenames[resource_ids[resource_id]], text) + +--- a/src/3rdparty/chromium/chrome/browser/resources/vr/assets/PRESUBMIT.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/chrome/browser/resources/vr/assets/PRESUBMIT.py 2025-01-16 02:26:08.526347315 +0800 +@@ -61,7 +61,7 @@ + return [ + output_api.PresubmitError( + 'Must have same asset files for %s in \'%s\'.' % +- (changed_asset_files.keys(), ++ (list(changed_asset_files.keys()), + input_api.os_path.dirname( + input_api.AffectedFiles()[0].LocalPath()))) + ] +--- a/src/3rdparty/chromium/chrome/browser/resources/vr/assets/push_assets_component.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/chrome/browser/resources/vr/assets/push_assets_component.py 2025-01-16 02:26:08.526347315 +0800 +@@ -34,8 +34,8 @@ + + + def PrintInfo(header, items): +- print('\n%s' % header) +- print ' ', '\n '.join(items) ++ print(('\n%s' % header)) ++ print(' ', '\n '.join(items)) + + + def main(): +@@ -76,8 +76,8 @@ + PrintInfo('Which pushes the following file', [zip_path]) + PrintInfo('Which contains the files', zip_files) + +- if raw_input('\nAre you sure (y/N) ').lower() != 'y': +- print 'aborting' ++ if input('\nAre you sure (y/N) ').lower() != 'y': ++ print('aborting') + return 1 + return subprocess.call(command, cwd=temp_dir) + +--- a/src/3rdparty/chromium/components/assist_ranker/print_example_preprocessor_config.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/components/assist_ranker/print_example_preprocessor_config.py 2025-01-16 02:26:08.526347315 +0800 +@@ -61,31 +61,31 @@ + # Indent description by a tab and wrap text. + max_len = 80 - 8 # Leave at least 8 columns for tab width. 
+ description += ('\n\t').join(textwrap.wrap(bucket_str, max_len)) +- print description ++ print(description) + return 0 + + + def Main(args): + if len(args) != 2: +- print 'Usage: %s ' % ( +- __file__) ++ print('Usage: %s ' % ( ++ __file__)) + return 1 + + out_dir = args[0] + if not os.path.isdir(out_dir): +- print 'Could not find out directory: %s' % out_dir ++ print('Could not find out directory: %s' % out_dir) + return 1 + + pb_file = args[1] + if not os.path.isfile(pb_file): +- print 'Protobuf file not found: %s' % pb_file ++ print('Protobuf file not found: %s' % pb_file) + return 1 + + proto_dir = os.path.join(out_dir, 'pyproto/components/assist_ranker/proto') + if not os.path.isdir(proto_dir): +- print 'Proto directory not found: %s' % proto_dir +- print 'Build the "components/assist_ranker/proto" target' +- print ' (usually built with chrome)' ++ print('Proto directory not found: %s' % proto_dir) ++ print('Build the "components/assist_ranker/proto" target') ++ print(' (usually built with chrome)') + return 1 + + # Allow importing the ExamplePreprocessorConfig proto definition. +--- a/src/3rdparty/chromium/components/autofill_assistant/browser/devtools/devtools_api/client_api_generator.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/components/autofill_assistant/browser/devtools/devtools_api/client_api_generator.py 2025-01-16 02:26:08.526347315 +0800 +@@ -435,7 +435,7 @@ + + if not isinstance(json, dict): + return +- for value in json.itervalues(): ++ for value in json.values(): + GetDomainDepsFromRefs(domain_name, value) + + if '$ref' in json: +--- a/src/3rdparty/chromium/components/certificate_transparency/tools/make_ct_known_logs_list.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/components/certificate_transparency/tools/make_ct_known_logs_list.py 2025-01-16 02:26:08.526347315 +0800 +@@ -83,7 +83,7 @@ + + def _is_log_disqualified(log): + # Disqualified logs are denoted with state="retired" +- assert (len(log.get("state").keys()) == 1) ++ assert (len(list(log.get("state").keys())) == 1) + log_state = list(log.get("state"))[0] + return log_state == "retired" + +@@ -157,7 +157,7 @@ + + + def _is_log_once_or_currently_qualified(log): +- assert (len(log.get("state").keys()) == 1) ++ assert (len(list(log.get("state").keys())) == 1) + return list(log.get("state"))[0] not in ("pending", "rejected") + + +@@ -193,7 +193,7 @@ + + def main(): + if len(sys.argv) != 3: +- print("usage: %s in_loglist_json out_header" % sys.argv[0]) ++ print(("usage: %s in_loglist_json out_header" % sys.argv[0])) + return 1 + with open(sys.argv[1], "r") as infile, open(sys.argv[2], "w") as outfile: + generate_cpp_file(infile, outfile) +--- a/src/3rdparty/chromium/components/certificate_transparency/tools/make_ct_known_logs_list_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/components/certificate_transparency/tools/make_ct_known_logs_list_unittest.py 2025-01-16 02:26:08.527430630 +0800 +@@ -17,7 +17,7 @@ + class FormattingTest(unittest.TestCase): + + def testSplitAndHexifyBinData(self): +- bin_data = bytes(bytearray(range(32, 60))) ++ bin_data = bytes(bytearray(list(range(32, 60)))) + expected_encoded_array = [ + ('"\\x20\\x21\\x22\\x23\\x24\\x25\\x26\\x27\\x28\\x29\\x2a' + '\\x2b\\x2c\\x2d\\x2e\\x2f\\x30"'), +@@ -28,7 +28,7 @@ + expected_encoded_array) + + # This data should fit in exactly one line - 17 bytes. 
+- short_bin_data = bytes(bytearray(range(32, 49)))
++ short_bin_data = bytes(bytearray(list(range(32, 49))))
+ expected_short_array = [
+ ('"\\x20\\x21\\x22\\x23\\x24\\x25\\x26\\x27\\x28\\x29\\x2a'
+ '\\x2b\\x2c\\x2d\\x2e\\x2f\\x30"')
+@@ -38,7 +38,7 @@
+ expected_short_array)
+
+ # This data should fit exactly in two lines - 34 bytes.
+- two_line_data = bytes(bytearray(range(32, 66)))
++ two_line_data = bytes(bytearray(list(range(32, 66))))
+ expected_two_line_data_array = [
+ ('"\\x20\\x21\\x22\\x23\\x24\\x25\\x26\\x27\\x28\\x29\\x2a'
+ '\\x2b\\x2c\\x2d\\x2e\\x2f\\x30"'),
+--- a/src/3rdparty/chromium/components/crash/content/tools/dmp2minidump.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/components/crash/content/tools/dmp2minidump.py 2025-01-16 02:26:08.527430630 +0800
+@@ -25,11 +25,11 @@
+ boundary = dump.readline().strip()[2:]
+ data = parse_multipart(dump, {'boundary': boundary})
+ except:
+- print 'Failed to read dmp file %s' % dump_file
++ print('Failed to read dmp file %s' % dump_file)
+ return
+
+ if not 'upload_file_minidump' in data:
+- print 'Could not find minidump file in dump.'
++ print('Could not find minidump file in dump.')
+ return
+
+ f = open(minidump_file, 'w')
+@@ -39,9 +39,9 @@
+
+ def main():
+ if len(sys.argv) != 3:
+- print 'Usage: %s [dmp file] [minidump]' % sys.argv[0]
+- print ''
+- print 'Extracts the minidump stored in the crash dump file'
++ print('Usage: %s [dmp file] [minidump]' % sys.argv[0])
++ print('')
++ print('Extracts the minidump stored in the crash dump file')
+ return 1
+
+ ProcessDump(sys.argv[1], sys.argv[2])
+--- a/src/3rdparty/chromium/components/crash/content/tools/generate_breakpad_symbols.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/components/crash/content/tools/generate_breakpad_symbols.py 2025-01-16 02:26:08.527430630 +0800
+@@ -15,7 +15,7 @@
+ import multiprocessing
+ import optparse
+ import os
+-import Queue
++import queue as Queue
+ import re
+ import shutil
+ import subprocess
+@@ -35,7 +35,7 @@
+ DUMP_SYMS = 'dump_syms'
+ dump_syms_bin = os.path.join(os.path.expanduser(build_dir), DUMP_SYMS)
+ if not os.access(dump_syms_bin, os.X_OK):
+- print 'Cannot find %s.' % dump_syms_bin
++ print('Cannot find %s.' % dump_syms_bin)
+ return None
+
+ return dump_syms_bin
+@@ -129,7 +129,7 @@
+ for path in candidate_paths:
+ if os.path.exists(path):
+ return path
+- print 'WARNING: no value found for DEVELOPER_DIR. Some commands may fail.'
++ print('WARNING: no value found for DEVELOPER_DIR. Some commands may fail.')
+
+
+ def GetSharedLibraryDependenciesMac(binary, exe_path):
+@@ -195,10 +195,10 @@
+ if dep:
+ deps.append(os.path.normpath(dep))
+ else:
+- print >>sys.stderr, (
++ print((
+ 'ERROR: failed to resolve %s, exe_path %s, loader_path %s, '
+ 'rpaths %s' % (m.group(1), exe_path, loader_path,
+- ', '.join(rpaths)))
++ ', '.join(rpaths))), file=sys.stderr)
+ sys.exit(1)
+ return deps
+@@ -223,7 +223,7 @@
+ elif options.platform == 'chromeos':
+ deps = GetSharedLibraryDependenciesChromeOS(binary)
+ else:
+- print "Platform not supported."
++ print("Platform not supported.")
+ sys.exit(1)
+
+ result = []
+@@ -255,7 +255,7 @@
+ binaries |= new_deps
+ queue.extend(list(new_deps))
+ return binaries
+- print "Platform not supported."
++ print("Platform not supported.") + sys.exit(1) + + +@@ -293,7 +293,7 @@ + def GenerateSymbols(options, binaries): + """Dumps the symbols of binary and places them in the given directory.""" + +- queue = Queue.Queue() ++ queue = queue.Queue() + print_lock = threading.Lock() + + def _Worker(): +@@ -343,22 +343,22 @@ + if not should_dump_syms: + if options.verbose: + with print_lock: +- print "Skipping %s (%s)" % (binary, reason) ++ print("Skipping %s (%s)" % (binary, reason)) + queue.task_done() + continue + + if options.verbose: + with print_lock: +- print "Generating symbols for %s" % binary ++ print("Generating symbols for %s" % binary) + + CreateSymbolDir(options, output_dir, binary_info.hash) + try: + with open(output_path, 'wb') as f: + subprocess.check_call([dump_syms, '-r', binary], stdout=f) +- except Exception, e: ++ except Exception as e: + # Not much we can do about this. + with print_lock: +- print e ++ print(e) + + queue.task_done() + +@@ -394,19 +394,19 @@ + (options, _) = parser.parse_args() + + if not options.symbols_dir: +- print "Required option --symbols-dir missing." ++ print("Required option --symbols-dir missing.") + return 1 + + if not options.build_dir: +- print "Required option --build-dir missing." ++ print("Required option --build-dir missing.") + return 1 + + if not options.binary: +- print "Required option --binary missing." ++ print("Required option --binary missing.") + return 1 + + if not os.access(options.binary, os.X_OK): +- print "Cannot find %s." % options.binary ++ print("Cannot find %s." % options.binary) + return 1 + + if options.clear: +--- a/src/3rdparty/chromium/components/domain_reliability/bake_in_configs.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/components/domain_reliability/bake_in_configs.py 2025-01-16 02:26:08.527430630 +0800 +@@ -8,7 +8,7 @@ + encodes their contents as an array of C strings that gets compiled in to Chrome + and loaded at runtime.""" + +-from __future__ import print_function ++ + + import ast + import json +--- a/src/3rdparty/chromium/components/feed/core/v2/tools/protoc_util.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/components/feed/core/v2/tools/protoc_util.py 2025-01-16 02:26:08.527430630 +0800 +@@ -65,8 +65,8 @@ + glob.glob(os.path.join(root_dir, "out") + "/*/protoc")) + list( + glob.glob(os.path.join(root_dir, "out") + "/*/*/protoc")) + if not len(protoc_list): +- print("Can't find a suitable build output directory", +- "(it should have protoc)") ++ print(("Can't find a suitable build output directory", ++ "(it should have protoc)")) + sys.exit(1) + _protoc_path = protoc_list[0] + return _protoc_path +--- a/src/3rdparty/chromium/components/feed/core/v2/tools/stream_dump.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/components/feed/core/v2/tools/stream_dump.py 2025-01-16 02:26:08.527430630 +0800 +@@ -18,7 +18,7 @@ + import glob + import os + import plyvel +-import protoc_util ++from . 
import protoc_util + import re + import subprocess + import sys +@@ -90,7 +90,7 @@ + with open(join(DUMP_DIR, 'entry{:03d}.textproto'.format(i)), + 'w') as f: + f.write(extract_db_entry(k, v)) +- print('Finished dumping to', DUMP_DIR) ++ print(('Finished dumping to', DUMP_DIR)) + db.close() + + +--- a/src/3rdparty/chromium/components/feed/core/v2/tools/textpb_to_binarypb.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/components/feed/core/v2/tools/textpb_to_binarypb.py 2025-01-16 02:26:08.527430630 +0800 +@@ -18,7 +18,7 @@ + import base64 + import glob + import os +-import protoc_util ++from . import protoc_util + import subprocess + import sys + import urllib.parse +--- a/src/3rdparty/chromium/components/feed/tools/content_dump.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/components/feed/tools/content_dump.py 2025-01-16 02:26:08.527430630 +0800 +@@ -18,7 +18,7 @@ + import glob + import os + import plyvel +-import protoc_util ++from . import protoc_util + import re + import subprocess + import sys +@@ -96,7 +96,7 @@ + return 'search.now.feed.client.StreamSharedState' + if key.startswith('FEATURE::') or key.startswith('FSM::'): + return 'search.now.feed.client.StreamPayload' +- print("Unknown Key kind", key) ++ print(("Unknown Key kind", key)) + sys.exit(1) + + +@@ -131,7 +131,7 @@ + f.write(k) + with open(join(DUMP_DIR, 'entry{:03d}.textproto'.format(i)), 'w') as f: + f.write(extract_db_entry(k, v)) +- print('Finished dumping to', DUMP_DIR) ++ print(('Finished dumping to', DUMP_DIR)) + db.close() + + +--- a/src/3rdparty/chromium/components/feed/tools/mockserver_textpb_to_binary.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/components/feed/tools/mockserver_textpb_to_binary.py 2025-01-16 02:26:08.527430630 +0800 +@@ -23,7 +23,7 @@ + + import glob + import os +-import protoc_util ++from . import protoc_util + import subprocess + + from absl import app +--- a/src/3rdparty/chromium/components/feed/tools/protoc_util.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/components/feed/tools/protoc_util.py 2025-01-16 02:26:08.527430630 +0800 +@@ -58,8 +58,8 @@ + glob.glob(os.path.join(root_dir, "out") + "/*/protoc")) + list( + glob.glob(os.path.join(root_dir, "out") + "/*/*/protoc")) + if not len(protoc_list): +- print("Can't find a suitable build output directory", +- "(it should have protoc)") ++ print(("Can't find a suitable build output directory", ++ "(it should have protoc)")) + sys.exit(1) + _protoc_path = protoc_list[0] + return _protoc_path +--- a/src/3rdparty/chromium/components/ntp_snippets/remote/fetch.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/components/ntp_snippets/remote/fetch.py 2025-01-16 02:26:08.527430630 +0800 +@@ -15,7 +15,7 @@ + credentials at ~/.zineauth. + """ + +-from __future__ import absolute_import, division, print_function, unicode_literals ++ + + import argparse + import base64 +--- a/src/3rdparty/chromium/components/resources/protobufs/binary_proto_generator.py 2025-01-14 21:29:17.870562185 +0800 ++++ b/src/3rdparty/chromium/components/resources/protobufs/binary_proto_generator.py 2025-01-16 02:26:08.527430630 +0800 +@@ -7,7 +7,7 @@ + Converts a given ASCII proto into a binary resource. 
+ + """ +-from __future__ import print_function ++ + import abc + import imp + import optparse +--- a/src/3rdparty/chromium/components/resources/ssl/ssl_error_assistant/push_proto.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/components/resources/ssl/ssl_error_assistant/push_proto.py 2025-01-16 02:26:08.527430630 +0800 +@@ -46,10 +46,10 @@ + gn_command = ['ninja', + '-C', opts.dir, + RESOURCE_SUBDIR + ':make_ssl_error_assistant_protobuf'] +- print "Running the following" +- print " " + (' '.join(gn_command)) ++ print("Running the following") ++ print(" " + (' '.join(gn_command))) + if subprocess.call(gn_command): +- print "Ninja failed." ++ print("Ninja failed.") + return 1 + + # Use the versioned files under the copy directory to push to the GCS bucket. +@@ -68,19 +68,19 @@ + version_dir = dirs[0] + command = ['gsutil', 'cp', '-Rn', version_dir, DEST_BUCKET] + +- print '\nGoing to run the following command' +- print ' ', ' '.join(command) +- print '\nIn directory' +- print ' ', copy_dir +- print '\nWhich should push the following files' ++ print('\nGoing to run the following command') ++ print(' ', ' '.join(command)) ++ print('\nIn directory') ++ print(' ', copy_dir) ++ print('\nWhich should push the following files') + expected_files = [os.path.join(dp, f) for dp, _, fn in + os.walk(version_dir) for f in fn] + for f in expected_files: +- print ' ', f ++ print(' ', f) + +- shall = raw_input('\nAre you sure (y/N) ').lower() == 'y' ++ shall = input('\nAre you sure (y/N) ').lower() == 'y' + if not shall: +- print 'aborting' ++ print('aborting') + return 1 + return subprocess.call(command) + +--- a/src/3rdparty/chromium/components/safe_browsing/core/resources/gen_file_type_proto.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/components/safe_browsing/core/resources/gen_file_type_proto.py 2025-01-16 02:26:08.527430630 +0800 +@@ -154,7 +154,7 @@ + FilterForPlatformAndWrite(pb, platform_enum, outfile) + else: + # Make a separate file for each platform +- for platform_type, platform_enum in PlatformTypes().iteritems(): ++ for platform_type, platform_enum in PlatformTypes().items(): + # e.g. .../all/77/chromeos/download_file_types.pb + outfile = os.path.join(opts.outdir, + str(pb.version_id), +@@ -179,12 +179,12 @@ + + def VerifyArgs(self, opts): + if (not opts.all and opts.type not in PlatformTypes()): +- print "ERROR: Unknown platform type '%s'" % opts.type ++ print("ERROR: Unknown platform type '%s'" % opts.type) + self.opt_parser.print_help() + return False + + if (bool(opts.all) == bool(opts.type)): +- print "ERROR: Need exactly one of --type or --all" ++ print("ERROR: Need exactly one of --type or --all") + self.opt_parser.print_help() + return False + return True +--- a/src/3rdparty/chromium/components/safe_browsing/core/resources/push_file_type_proto.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/components/safe_browsing/core/resources/push_file_type_proto.py 2025-01-16 02:26:08.527430630 +0800 +@@ -40,10 +40,10 @@ + gn_command = ['ninja', + '-C', opts.dir, + RESOURCE_SUBDIR + ':make_all_file_types_protobuf'] +- print "Running the following" +- print " " + (' '.join(gn_command)) ++ print("Running the following") ++ print(" " + (' '.join(gn_command))) + if subprocess.call(gn_command): +- print "Ninja failed." 
++ print("Ninja failed.") + return 1 + + os.chdir(all_dir) +@@ -60,19 +60,19 @@ + vers_dir = dirs[0] + command = ['gsutil', 'cp', '-Rn', vers_dir, DEST_BUCKET] + +- print '\nGoing to run the following command' +- print ' ', ' '.join(command) +- print '\nIn directory' +- print ' ', all_dir +- print '\nWhich should push the following files' ++ print('\nGoing to run the following command') ++ print(' ', ' '.join(command)) ++ print('\nIn directory') ++ print(' ', all_dir) ++ print('\nWhich should push the following files') + expected_files = [os.path.join(dp, f) for dp, dn, fn in + os.walk(vers_dir) for f in fn] + for f in expected_files: +- print ' ', f ++ print(' ', f) + +- shall = raw_input('\nAre you sure (y/N) ').lower() == 'y' ++ shall = input('\nAre you sure (y/N) ').lower() == 'y' + if not shall: +- print 'aborting' ++ print('aborting') + return 1 + return subprocess.call(command) + +--- a/src/3rdparty/chromium/components/schema_org/generate_schema_org_code.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/components/schema_org/generate_schema_org_code.py 2025-01-16 02:26:08.527430630 +0800 +@@ -225,7 +225,7 @@ + template_vars['properties'].append( + parse_property(thing, schema, names)) + +- for entity, parents in entity_parent_lookup.iteritems(): ++ for entity, parents in entity_parent_lookup.items(): + template_vars['entity_parent_lookup'].append({ + 'name': + entity, +--- a/src/3rdparty/chromium/components/variations/service/generate_ui_string_overrider.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/components/variations/service/generate_ui_string_overrider.py 2025-01-16 02:26:08.527430630 +0800 +@@ -101,7 +101,7 @@ + A set of all |Resource| objects with collisions. + """ + collisions = set() +- for i in xrange(len(sorted_resource_list) - 1): ++ for i in range(len(sorted_resource_list) - 1): + resource = sorted_resource_list[i] + next_resource = sorted_resource_list[i+1] + if resource.hash == next_resource.hash: +--- a/src/3rdparty/chromium/components/vector_icons/aggregate_vector_icons.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/components/vector_icons/aggregate_vector_icons.py 2025-01-16 02:26:08.527430630 +0800 +@@ -18,7 +18,7 @@ + + + def Error(msg): +- print >> sys.stderr, msg ++ print(msg, file=sys.stderr) + sys.exit(1) + + +--- a/src/3rdparty/chromium/components/viz/service/display/process_renderer_perftest_results.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/components/viz/service/display/process_renderer_perftest_results.py 2025-01-16 02:26:08.527430630 +0800 +@@ -20,7 +20,7 @@ + def SaveResultsAsCSV(csv_data, csv_filename): + assert len(csv_data) > 0 + with open(csv_filename, 'wb') as csv_file: +- labels = sorted(csv_data[0].keys(), reverse=True) ++ labels = sorted(list(csv_data[0].keys()), reverse=True) + writer = csv.DictWriter(csv_file, fieldnames=labels) + writer.writeheader() + writer.writerows(csv_data) +--- a/src/3rdparty/chromium/content/public/android/generate_child_service.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/content/public/android/generate_child_service.py 2025-01-16 02:26:08.527430630 +0800 +@@ -46,7 +46,7 @@ + path_template = "org/chromium/content/app/SandboxedProcessService{0}.java" + with build_utils.AtomicOutput(output) as f: + with zipfile.ZipFile(f, 'w', zipfile.ZIP_STORED) as srcjar: +- for i in xrange(number): ++ for i in range(number): + build_utils.AddToZipHermetic(srcjar, + path_template.format(i), + data=GenerateService(i)) +--- 
a/src/3rdparty/chromium/docs/enterprise/extension_query.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/docs/enterprise/extension_query.py 2025-01-16 02:26:08.528513945 +0800 +@@ -4,8 +4,8 @@ + # found in the LICENSE file. + """Transform CBCM Takeout API Data (Python3).""" + +-from __future__ import print_function +-from __future__ import unicode_literals ++ ++ + + import argparse + import csv +@@ -81,7 +81,7 @@ + The values from |data|, with each value's key inlined into the value. + """ + assert isinstance(data, dict), '|data| must be a dict' +- for key, value in data.items(): ++ for key, value in list(data.items()): + assert isinstance(value, dict), '|value| must contain dict items' + value[key_name] = key + yield value +@@ -111,7 +111,7 @@ + + for item in data: + added_item = {} +- for prop, value in item.items(): ++ for prop, value in list(item.items()): + # Non-container properties can be added directly. + if not isinstance(value, (list, set)): + added_item[prop] = value +@@ -149,7 +149,7 @@ + (int, bool, str)), ('unexpected type for item: %s' % + type(added_item[prop]).__name__) + +- all_columns.update(added_item.keys()) ++ all_columns.update(list(added_item.keys())) + yield added_item + + +--- a/src/3rdparty/chromium/docs/enterprise/extension_query_py2.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/docs/enterprise/extension_query_py2.py 2025-01-16 02:26:08.528513945 +0800 +@@ -4,7 +4,7 @@ + # found in the LICENSE file. + """Transform CBCM Takeout API Data (Python2).""" + +-from __future__ import print_function ++ + + import argparse + import csv +@@ -74,8 +74,8 @@ + A list of dict objects whose values have been encoded as UTF-8. + """ + for entry in data: +- for prop, value in entry.iteritems(): +- entry[prop] = unicode(value).encode('utf-8') ++ for prop, value in entry.items(): ++ entry[prop] = str(value).encode('utf-8') + yield entry + + +@@ -93,7 +93,7 @@ + The values from |data|, with each value's key inlined into the value. + """ + assert isinstance(data, dict), '|data| must be a dict' +- for key, value in data.items(): ++ for key, value in list(data.items()): + assert isinstance(value, dict), '|value| must contain dict items' + value[key_name] = key + yield value +@@ -123,7 +123,7 @@ + + for item in data: + added_item = {} +- for prop, value in item.items(): ++ for prop, value in list(item.items()): + # Non-container properties can be added directly. 
+ if not isinstance(value, (list, set)): + added_item[prop] = value +@@ -159,10 +159,10 @@ + + assert isinstance( + added_item[prop], +- (int, bool, str, unicode)), ('unexpected type for item: %s' % ++ (int, bool, str)), ('unexpected type for item: %s' % + type(added_item[prop]).__name__) + +- all_columns.update(added_item.keys()) ++ all_columns.update(list(added_item.keys())) + yield added_item + + +--- a/src/3rdparty/chromium/extensions/common/api/externs_checker.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/extensions/common/api/externs_checker.py 2025-01-16 02:26:08.528513945 +0800 +@@ -15,7 +15,7 @@ + self._output_api = output_api + self._api_pairs = api_pairs + +- for path in api_pairs.keys() + api_pairs.values(): ++ for path in list(api_pairs.keys()) + list(api_pairs.values()): + if not input_api.os_path.exists(path): + raise OSError('Path Not Found: %s' % path) + +--- a/src/3rdparty/chromium/extensions/common/api/externs_checker_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/extensions/common/api/externs_checker_test.py 2025-01-16 02:26:08.528513945 +0800 +@@ -30,10 +30,10 @@ + + def testModifiedSourceWithoutModifiedExtern(self): + results = self._runChecks(['b', 'test', 'random']) +- self.assertEquals(1, len(results)) +- self.assertEquals(1, len(results[0].items)) +- self.assertEquals('b', results[0].items[0]) +- self.assertEquals( ++ self.assertEqual(1, len(results)) ++ self.assertEqual(1, len(results[0].items)) ++ self.assertEqual('b', results[0].items[0]) ++ self.assertEqual( + 'To update the externs, run:\n' + ' src/ $ python tools/json_schema_compiler/compiler.py b --root=. ' + '--generator=externs > 2', +@@ -41,15 +41,15 @@ + + def testModifiedSourceWithModifiedExtern(self): + results = self._runChecks(['b', '2', 'test', 'random']) +- self.assertEquals(0, len(results)) ++ self.assertEqual(0, len(results)) + + def testModifiedMultipleSourcesWithNoModifiedExterns(self): + results = self._runChecks(['b', 'test', 'c', 'random']) +- self.assertEquals(1, len(results)) +- self.assertEquals(2, len(results[0].items)) ++ self.assertEqual(1, len(results)) ++ self.assertEqual(2, len(results[0].items)) + self.assertTrue('b' in results[0].items) + self.assertTrue('c' in results[0].items) +- self.assertEquals( ++ self.assertEqual( + 'To update the externs, run:\n' + ' src/ $ python tools/json_schema_compiler/compiler.py ' + '--root=. 
--generator=externs > ', +@@ -57,9 +57,9 @@ + + def testModifiedMultipleSourcesWithOneModifiedExtern(self): + results = self._runChecks(['b', 'test', 'c', 'random', '2']) +- self.assertEquals(1, len(results)) +- self.assertEquals(1, len(results[0].items)) +- self.assertEquals('c', results[0].items[0]) ++ self.assertEqual(1, len(results)) ++ self.assertEqual(1, len(results[0].items)) ++ self.assertEqual('c', results[0].items[0]) + + def testApiFileDoesNotExist(self): + exists = lambda f: f in ['a', 'b', 'c', '1', '2'] +--- a/src/3rdparty/chromium/google_apis/google_api_keys.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/google_apis/google_api_keys.py 2025-01-16 02:26:08.528513945 +0800 +@@ -86,15 +86,15 @@ + + + if __name__ == "__main__": +- print 'GOOGLE_API_KEY=%s' % GetAPIKey() +- print 'GOOGLE_CLIENT_ID_MAIN=%s' % GetClientID('MAIN') +- print 'GOOGLE_CLIENT_SECRET_MAIN=%s' % GetClientSecret('MAIN') +- print 'GOOGLE_CLIENT_ID_CLOUD_PRINT=%s' % GetClientID('CLOUD_PRINT') +- print 'GOOGLE_CLIENT_SECRET_CLOUD_PRINT=%s' % GetClientSecret('CLOUD_PRINT') +- print 'GOOGLE_CLIENT_ID_REMOTING=%s' % GetClientID('REMOTING') +- print 'GOOGLE_CLIENT_SECRET_REMOTING=%s' % GetClientSecret('REMOTING') +- print 'GOOGLE_CLIENT_ID_REMOTING_HOST=%s' % GetClientID('REMOTING_HOST') +- print 'GOOGLE_CLIENT_SECRET_REMOTING_HOST=%s' % GetClientSecret( +- 'REMOTING_HOST') +- print 'GOOGLE_CLIENT_ID_REMOTING_IDENTITY_API=%s' %GetClientID( +- 'REMOTING_IDENTITY_API') ++ print('GOOGLE_API_KEY=%s' % GetAPIKey()) ++ print('GOOGLE_CLIENT_ID_MAIN=%s' % GetClientID('MAIN')) ++ print('GOOGLE_CLIENT_SECRET_MAIN=%s' % GetClientSecret('MAIN')) ++ print('GOOGLE_CLIENT_ID_CLOUD_PRINT=%s' % GetClientID('CLOUD_PRINT')) ++ print('GOOGLE_CLIENT_SECRET_CLOUD_PRINT=%s' % GetClientSecret('CLOUD_PRINT')) ++ print('GOOGLE_CLIENT_ID_REMOTING=%s' % GetClientID('REMOTING')) ++ print('GOOGLE_CLIENT_SECRET_REMOTING=%s' % GetClientSecret('REMOTING')) ++ print('GOOGLE_CLIENT_ID_REMOTING_HOST=%s' % GetClientID('REMOTING_HOST')) ++ print('GOOGLE_CLIENT_SECRET_REMOTING_HOST=%s' % GetClientSecret( ++ 'REMOTING_HOST')) ++ print('GOOGLE_CLIENT_ID_REMOTING_IDENTITY_API=%s' %GetClientID( ++ 'REMOTING_IDENTITY_API')) +--- a/src/3rdparty/chromium/google_apis/build/check_internal.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/google_apis/build/check_internal.py 2025-01-16 02:26:08.528513945 +0800 +@@ -8,7 +8,7 @@ + Takes one argument, a path. Prints 1 if the path exists, 0 if not. + """ + +-from __future__ import print_function ++ + + import os + import sys +--- a/src/3rdparty/chromium/gpu/command_buffer/build_cmd_buffer_lib.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/gpu/command_buffer/build_cmd_buffer_lib.py 2025-01-16 02:26:08.528513945 +0800 +@@ -682,7 +682,7 @@ + def Grouper(n, iterable, fillvalue=None): + """Collect data into fixed-length chunks or blocks""" + args = [iter(iterable)] * n +- return itertools.izip_longest(fillvalue=fillvalue, *args) ++ return itertools.zip_longest(fillvalue=fillvalue, *args) + + + def SplitWords(input_string): +@@ -5923,7 +5923,7 @@ + """Writes the cmd cmd_flags constant.""" + # By default trace only at the highest level 3. 
+ trace_level = int(self.GetInfo('trace_level', default = 3)) +- if trace_level not in xrange(0, 4): ++ if trace_level not in range(0, 4): + raise KeyError("Unhandled trace_level: %d" % trace_level) + + cmd_flags = ('CMD_FLAG_SET_TRACE_LEVEL(%d)' % trace_level) +@@ -6351,11 +6351,11 @@ + def Log(self, msg): + """Prints something if verbose is true.""" + if self.verbose: +- print msg ++ print(msg) + + def Error(self, msg): + """Prints an error.""" +- print "Error: %s" % msg ++ print("Error: %s" % msg) + self.errors += 1 + + def ParseGLH(self, filename): +@@ -6381,7 +6381,7 @@ + 'return_type': match.group(1).strip(), + } + +- for k in parsed_func_info.keys(): ++ for k in list(parsed_func_info.keys()): + if not k in func_info: + func_info[k] = parsed_func_info[k] + +@@ -7424,13 +7424,13 @@ + f.write("#include \"ppapi/c/ppb_opengles2.h\"\n\n") + else: + f.write("\n#ifndef __gl2_h_\n") +- for (k, v) in _GL_TYPES.iteritems(): ++ for (k, v) in _GL_TYPES.items(): + f.write("typedef %s %s;\n" % (v, k)) + f.write("#ifdef _WIN64\n") +- for (k, v) in _GL_TYPES_64.iteritems(): ++ for (k, v) in _GL_TYPES_64.items(): + f.write("typedef %s %s;\n" % (v, k)) + f.write("#else\n") +- for (k, v) in _GL_TYPES_32.iteritems(): ++ for (k, v) in _GL_TYPES_32.items(): + f.write("typedef %s %s;\n" % (v, k)) + f.write("#endif // _WIN64\n") + f.write("#endif // __gl2_h_\n\n") +@@ -7578,7 +7578,6 @@ + if platform.system() == "Windows": + formatter = "third_party\\depot_tools\\clang-format.bat" + formatter = os.path.join(chromium_root_dir, formatter) +- generated_files = map(lambda filename: os.path.join(output_dir, filename), +- generated_files) ++ generated_files = [os.path.join(output_dir, filename) for filename in generated_files] + for filename in generated_files: + call([formatter, "-i", "-style=chromium", filename], cwd=chromium_root_dir) +--- a/src/3rdparty/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py 2025-01-16 02:26:08.529597260 +0800 +@@ -4396,7 +4396,7 @@ + chromium_root_dir) + + if gen.errors > 0: +- print "build_gles2_cmd_buffer.py: Failed with %d errors" % gen.errors ++ print("build_gles2_cmd_buffer.py: Failed with %d errors" % gen.errors) + return 1 + + check_failed_filenames = [] +@@ -4407,10 +4407,10 @@ + check_failed_filenames.append(filename) + + if len(check_failed_filenames) > 0: +- print 'Please run gpu/command_buffer/build_gles2_cmd_buffer.py' +- print 'Failed check on autogenerated command buffer files:' ++ print('Please run gpu/command_buffer/build_gles2_cmd_buffer.py') ++ print('Failed check on autogenerated command buffer files:') + for filename in check_failed_filenames: +- print filename ++ print(filename) + return 1 + + return 0 +--- a/src/3rdparty/chromium/gpu/command_buffer/build_raster_cmd_buffer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/gpu/command_buffer/build_raster_cmd_buffer.py 2025-01-16 02:26:08.529597260 +0800 +@@ -442,7 +442,7 @@ + chromium_root_dir) + + if gen.errors > 0: +- print "build_raster_cmd_buffer.py: Failed with %d errors" % gen.errors ++ print("build_raster_cmd_buffer.py: Failed with %d errors" % gen.errors) + return 1 + + check_failed_filenames = [] +@@ -453,10 +453,10 @@ + check_failed_filenames.append(filename) + + if len(check_failed_filenames) > 0: +- print 'Please run gpu/command_buffer/build_raster_cmd_buffer.py' +- print 'Failed check on autogenerated command buffer files:' ++ print('Please run 
gpu/command_buffer/build_raster_cmd_buffer.py') ++ print('Failed check on autogenerated command buffer files:') + for filename in check_failed_filenames: +- print filename ++ print(filename) + return 1 + + return 0 +--- a/src/3rdparty/chromium/gpu/command_buffer/build_webgpu_cmd_buffer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/gpu/command_buffer/build_webgpu_cmd_buffer.py 2025-01-16 02:26:08.529597260 +0800 +@@ -155,7 +155,7 @@ + chromium_root_dir) + + if gen.errors > 0: +- print "build_webgpu_cmd_buffer.py: Failed with %d errors" % gen.errors ++ print("build_webgpu_cmd_buffer.py: Failed with %d errors" % gen.errors) + return 1 + + check_failed_filenames = [] +@@ -166,10 +166,10 @@ + check_failed_filenames.append(filename) + + if len(check_failed_filenames) > 0: +- print 'Please run gpu/command_buffer/build_webgpu_cmd_buffer.py' +- print 'Failed check on autogenerated command buffer files:' ++ print('Please run gpu/command_buffer/build_webgpu_cmd_buffer.py') ++ print('Failed check on autogenerated command buffer files:') + for filename in check_failed_filenames: +- print filename ++ print(filename) + return 1 + + return 0 +--- a/src/3rdparty/chromium/gpu/config/process_json.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/gpu/config/process_json.py 2025-01-16 02:26:08.529597260 +0800 +@@ -606,7 +606,7 @@ + 'Intel' in driver_vendor) + assert is_intel, 'Intel driver schema is only for Intel GPUs' + valid_version = check_intel_driver_version(driver_version['value']) +- if driver_version.has_key('value2'): ++ if 'value2' in driver_version: + valid_version = (valid_version and + check_intel_driver_version(driver_version['value2'])) + assert valid_version, INTEL_DRIVER_VERSION_SCHEMA +@@ -616,7 +616,7 @@ + is_nvidia = (format(vendor_id, '#04x') == '0x10de') + assert is_nvidia, 'Nvidia driver schema is only for Nvidia GPUs' + valid_version = check_nvidia_driver_version(driver_version['value']) +- if driver_version.has_key('value2'): ++ if 'value2' in driver_version: + valid_version = (valid_version and + check_nvidia_driver_version(driver_version['value2'])) + assert valid_version, NVIDIA_DRIVER_VERSION_SCHEMA +--- a/src/3rdparty/chromium/gpu/gles2_conform_support/generate_gles2_embedded_data.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/gpu/gles2_conform_support/generate_gles2_embedded_data.py 2025-01-16 02:26:08.529597260 +0800 +@@ -70,7 +70,7 @@ + sub_dirs.append(full_path) + elif ext in GenerateEmbeddedFiles.extensions_to_include: + if self.base_dir == None: +- print full_path.replace("\\", "/") ++ print(full_path.replace("\\", "/")) + else: + self.count += 1 + name = "_FILE_%s_%d" % (ext.upper(), self.count) +--- a/src/3rdparty/chromium/gpu/ipc/common/generate_vulkan_types.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/gpu/ipc/common/generate_vulkan_types.py 2025-01-16 02:26:08.529597260 +0800 +@@ -598,10 +598,10 @@ + check_failed_filenames.append(filename) + + if len(check_failed_filenames) > 0: +- print 'Please run gpu/ipc/common/generate_vulkan_types.py' +- print 'Failed check on generated files:' ++ print('Please run gpu/ipc/common/generate_vulkan_types.py') ++ print('Failed check on generated files:') + for filename in check_failed_filenames: +- print filename ++ print(filename) + return 1 + + return 0 +--- a/src/3rdparty/chromium/gpu/khronos_glcts_support/generate_khronos_glcts_tests.py 2023-07-18 22:12:18.000000000 +0800 ++++ 
b/src/3rdparty/chromium/gpu/khronos_glcts_support/generate_khronos_glcts_tests.py 2025-01-16 02:26:08.529597260 +0800 +@@ -59,7 +59,7 @@ + elif ext == ".run": + tests += ReadRunFile(os.path.join(base_dir, line)) + else: +- raise ValueError, "Unexpected line '%s' in '%s'" % (line, run_file) ++ raise ValueError("Unexpected line '%s' in '%s'" % (line, run_file)) + return tests + + def GenerateTests(run_files, output): +--- a/src/3rdparty/chromium/headless/lib/browser/devtools_api/client_api_generator.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/headless/lib/browser/devtools_api/client_api_generator.py 2025-01-16 02:26:08.529597260 +0800 +@@ -410,7 +410,7 @@ + + if not isinstance(json, dict): + return +- for value in json.values(): ++ for value in list(json.values()): + GetDomainDepsFromRefs(domain_name, value) + + if '$ref' in json: +--- a/src/3rdparty/chromium/infra/config/lint-luci-milo.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/infra/config/lint-luci-milo.py 2025-01-16 02:26:08.529597260 +0800 +@@ -43,12 +43,12 @@ + sub_desc = to_list(sub_builders, name) + + if main_desc != sub_desc: +- print ('bot lists different between main waterfall ' + +- 'and stand-alone %s waterfall:' % name) +- print '\n'.join(difflib.unified_diff(main_desc, sub_desc, ++ print(('bot lists different between main waterfall ' + ++ 'and stand-alone %s waterfall:' % name)) ++ print('\n'.join(difflib.unified_diff(main_desc, sub_desc, + fromfile='main', tofile=name, +- lineterm='')) +- print ++ lineterm=''))) ++ print() + return False + return True + +@@ -80,7 +80,7 @@ + referenced_names = set(subwaterfalls.keys()) + missing_names = referenced_names - set(all_console_names + excluded_names) + if missing_names: +- print 'Missing subwaterfall console for', missing_names ++ print('Missing subwaterfall console for', missing_names) + return 1 + + # Check that the bots on a subwaterfall match the corresponding bots on the +--- a/src/3rdparty/chromium/infra/scripts/sizes.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/infra/scripts/sizes.py 2025-01-16 02:26:08.529597260 +0800 +@@ -23,12 +23,12 @@ + import sys + import tempfile + +-import build_directory ++from . import build_directory + + + SRC_DIR = os.path.abspath( + os.path.join(os.path.dirname(__file__), '..', '..')) +-print SRC_DIR ++print(SRC_DIR) + + # Add Catapult to the path so we can import the chartjson-histogramset + # conversion. +@@ -48,7 +48,7 @@ + } + + # Legacy printing, previously used for parsing the text logs. +- print 'RESULT %s: %s= %s %s' % (name, identifier, value, units) ++ print('RESULT %s: %s= %s %s' % (name, identifier, value, units)) + + + def get_size(filename): +@@ -78,7 +78,7 @@ + p = subprocess.Popen(command, stdout=subprocess.PIPE) + stdout = p.communicate()[0] + if p.returncode != 0: +- print 'ERROR from command "%s": %d' % (' '.join(command), p.returncode) ++ print('ERROR from command "%s": %d' % (' '.join(command), p.returncode)) + if result == 0: + result = p.returncode + return result, stdout +@@ -289,7 +289,7 @@ + path = os.path.join(target_dir, filename) + try: + size = get_size(path) +- except OSError, e: ++ except OSError as e: + if e.errno == errno.ENOENT: + continue # Don't print anything for missing files. + raise +@@ -299,7 +299,7 @@ + # TODO(mcgrathr): This should all be refactored so the mac and win flavors + # also deliver data structures rather than printing, and the logic for + # the printing and the summing totals is shared across all three flavors. 
+- for (identifier, units), value in sorted(totals.iteritems()): ++ for (identifier, units), value in sorted(totals.items()): + results_collector.add_result( + 'totals-%s' % identifier, identifier, value, units) + +@@ -431,7 +431,7 @@ + # 1. Add a top-level "benchmark_name" key. + # 2. Pull out the "identifier" value to be the story name. + formatted_data = {} +- for metric, metric_data in data.iteritems(): ++ for metric, metric_data in data.items(): + story = metric_data['identifier'] + formatted_data[metric] = { + story: metric_data.copy() +--- a/src/3rdparty/chromium/media/PRESUBMIT.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/media/PRESUBMIT.py 2025-01-16 02:26:08.529597260 +0800 +@@ -124,7 +124,7 @@ + + if (not uma_max_re.match(max_arg) and match.group(2) != + 'PRESUBMIT_IGNORE_UMA_MAX'): +- uma_range = range(match.start(), match.end() + 1) ++ uma_range = list(range(match.start(), match.end() + 1)) + # Check if any part of the match is in the changed lines: + for num, line in f.ChangedContents(): + if line_number <= num <= line_number + match.group().count('\n'): +--- a/src/3rdparty/chromium/media/tools/constrained_network_server/cn.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/media/tools/constrained_network_server/cn.py 2025-01-16 02:26:08.529597260 +0800 +@@ -59,7 +59,7 @@ + (indent_first, '', opt_width, s, COMMANDS[s].desc)) + + parser.usage = ('usage: %%prog {%s} [options]\n\n%s' % +- ('|'.join(COMMANDS.keys()), '\n'.join(cmd_usage))) ++ ('|'.join(list(COMMANDS.keys())), '\n'.join(cmd_usage))) + + parser.add_option('--port', type='int', + help='The port to apply traffic control constraints to.') +@@ -83,7 +83,7 @@ + + # Check a valid command was entered + if not args or args[0].lower() not in COMMANDS: +- parser.error('Please specify a command {%s}.' % '|'.join(COMMANDS.keys())) ++ parser.error('Please specify a command {%s}.' % '|'.join(list(COMMANDS.keys()))) + user_cmd = args[0].lower() + + # Check if required options are available +--- a/src/3rdparty/chromium/media/tools/constrained_network_server/cns.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/media/tools/constrained_network_server/cns.py 2025-01-16 02:26:08.529597260 +0800 +@@ -56,8 +56,8 @@ + import sys + import threading + import time +-import urllib +-import urllib2 ++import urllib.request, urllib.parse, urllib.error ++import urllib.request, urllib.error, urllib.parse + + import traffic_control + +@@ -126,7 +126,7 @@ + # so just iterate over ports dict for simplicity. + full_key = (key,) + tuple(kwargs.values()) + if not new_port: +- for port, status in self._ports.iteritems(): ++ for port, status in self._ports.items(): + if full_key == status['key']: + self._ports[port]['last_update'] = time.time() + return port +@@ -138,7 +138,7 @@ + + # Performance isn't really an issue here, so just iterate over the port + # range to find an unused port. If no port is found, None is returned. +- for port in xrange(self._port_range[0], self._port_range[1]): ++ for port in range(self._port_range[0], self._port_range[1]): + if port in self._ports: + continue + if self._SetupPort(port, **kwargs): +@@ -180,7 +180,7 @@ + with self._port_lock: + now = time.time() + # Use .items() instead of .iteritems() so we can delete keys w/o error. 
+- for port, status in self._ports.items(): ++ for port, status in list(self._ports.items()): + expired = now - status['last_update'] > self._expiry_time_secs + matching_ip = request_ip and status['key'][0].startswith(request_ip) + if all_ports or expired or matching_ip: +@@ -295,7 +295,7 @@ + test_url = self._GetServerURL(f, self._options.local_server_port) + try: + cherrypy.log('Check file exist using URL: %s' % test_url) +- return urllib2.urlopen(test_url) is not None ++ return urllib.request.urlopen(test_url) is not None + except Exception: + raise cherrypy.HTTPError(404, 'File not found on local server.') + +@@ -323,7 +323,7 @@ + cherrypy.url().replace('ServeConstrained', self._options.www_root), f) + + url = url.replace(':%d' % self._options.port, ':%d' % port) +- extra_args = urllib.urlencode(kwargs) ++ extra_args = urllib.parse.urlencode(kwargs) + if extra_args: + url += extra_args + return url +--- a/src/3rdparty/chromium/media/tools/constrained_network_server/cns_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/media/tools/constrained_network_server/cns_test.py 2025-01-16 02:26:08.529597260 +0800 +@@ -45,7 +45,7 @@ + import tempfile + import time + import unittest +-import urllib2 ++import urllib.request, urllib.error, urllib.parse + import cherrypy + import cns + import traffic_control +@@ -73,7 +73,7 @@ + def tearDown(self): + self._pa.Cleanup(all_ports=True) + # Ensure ports are cleaned properly. +- self.assertEquals(self._pa._ports, {}) ++ self.assertEqual(self._pa._ports, {}) + time.time = self._old_time + self._RestoreTrafficControl() + +@@ -93,45 +93,45 @@ + + def testPortAllocator(self): + # Ensure Get() succeeds and returns the correct port. +- self.assertEquals(self._pa.Get('test'), cns._DEFAULT_CNS_PORT_RANGE[0]) ++ self.assertEqual(self._pa.Get('test'), cns._DEFAULT_CNS_PORT_RANGE[0]) + + # Call again with the same key and make sure we get the same port. +- self.assertEquals(self._pa.Get('test'), cns._DEFAULT_CNS_PORT_RANGE[0]) ++ self.assertEqual(self._pa.Get('test'), cns._DEFAULT_CNS_PORT_RANGE[0]) + + # Call with a different key and make sure we get a different port. +- self.assertEquals(self._pa.Get('test2'), cns._DEFAULT_CNS_PORT_RANGE[0] + 1) ++ self.assertEqual(self._pa.Get('test2'), cns._DEFAULT_CNS_PORT_RANGE[0] + 1) + + # Update fake time so that ports should expire. + self._current_time += self._EXPIRY_TIME + 1 + + # Test to make sure cache is checked before expiring ports. +- self.assertEquals(self._pa.Get('test2'), cns._DEFAULT_CNS_PORT_RANGE[0] + 1) ++ self.assertEqual(self._pa.Get('test2'), cns._DEFAULT_CNS_PORT_RANGE[0] + 1) + + # Update fake time so that ports should expire. + self._current_time += self._EXPIRY_TIME + 1 + + # Request a new port, old ports should be expired, so we should get the + # first port in the range. Make sure this is the only allocated port. +- self.assertEquals(self._pa.Get('test3'), cns._DEFAULT_CNS_PORT_RANGE[0]) +- self.assertEquals(self._pa._ports.keys(), [cns._DEFAULT_CNS_PORT_RANGE[0]]) ++ self.assertEqual(self._pa.Get('test3'), cns._DEFAULT_CNS_PORT_RANGE[0]) ++ self.assertEqual(list(self._pa._ports.keys()), [cns._DEFAULT_CNS_PORT_RANGE[0]]) + + def testPortAllocatorExpiresOnlyCorrectPorts(self): + # Ensure Get() succeeds and returns the correct port. 
+- self.assertEquals(self._pa.Get('test'), cns._DEFAULT_CNS_PORT_RANGE[0]) ++ self.assertEqual(self._pa.Get('test'), cns._DEFAULT_CNS_PORT_RANGE[0]) + + # Stagger port allocation and so we can ensure only ports older than the + # expiry time are actually expired. + self._current_time += self._EXPIRY_TIME / 2 + 1 + + # Call with a different key and make sure we get a different port. +- self.assertEquals(self._pa.Get('test2'), cns._DEFAULT_CNS_PORT_RANGE[0] + 1) ++ self.assertEqual(self._pa.Get('test2'), cns._DEFAULT_CNS_PORT_RANGE[0] + 1) + + # After this sleep the port with key 'test' should expire on the next Get(). + self._current_time += self._EXPIRY_TIME / 2 + 1 + + # Call with a different key and make sure we get the first port. +- self.assertEquals(self._pa.Get('test3'), cns._DEFAULT_CNS_PORT_RANGE[0]) +- self.assertEquals(set(self._pa._ports.keys()), set([ ++ self.assertEqual(self._pa.Get('test3'), cns._DEFAULT_CNS_PORT_RANGE[0]) ++ self.assertEqual(set(self._pa._ports.keys()), set([ + cns._DEFAULT_CNS_PORT_RANGE[0], cns._DEFAULT_CNS_PORT_RANGE[0] + 1])) + + def testPortAllocatorNoExpiration(self): +@@ -139,15 +139,15 @@ + self._pa = cns.PortAllocator(cns._DEFAULT_CNS_PORT_RANGE, 0) + + # Ensure Get() succeeds and returns the correct port. +- self.assertEquals(self._pa.Get('test'), cns._DEFAULT_CNS_PORT_RANGE[0]) ++ self.assertEqual(self._pa.Get('test'), cns._DEFAULT_CNS_PORT_RANGE[0]) + + # Update fake time to see if ports expire. + self._current_time += self._EXPIRY_TIME + + # Send second Get() which would normally cause ports to expire. Ensure that + # the ports did not expire. +- self.assertEquals(self._pa.Get('test2'), cns._DEFAULT_CNS_PORT_RANGE[0] + 1) +- self.assertEquals(set(self._pa._ports.keys()), set([ ++ self.assertEqual(self._pa.Get('test2'), cns._DEFAULT_CNS_PORT_RANGE[0] + 1) ++ self.assertEqual(set(self._pa._ports.keys()), set([ + cns._DEFAULT_CNS_PORT_RANGE[0], cns._DEFAULT_CNS_PORT_RANGE[0] + 1])) + + def testPortAllocatorCleanMatchingIP(self): +@@ -155,31 +155,31 @@ + self._pa = cns.PortAllocator(cns._DEFAULT_CNS_PORT_RANGE, 0) + + # Ensure Get() succeeds and returns the correct port. 
+- self.assertEquals(self._pa.Get('ip1', t=1), cns._DEFAULT_CNS_PORT_RANGE[0]) +- self.assertEquals(self._pa.Get('ip1', t=2), ++ self.assertEqual(self._pa.Get('ip1', t=1), cns._DEFAULT_CNS_PORT_RANGE[0]) ++ self.assertEqual(self._pa.Get('ip1', t=2), + cns._DEFAULT_CNS_PORT_RANGE[0] + 1) +- self.assertEquals(self._pa.Get('ip1', t=3), ++ self.assertEqual(self._pa.Get('ip1', t=3), + cns._DEFAULT_CNS_PORT_RANGE[0] + 2) +- self.assertEquals(self._pa.Get('ip2', t=1), ++ self.assertEqual(self._pa.Get('ip2', t=1), + cns._DEFAULT_CNS_PORT_RANGE[0] + 3) + + self._pa.Cleanup(all_ports=False, request_ip='ip1') + +- self.assertEquals(self._pa._ports.keys(), ++ self.assertEqual(list(self._pa._ports.keys()), + [cns._DEFAULT_CNS_PORT_RANGE[0] + 3]) +- self.assertEquals(self._pa.Get('ip2'), cns._DEFAULT_CNS_PORT_RANGE[0]) +- self.assertEquals(self._pa.Get('ip1'), cns._DEFAULT_CNS_PORT_RANGE[0] + 1) ++ self.assertEqual(self._pa.Get('ip2'), cns._DEFAULT_CNS_PORT_RANGE[0]) ++ self.assertEqual(self._pa.Get('ip1'), cns._DEFAULT_CNS_PORT_RANGE[0] + 1) + + self._pa.Cleanup(all_ports=False, request_ip='ip2') +- self.assertEquals(self._pa._ports.keys(), ++ self.assertEqual(list(self._pa._ports.keys()), + [cns._DEFAULT_CNS_PORT_RANGE[0] + 1]) + + self._pa.Cleanup(all_ports=False, request_ip='abc') +- self.assertEquals(self._pa._ports.keys(), ++ self.assertEqual(list(self._pa._ports.keys()), + [cns._DEFAULT_CNS_PORT_RANGE[0] + 1]) + + self._pa.Cleanup(all_ports=False, request_ip='ip1') +- self.assertEquals(self._pa._ports.keys(), []) ++ self.assertEqual(list(self._pa._ports.keys()), []) + + + class ConstrainedNetworkServerTest(unittest.TestCase): +@@ -236,7 +236,7 @@ + def testServerServesFiles(self): + now = time.time() + +- f = urllib2.urlopen('%sf=%s' % (self._SERVER_URL, self._relative_fn)) ++ f = urllib.request.urlopen('%sf=%s' % (self._SERVER_URL, self._relative_fn)) + + # Verify file data is served correctly. + self.assertEqual(self._TEST_DATA, f.read()) +@@ -253,7 +253,7 @@ + + base_url = '%sf=%s' % (self._SERVER_URL, self._relative_fn) + url = '%s&latency=%d' % (base_url, self._LATENCY_TEST_SECS * 1000) +- f = urllib2.urlopen(url) ++ f = urllib.request.urlopen(url) + + # Verify file data is served correctly. + self.assertEqual(self._TEST_DATA, f.read()) +--- a/src/3rdparty/chromium/media/tools/constrained_network_server/traffic_control.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/media/tools/constrained_network_server/traffic_control.py 2025-01-16 02:26:08.529597260 +0800 +@@ -153,7 +153,7 @@ + TrafficControlError: If any key name does not exist in config or is None. + """ + for key in args: +- if key not in config.keys() or config[key] is None: ++ if key not in list(config.keys()) or config[key] is None: + raise TrafficControlError('Missing "%s" parameter.' % key) + + +@@ -194,7 +194,7 @@ + # Use constrained port as class ID so we can attach the qdisc and filter to + # it, as well as delete the class, using only the port number. + class_id = '1:%x' % config['port'] +- if 'bandwidth' not in config.keys() or not config['bandwidth']: ++ if 'bandwidth' not in list(config.keys()) or not config['bandwidth']: + bandwidth = _DEFAULT_MAX_BANDWIDTH_KBIT + else: + bandwidth = config['bandwidth'] +@@ -223,11 +223,11 @@ + class_id, 'handle', port_hex + ':0', 'netem'] + + # Check if packet-loss is set in the configuration. 
+- if 'loss' in config.keys() and config['loss']: ++ if 'loss' in list(config.keys()) and config['loss']: + loss = '%d%%' % config['loss'] + command.extend(['loss', loss]) + # Check if latency is set in the configuration. +- if 'latency' in config.keys() and config['latency']: ++ if 'latency' in list(config.keys()) and config['latency']: + latency = '%dms' % config['latency'] + command.extend(['delay', latency]) + +--- a/src/3rdparty/chromium/media/tools/constrained_network_server/traffic_control_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/media/tools/constrained_network_server/traffic_control_unittest.py 2025-01-16 02:26:08.529597260 +0800 +@@ -119,7 +119,7 @@ + # Check seach for handle ID command. + self.assertRaises(traffic_control.TrafficControlError, + traffic_control._GetFilterHandleId, 'fakeeth', 1) +- self.assertEquals(self.commands, ['sudo tc filter list dev fakeeth parent ' ++ self.assertEqual(self.commands, ['sudo tc filter list dev fakeeth parent ' + '1:']) + + # Check with handle ID available. +--- a/src/3rdparty/chromium/mojo/public/tools/bindings/concatenate-files.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/bindings/concatenate-files.py 2025-01-16 02:26:08.529597260 +0800 +@@ -12,7 +12,7 @@ + # us with an easy and uniform way of doing this on all platforms. + + # for py2/py3 compatibility +-from __future__ import print_function ++ + + import optparse + +--- a/src/3rdparty/chromium/mojo/public/tools/bindings/concatenate_and_replace_closure_exports.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/bindings/concatenate_and_replace_closure_exports.py 2025-01-16 02:26:08.529597260 +0800 +@@ -16,7 +16,7 @@ + namespace. + """ + +-from __future__ import print_function ++ + + import optparse + import re +--- a/src/3rdparty/chromium/mojo/public/tools/bindings/gen_data_files_list.py 2025-01-14 21:29:17.871645499 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/bindings/gen_data_files_list.py 2025-01-16 02:26:08.529597260 +0800 +@@ -12,7 +12,7 @@ + will be written to the list. 
+ """ + +-from __future__ import print_function ++ + + import os + import re +--- a/src/3rdparty/chromium/mojo/public/tools/bindings/mojom_bindings_generator.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/bindings/mojom_bindings_generator.py 2025-01-16 02:26:08.530680575 +0800 +@@ -5,7 +5,7 @@ + + """The frontend for the Mojo bindings system.""" + +-from __future__ import print_function ++ + + import argparse + +@@ -161,7 +161,7 @@ + for filename in typemaps: + with open(filename) as f: + typemaps = json.loads("".join(filter(no_comments, f.readlines()))) +- for language, typemap in typemaps.items(): ++ for language, typemap in list(typemaps.items()): + language_map = self._typemap.get(language, {}) + language_map.update(typemap) + self._typemap[language] = language_map +@@ -189,7 +189,7 @@ + ScrambleMethodOrdinals(module.interfaces, salt) + + if self._should_generate(rel_filename.path): +- for language, generator_module in generator_modules.items(): ++ for language, generator_module in list(generator_modules.items()): + generator = generator_module.Generator( + module, args.output_dir, typemap=self._typemap.get(language, {}), + variant=args.variant, bytecode_path=args.bytecode_path, +@@ -252,7 +252,7 @@ + + + def _Precompile(args, _): +- generator_modules = LoadGenerators(",".join(_BUILTIN_GENERATORS.keys())) ++ generator_modules = LoadGenerators(",".join(list(_BUILTIN_GENERATORS.keys()))) + + template_expander.PrecompileTemplates(generator_modules, args.output_dir) + return 0 +--- a/src/3rdparty/chromium/mojo/public/tools/bindings/mojom_types_downgrader.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/bindings/mojom_types_downgrader.py 2025-01-16 02:26:08.530680575 +0800 +@@ -25,13 +25,13 @@ + # Pre-compiled regular expression that matches against any of the replacements. 
+ _REGEXP_PATTERN = re.compile( + r'|'.join( +- ['{}\s*<\s*(.*?)\s*>'.format(k) for k in _MOJO_REPLACEMENTS.keys()]), ++ ['{}\s*<\s*(.*?)\s*>'.format(k) for k in list(_MOJO_REPLACEMENTS.keys())]), + flags=re.DOTALL) + + + def ReplaceFunction(match_object): + """Returns the right replacement for the string matched against the regexp.""" +- for index, (match, repl) in enumerate(_MOJO_REPLACEMENTS.items(), 1): ++ for index, (match, repl) in enumerate(list(_MOJO_REPLACEMENTS.items()), 1): + if match_object.group(0).startswith(match): + return repl.format(match_object.group(index)) + +@@ -99,7 +99,7 @@ + elif os.path.isfile(src_path): + DowngradeFile(src_path, output_dir) + else: +- print(">>> {} not pointing to a valid file or directory".format(src_path)) ++ print((">>> {} not pointing to a valid file or directory".format(src_path))) + sys.exit(1) + + +--- a/src/3rdparty/chromium/mojo/public/tools/bindings/validate_typemap_config.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/bindings/validate_typemap_config.py 2025-01-16 02:26:08.530680575 +0800 +@@ -21,7 +21,7 @@ + ]) + with open(config_filename, 'r') as f: + for config in json.load(f): +- for key in config.keys(): ++ for key in list(config.keys()): + if key not in _SUPPORTED_CONFIG_KEYS: + raise ValueError('Invalid typemap property "%s" when processing %s' % + (key, target_name)) +@@ -32,7 +32,7 @@ + % target_name) + + for entry in types: +- for key in entry.keys(): ++ for key in list(entry.keys()): + if key not in _SUPPORTED_TYPE_KEYS: + raise IOError( + 'Invalid type property "%s" in typemap for "%s" on target %s' % +--- a/src/3rdparty/chromium/mojo/public/tools/bindings/generators/cpp_tracing_support.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/bindings/generators/cpp_tracing_support.py 2025-01-16 02:26:08.530680575 +0800 +@@ -9,8 +9,7 @@ + from abc import abstractmethod + + +-class _OutputContext(object): +- __metaclass__ = ABCMeta ++class _OutputContext(object, metaclass=ABCMeta): + """Represents the context in which |self.value| should be used. + + This is a base class for _ArrayItem, _DictionaryItemWithLiteralKey, and +--- a/src/3rdparty/chromium/mojo/public/tools/bindings/generators/mojom_cpp_generator.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/bindings/generators/mojom_cpp_generator.py 2025-01-16 02:26:08.530680575 +0800 +@@ -278,7 +278,7 @@ + for typename in + self.module.structs + all_enums + self.module.unions) + headers = set() +- for typename, typemap in self.typemap.items(): ++ for typename, typemap in list(self.typemap.items()): + if typename in types: + headers.update(typemap.get("public_headers", [])) + return sorted(headers) +@@ -754,10 +754,10 @@ + # Blink and non-Blink bindings. + return any( + mojom.IsMapKind(k) and k.key_kind == kind +- for k in self.module.kinds.values()) ++ for k in list(self.module.kinds.values())) + return False + +- for spec, kind in imported_module.kinds.items(): ++ for spec, kind in list(imported_module.kinds.items()): + if spec in self.module.imported_kinds and requires_full_header(kind): + return True + return False +--- a/src/3rdparty/chromium/mojo/public/tools/bindings/generators/mojom_java_generator.py 2025-01-14 21:29:17.871645499 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/bindings/generators/mojom_java_generator.py 2025-01-16 02:26:08.530680575 +0800 +@@ -27,7 +27,7 @@ + + # TODO(crbug.com/1174969): Remove this once Python2 is obsoleted. 
+ if sys.version_info.major != 2: +- basestring = str ++ str = str + long = int + + GENERATOR_PREFIX = 'java' +@@ -146,7 +146,7 @@ + return UpperCamelCase(method.name + '_Response') + + def ParseStringAttribute(attribute): +- assert isinstance(attribute, basestring) ++ assert isinstance(attribute, str) + return attribute + + def GetJavaTrueFalse(value): +@@ -335,7 +335,7 @@ + return _TranslateNamedValue(token) + if kind_spec.startswith('i') or kind_spec.startswith('u'): + number = ast.literal_eval(token.lstrip('+ ')) +- if not isinstance(number, (int, long)): ++ if not isinstance(number, int): + raise ValueError('got unexpected type %r for int literal %r' % ( + type(number), token)) + # If the literal is too large to fit a signed long, convert it to the +@@ -420,8 +420,7 @@ + def EnumCoversContinuousRange(kind): + if not kind.fields: + return False +- number_of_unique_keys = len(set(map( +- lambda field: field.numeric_value, kind.fields))) ++ number_of_unique_keys = len(set([field.numeric_value for field in kind.fields])) + if kind.max_value - kind.min_value + 1 != number_of_unique_keys: + return False + return True +--- a/src/3rdparty/chromium/mojo/public/tools/bindings/generators/mojom_mojolpm_generator.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/bindings/generators/mojom_mojolpm_generator.py 2025-01-16 02:26:08.530680575 +0800 +@@ -43,7 +43,7 @@ + + def _IsStrOrUnicode(x): + if sys.version_info[0] < 3: +- return isinstance(x, (unicode, str)) ++ return isinstance(x, str) + return isinstance(x, str) + + +@@ -163,7 +163,7 @@ + AddKind(parameter.kind) + + import_files = list( +- map(lambda x: '{}.mojolpm.proto'.format(x.path), seen_imports)) ++ ['{}.mojolpm.proto'.format(x.path) for x in seen_imports]) + if self.needs_mojolpm_proto: + import_files.append('mojo/public/tools/fuzzers/mojolpm.proto') + import_files.sort() +--- a/src/3rdparty/chromium/mojo/public/tools/mojom/check_stable_mojom_compatibility.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/mojom/check_stable_mojom_compatibility.py 2025-01-16 02:26:08.530680575 +0800 +@@ -89,10 +89,10 @@ + modules[mojom] = translate.OrderedModule(ast, mojom, all_modules) + + old_modules = {} +- for mojom in old_files.keys(): ++ for mojom in list(old_files.keys()): + parseMojom(mojom, old_files, old_modules) + new_modules = {} +- for mojom in new_files.keys(): ++ for mojom in list(new_files.keys()): + parseMojom(mojom, new_files, new_modules) + + # At this point we have a complete set of translated Modules from both the +@@ -104,7 +104,7 @@ + # checked. + def collectTypes(modules): + types = {} +- for m in modules.values(): ++ for m in list(modules.values()): + for kinds in (m.enums, m.structs, m.unions, m.interfaces): + for kind in kinds: + types[kind.qualified_name] = kind +@@ -115,12 +115,12 @@ + + # Collect any renamed types so they can be compared accordingly. 
+ renamed_types = {} +- for name, kind in new_types.items(): ++ for name, kind in list(new_types.items()): + old_name = kind.attributes and kind.attributes.get('RenamedFrom') + if old_name: + renamed_types[old_name] = name + +- for qualified_name, kind in old_types.items(): ++ for qualified_name, kind in list(old_types.items()): + if not kind.stable: + continue + +--- a/src/3rdparty/chromium/mojo/public/tools/mojom/enum_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/mojom/enum_unittest.py 2025-01-16 02:26:08.530680575 +0800 +@@ -69,11 +69,11 @@ + def testInvalidConstantReference(self): + """Verifies that enum values cannot be assigned from the value of + non-integral constants.""" +- with self.assertRaisesRegexp(ValueError, 'not an integer'): ++ with self.assertRaisesRegex(ValueError, 'not an integer'): + self.ExtractTypes('const float kFoo = 1.0; enum E { kA = kFoo };') +- with self.assertRaisesRegexp(ValueError, 'not an integer'): ++ with self.assertRaisesRegex(ValueError, 'not an integer'): + self.ExtractTypes('const double kFoo = 1.0; enum E { kA = kFoo };') +- with self.assertRaisesRegexp(ValueError, 'not an integer'): ++ with self.assertRaisesRegex(ValueError, 'not an integer'): + self.ExtractTypes('const string kFoo = "lol"; enum E { kA = kFoo };') + + def testImportedConstantReference(self): +--- a/src/3rdparty/chromium/mojo/public/tools/mojom/mojom_parser.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/mojom/mojom_parser.py 2025-01-16 02:26:08.530680575 +0800 +@@ -148,7 +148,7 @@ + with open(metadata_filename) as f: + metadata = json.load(f) + allowed_imports.update( +- map(os.path.normcase, map(os.path.normpath, metadata['sources']))) ++ list(map(os.path.normcase, list(map(os.path.normpath, metadata['sources']))))) + for dep_metadata in metadata['deps']: + if dep_metadata not in processed_deps: + collect(dep_metadata) +@@ -192,8 +192,8 @@ + _RebaseAbsolutePath(abs_path, input_root_paths)) + for abs_path in mojom_files) + abs_paths = dict( +- (path, abs_path) for abs_path, path in mojom_files_to_parse.items()) +- for mojom_abspath, _ in mojom_files_to_parse.items(): ++ (path, abs_path) for abs_path, path in list(mojom_files_to_parse.items())) ++ for mojom_abspath, _ in list(mojom_files_to_parse.items()): + with codecs.open(mojom_abspath, encoding='utf-8') as f: + ast = parser.Parse(''.join(f.readlines()), mojom_abspath) + conditional_features.RemoveDisabledDefinitions(ast, enabled_features) +@@ -234,7 +234,7 @@ + # and we have a complete dependency tree of the unprocessed inputs. Now we can + # load all the inputs, resolving dependencies among them recursively as we go. + num_existing_modules_loaded = len(loaded_modules) +- for mojom_abspath, mojom_path in mojom_files_to_parse.items(): ++ for mojom_abspath, mojom_path in list(mojom_files_to_parse.items()): + _EnsureInputLoaded(mojom_abspath, mojom_path, abs_paths, loaded_mojom_asts, + input_dependencies, loaded_modules) + assert (num_existing_modules_loaded + +@@ -242,7 +242,7 @@ + + # Now we have fully translated modules for every input and every transitive + # dependency. We can dump the modules to disk for other tools to use. 
+- for mojom_abspath, mojom_path in mojom_files_to_parse.items(): ++ for mojom_abspath, mojom_path in list(mojom_files_to_parse.items()): + module_path = os.path.join(output_root_path, _GetModuleFilename(mojom_path)) + module_dir = os.path.dirname(module_path) + if not os.path.exists(module_dir): +--- a/src/3rdparty/chromium/mojo/public/tools/mojom/mojom_parser_test_case.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/mojom/mojom_parser_test_case.py 2025-01-16 02:26:08.530680575 +0800 +@@ -56,7 +56,7 @@ + args = [ + '--input-root', self._temp_dir, '--input-root', out_dir, + '--output-root', out_dir, '--mojoms' +- ] + list(map(lambda mojom: os.path.join(self._temp_dir, mojom), mojoms)) ++ ] + list([os.path.join(self._temp_dir, mojom) for mojom in mojoms]) + if metadata: + args.extend(['--check-imports', self.GetPath(metadata)]) + mojom_parser.Run(args) +--- a/src/3rdparty/chromium/mojo/public/tools/mojom/mojom_parser_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/mojom/mojom_parser_unittest.py 2025-01-16 02:26:08.531763890 +0800 +@@ -86,7 +86,7 @@ + module a; + import "non-existent.mojom"; + struct Bar {};""") +- with self.assertRaisesRegexp(ValueError, "does not exist"): ++ with self.assertRaisesRegex(ValueError, "does not exist"): + self.ParseMojoms([a]) + + def testUnparsedImport(self): +@@ -106,7 +106,7 @@ + + # a.mojom has not been parsed yet, so its import will fail when processing + # b.mojom here. +- with self.assertRaisesRegexp(ValueError, "does not exist"): ++ with self.assertRaisesRegex(ValueError, "does not exist"): + self.ParseMojoms([b]) + + def testCheckImportsBasic(self): +@@ -167,5 +167,5 @@ + struct Foo { a.Bar bar; };""") + + self.ParseMojoms([a], metadata=a_metadata) +- with self.assertRaisesRegexp(ValueError, "not allowed by build"): ++ with self.assertRaisesRegex(ValueError, "not allowed by build"): + self.ParseMojoms([b], metadata=b_metadata) +--- a/src/3rdparty/chromium/mojo/public/tools/mojom/stable_attribute_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/mojom/stable_attribute_unittest.py 2025-01-16 02:26:08.531763890 +0800 +@@ -52,20 +52,20 @@ + self.ExtractTypes( + '[Stable] interface F {}; [Stable] struct T { pending_remote f; };') + +- with self.assertRaisesRegexp(Exception, 'because it depends on E'): ++ with self.assertRaisesRegex(Exception, 'because it depends on E'): + self.ExtractTypes('enum E { A }; [Stable] struct S { E e; };') +- with self.assertRaisesRegexp(Exception, 'because it depends on X'): ++ with self.assertRaisesRegex(Exception, 'because it depends on X'): + self.ExtractTypes('struct X {}; [Stable] struct S { X x; };') +- with self.assertRaisesRegexp(Exception, 'because it depends on T'): ++ with self.assertRaisesRegex(Exception, 'because it depends on T'): + self.ExtractTypes('struct T {}; [Stable] struct S { array xs; };') +- with self.assertRaisesRegexp(Exception, 'because it depends on T'): ++ with self.assertRaisesRegex(Exception, 'because it depends on T'): + self.ExtractTypes('struct T {}; [Stable] struct S { map xs; };') +- with self.assertRaisesRegexp(Exception, 'because it depends on T'): ++ with self.assertRaisesRegex(Exception, 'because it depends on T'): + self.ExtractTypes('struct T {}; [Stable] struct S { map xs; };') +- with self.assertRaisesRegexp(Exception, 'because it depends on F'): ++ with self.assertRaisesRegex(Exception, 'because it depends on F'): + self.ExtractTypes( + 'interface F {}; 
[Stable] struct S { pending_remote f; };') +- with self.assertRaisesRegexp(Exception, 'because it depends on F'): ++ with self.assertRaisesRegex(Exception, 'because it depends on F'): + self.ExtractTypes( + 'interface F {}; [Stable] struct S { pending_receiver f; };') + +@@ -80,20 +80,20 @@ + self.ExtractTypes( + '[Stable] interface F {}; [Stable] union U { pending_remote f; };') + +- with self.assertRaisesRegexp(Exception, 'because it depends on E'): ++ with self.assertRaisesRegex(Exception, 'because it depends on E'): + self.ExtractTypes('enum E { A }; [Stable] union U { E e; };') +- with self.assertRaisesRegexp(Exception, 'because it depends on X'): ++ with self.assertRaisesRegex(Exception, 'because it depends on X'): + self.ExtractTypes('struct X {}; [Stable] union U { X x; };') +- with self.assertRaisesRegexp(Exception, 'because it depends on T'): ++ with self.assertRaisesRegex(Exception, 'because it depends on T'): + self.ExtractTypes('struct T {}; [Stable] union U { array xs; };') +- with self.assertRaisesRegexp(Exception, 'because it depends on T'): ++ with self.assertRaisesRegex(Exception, 'because it depends on T'): + self.ExtractTypes('struct T {}; [Stable] union U { map xs; };') +- with self.assertRaisesRegexp(Exception, 'because it depends on T'): ++ with self.assertRaisesRegex(Exception, 'because it depends on T'): + self.ExtractTypes('struct T {}; [Stable] union U { map xs; };') +- with self.assertRaisesRegexp(Exception, 'because it depends on F'): ++ with self.assertRaisesRegex(Exception, 'because it depends on F'): + self.ExtractTypes( + 'interface F {}; [Stable] union U { pending_remote f; };') +- with self.assertRaisesRegexp(Exception, 'because it depends on F'): ++ with self.assertRaisesRegex(Exception, 'because it depends on F'): + self.ExtractTypes( + 'interface F {}; [Stable] union U { pending_receiver f; };') + +@@ -109,19 +109,19 @@ + [Stable] interface F { A@0(E e, S s) => (bool b, array s); }; + """) + +- with self.assertRaisesRegexp(Exception, 'because it depends on E'): ++ with self.assertRaisesRegex(Exception, 'because it depends on E'): + self.ExtractTypes( + 'enum E { A, B, C }; [Stable] interface F { A@0(E e); };') +- with self.assertRaisesRegexp(Exception, 'because it depends on E'): ++ with self.assertRaisesRegex(Exception, 'because it depends on E'): + self.ExtractTypes( + 'enum E { A, B, C }; [Stable] interface F { A@0(int32 x) => (E e); };' + ) +- with self.assertRaisesRegexp(Exception, 'because it depends on S'): ++ with self.assertRaisesRegex(Exception, 'because it depends on S'): + self.ExtractTypes( + 'struct S {}; [Stable] interface F { A@0(int32 x) => (S s); };') +- with self.assertRaisesRegexp(Exception, 'because it depends on S'): ++ with self.assertRaisesRegex(Exception, 'because it depends on S'): + self.ExtractTypes( + 'struct S {}; [Stable] interface F { A@0(S s) => (bool b); };') + +- with self.assertRaisesRegexp(Exception, 'explicit method ordinals'): ++ with self.assertRaisesRegex(Exception, 'explicit method ordinals'): + self.ExtractTypes('[Stable] interface F { A() => (); };') +--- a/src/3rdparty/chromium/mojo/public/tools/mojom/version_compatibility_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/mojom/version_compatibility_unittest.py 2025-01-16 02:26:08.531763890 +0800 +@@ -23,14 +23,14 @@ + + checker = module.BackwardCompatibilityChecker() + compatibility_map = {} +- for name in old.keys(): ++ for name in list(old.keys()): + compatibility_map[name] = checker.IsBackwardCompatible( + 
new[name], old[name]) + return compatibility_map + + def assertBackwardCompatible(self, old_mojom, new_mojom): + compatibility_map = self._GetTypeCompatibilityMap(old_mojom, new_mojom) +- for name, compatible in compatibility_map.items(): ++ for name, compatible in list(compatibility_map.items()): + if not compatible: + raise AssertionError( + 'Given the old mojom:\n\n %s\n\nand the new mojom:\n\n %s\n\n' +--- a/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/constant_resolver.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/constant_resolver.py 2025-01-16 02:26:08.531763890 +0800 +@@ -3,7 +3,7 @@ + # found in the LICENSE file. + """Resolves the values used for constants and enums.""" + +-from itertools import ifilter ++ + + from mojom.generate import module as mojom + +@@ -16,7 +16,7 @@ + assert isinstance(named_value, (mojom.EnumValue, mojom.ConstantValue)) + if isinstance(named_value, mojom.EnumValue): + field = next( +- ifilter(lambda field: field.name == named_value.name, ++ filter(lambda field: field.name == named_value.name, + named_value.enum.fields), None) + if not field: + raise RuntimeError( +--- a/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/generator.py 2025-01-14 21:29:17.871645499 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/generator.py 2025-01-16 02:26:08.531763890 +0800 +@@ -3,7 +3,7 @@ + # found in the LICENSE file. + """Code shared by the various language-specific code generators.""" + +-from __future__ import print_function ++ + + from functools import partial + import os.path +@@ -76,7 +76,7 @@ + + + if upper: +- words = map(lambda x: x.upper(), words) ++ words = [x.upper() for x in words] + + return '_'.join(words) + +--- a/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/generator_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/generator_unittest.py 2025-01-16 02:26:08.531763890 +0800 +@@ -30,43 +30,43 @@ + """generator contains some string utilities, this tests only those.""" + + def testSplitCamelCase(self): +- self.assertEquals(["camel", "case"], generator.SplitCamelCase("CamelCase")) +- self.assertEquals(["url", "loader", "factory"], ++ self.assertEqual(["camel", "case"], generator.SplitCamelCase("CamelCase")) ++ self.assertEqual(["url", "loader", "factory"], + generator.SplitCamelCase('URLLoaderFactory')) +- self.assertEquals(["get99", "entries"], ++ self.assertEqual(["get99", "entries"], + generator.SplitCamelCase('Get99Entries')) +- self.assertEquals(["get99entries"], ++ self.assertEqual(["get99entries"], + generator.SplitCamelCase('Get99entries')) + + def testToCamel(self): +- self.assertEquals("CamelCase", generator.ToCamel("camel_case")) +- self.assertEquals("CAMELCASE", generator.ToCamel("CAMEL_CASE")) +- self.assertEquals("camelCase", ++ self.assertEqual("CamelCase", generator.ToCamel("camel_case")) ++ self.assertEqual("CAMELCASE", generator.ToCamel("CAMEL_CASE")) ++ self.assertEqual("camelCase", + generator.ToCamel("camel_case", lower_initial=True)) +- self.assertEquals("CamelCase", generator.ToCamel( ++ self.assertEqual("CamelCase", generator.ToCamel( + "camel case", delimiter=' ')) +- self.assertEquals("CaMelCaSe", generator.ToCamel("caMel_caSe")) +- self.assertEquals("L2Tp", generator.ToCamel("l2tp", digits_split=True)) +- self.assertEquals("l2tp", generator.ToCamel("l2tp", lower_initial=True)) ++ self.assertEqual("CaMelCaSe", 
generator.ToCamel("caMel_caSe")) ++ self.assertEqual("L2Tp", generator.ToCamel("l2tp", digits_split=True)) ++ self.assertEqual("l2tp", generator.ToCamel("l2tp", lower_initial=True)) + + def testToSnakeCase(self): +- self.assertEquals("snake_case", generator.ToLowerSnakeCase("SnakeCase")) +- self.assertEquals("snake_case", generator.ToLowerSnakeCase("snakeCase")) +- self.assertEquals("snake_case", generator.ToLowerSnakeCase("SnakeCASE")) +- self.assertEquals("snake_d3d11_case", ++ self.assertEqual("snake_case", generator.ToLowerSnakeCase("SnakeCase")) ++ self.assertEqual("snake_case", generator.ToLowerSnakeCase("snakeCase")) ++ self.assertEqual("snake_case", generator.ToLowerSnakeCase("SnakeCASE")) ++ self.assertEqual("snake_d3d11_case", + generator.ToLowerSnakeCase("SnakeD3D11Case")) +- self.assertEquals("snake_d3d11_case", ++ self.assertEqual("snake_d3d11_case", + generator.ToLowerSnakeCase("SnakeD3d11Case")) +- self.assertEquals("snake_d3d11_case", ++ self.assertEqual("snake_d3d11_case", + generator.ToLowerSnakeCase("snakeD3d11Case")) +- self.assertEquals("SNAKE_CASE", generator.ToUpperSnakeCase("SnakeCase")) +- self.assertEquals("SNAKE_CASE", generator.ToUpperSnakeCase("snakeCase")) +- self.assertEquals("SNAKE_CASE", generator.ToUpperSnakeCase("SnakeCASE")) +- self.assertEquals("SNAKE_D3D11_CASE", ++ self.assertEqual("SNAKE_CASE", generator.ToUpperSnakeCase("SnakeCase")) ++ self.assertEqual("SNAKE_CASE", generator.ToUpperSnakeCase("snakeCase")) ++ self.assertEqual("SNAKE_CASE", generator.ToUpperSnakeCase("SnakeCASE")) ++ self.assertEqual("SNAKE_D3D11_CASE", + generator.ToUpperSnakeCase("SnakeD3D11Case")) +- self.assertEquals("SNAKE_D3D11_CASE", ++ self.assertEqual("SNAKE_D3D11_CASE", + generator.ToUpperSnakeCase("SnakeD3d11Case")) +- self.assertEquals("SNAKE_D3D11_CASE", ++ self.assertEqual("SNAKE_D3D11_CASE", + generator.ToUpperSnakeCase("snakeD3d11Case")) + + +--- a/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/module.py 2025-01-14 21:29:17.871645499 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/module.py 2025-01-16 02:26:08.531763890 +0800 +@@ -73,7 +73,7 @@ + return ('{\n%s\n}' % (',\n'.join( + ' %s: %s' % (Repr(key, as_ref).replace('\n', '\n '), + Repr(val, as_ref).replace('\n', '\n ')) +- for key, val in obj.items()))) ++ for key, val in list(obj.items())))) + else: + return repr(obj) + +@@ -96,7 +96,7 @@ + '\n', '\n ')) + + return '%s(\n%s\n)' % (obj.__class__.__name__, ',\n'.join( +- ReprIndent(name, as_ref) for (name, as_ref) in names.items())) ++ ReprIndent(name, as_ref) for (name, as_ref) in list(names.items()))) + + + class Kind(object): +@@ -646,7 +646,7 @@ + return False + + max_old_min_version = 0 +- for ordinal, old_field in old_fields.items(): ++ for ordinal, old_field in list(old_fields.items()): + new_field = new_fields.get(ordinal) + if not new_field: + # A field was removed, which is not OK. +@@ -1130,7 +1130,7 @@ + new_methods = buildOrdinalMethodMap(self) + old_methods = buildOrdinalMethodMap(older_interface) + max_old_min_version = 0 +- for ordinal, old_method in old_methods.items(): ++ for ordinal, old_method in list(old_methods.items()): + new_method = new_methods.get(ordinal) + if not new_method: + # A method was removed, which is not OK. 
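The dict-iteration hunks above and below all make the same Python 3 change: dict.keys(), dict.values(), and dict.items() now return lazy views rather than lists, so any loop that mutates the dict while iterating, and any code that indexes or compares the result like a list, must materialize the view first; the conversion wraps the views in list() conservatively, even where plain iteration would have been safe. A minimal standalone sketch of the failure mode and the fix (the sample dict is hypothetical, not taken from the patched files):

    # Python 3: items() is a live view; deleting a key while iterating it
    # raises "RuntimeError: dictionary changed size during iteration".
    methods = {0: 'Frobinate', 1: 'Deprecated'}
    for ordinal, name in list(methods.items()):  # list() takes a snapshot
        if name == 'Deprecated':
            del methods[ordinal]                 # safe: we iterate the copy
    assert methods == {0: 'Frobinate'}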
+@@ -1312,10 +1312,10 @@ + old_fields = buildVersionFieldMap(older_enum) + new_fields = buildVersionFieldMap(self) + +- if new_fields.keys() != old_fields.keys() and not older_enum.extensible: ++ if list(new_fields.keys()) != list(old_fields.keys()) and not older_enum.extensible: + return False + +- for min_version, valid_values in old_fields.items(): ++ for min_version, valid_values in list(old_fields.items()): + if (min_version not in new_fields + or new_fields[min_version] != valid_values): + return False +--- a/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/module_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/module_unittest.py 2025-01-16 02:26:08.531763890 +0800 +@@ -15,7 +15,7 @@ + struct = mojom.Struct('TestStruct', module=module) + with self.assertRaises(Exception) as e: + mojom.InterfaceRequest(struct) +- self.assertEquals( ++ self.assertEqual( + e.exception.__str__(), + 'Interface request requires \'x:TestStruct\' to be an interface.') + +@@ -26,6 +26,6 @@ + struct = mojom.Struct('TestStruct', module=module) + with self.assertRaises(Exception) as e: + mojom.AssociatedInterface(struct) +- self.assertEquals( ++ self.assertEqual( + e.exception.__str__(), + 'Associated interface requires \'x:TestStruct\' to be an interface.') +--- a/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/template_expander.py 2025-01-14 21:29:17.871645499 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/template_expander.py 2025-01-16 02:26:08.531763890 +0800 +@@ -67,7 +67,7 @@ + + + def PrecompileTemplates(generator_modules, output_dir): +- for module in generator_modules.values(): ++ for module in list(generator_modules.values()): + generator = module.Generator(None) + jinja_env = jinja2.Environment( + loader=jinja2.FileSystemLoader([ +--- a/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/translate.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/translate.py 2025-01-16 02:26:08.531763890 +0800 +@@ -21,7 +21,7 @@ + + def _IsStrOrUnicode(x): + if sys.version_info[0] < 3: +- return isinstance(x, (unicode, str)) ++ return isinstance(x, str) + return isinstance(x, str) + + +@@ -195,7 +195,7 @@ + """Given a kind and an identifier, this attempts to resolve the given + identifier to a concrete NamedValue within the scope of the given kind.""" + scope = _GetScopeForKind(module, kind) +- for i in reversed(range(len(scope) + 1)): ++ for i in reversed(list(range(len(scope) + 1))): + qualified_name = '.'.join(scope[:i] + (identifier, )) + value = module.values.get(qualified_name) + if value: +@@ -304,12 +304,12 @@ + def _Import(module, import_module): + # Copy the struct kinds from our imports into the current module. + importable_kinds = (mojom.Struct, mojom.Union, mojom.Enum, mojom.Interface) +- for kind in import_module.kinds.values(): ++ for kind in list(import_module.kinds.values()): + if (isinstance(kind, importable_kinds) + and kind.module.path == import_module.path): + module.kinds[kind.spec] = kind + # Ditto for values. 
+- for value in import_module.values.values(): ++ for value in list(import_module.values.values()): + if value.module.path == import_module.path: + module.values[value.GetSpec()] = value + +@@ -458,12 +458,10 @@ + parsed_method.mojom_name, + ordinal=parsed_method.ordinal.value if parsed_method.ordinal else None) + method.parameters = list( +- map(lambda parameter: _Parameter(module, parameter, interface), +- parsed_method.parameter_list)) ++ [_Parameter(module, parameter, interface) for parameter in parsed_method.parameter_list]) + if parsed_method.response_parameter_list is not None: + method.response_parameters = list( +- map(lambda parameter: _Parameter(module, parameter, interface), +- parsed_method.response_parameter_list)) ++ [_Parameter(module, parameter, interface) for parameter in parsed_method.response_parameter_list]) + method.attributes = _AttributeListToDict(parsed_method.attribute_list) + + # Enforce that only methods with response can have a [Sync] attribute. +@@ -589,8 +587,7 @@ + + if not enum.native_only: + enum.fields = list( +- map(lambda field: _EnumField(module, enum, field), +- parsed_enum.enum_value_list)) ++ [_EnumField(module, enum, field) for field in parsed_enum.enum_value_list]) + _ResolveNumericEnumValues(enum) + + module.kinds[enum.spec] = enum +@@ -785,8 +782,7 @@ + all_defined_kinds = {} + for struct in module.structs: + struct.fields = list( +- map(lambda field: _StructField(module, field, struct), +- struct.fields_data)) ++ [_StructField(module, field, struct) for field in struct.fields_data]) + _AssignDefaultOrdinals(struct.fields) + del struct.fields_data + all_defined_kinds[struct.spec] = struct +@@ -795,15 +791,14 @@ + + for union in module.unions: + union.fields = list( +- map(lambda field: _UnionField(module, field, union), union.fields_data)) ++ [_UnionField(module, field, union) for field in union.fields_data]) + _AssignDefaultOrdinals(union.fields) + del union.fields_data + all_defined_kinds[union.spec] = union + + for interface in module.interfaces: + interface.methods = list( +- map(lambda method: _Method(module, method, interface), +- interface.methods_data)) ++ [_Method(module, method, interface) for method in interface.methods_data]) + _AssignDefaultOrdinals(interface.methods) + del interface.methods_data + all_defined_kinds[interface.spec] = interface +@@ -813,7 +808,7 @@ + all_defined_kinds[enum.spec] = enum + + all_referenced_kinds = _CollectReferencedKinds(module, +- all_defined_kinds.values()) ++ list(all_defined_kinds.values())) + imported_kind_specs = set(all_referenced_kinds.keys()).difference( + set(all_defined_kinds.keys())) + module.imported_kinds = dict( +--- a/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/translate_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/generate/translate_unittest.py 2025-01-16 02:26:08.531763890 +0800 +@@ -18,18 +18,18 @@ + def testSimpleArray(self): + """Tests a simple int32[].""" + # pylint: disable=W0212 +- self.assertEquals(translate._MapKind("int32[]"), "a:i32") ++ self.assertEqual(translate._MapKind("int32[]"), "a:i32") + + def testAssociativeArray(self): + """Tests a simple uint8{string}.""" + # pylint: disable=W0212 +- self.assertEquals(translate._MapKind("uint8{string}"), "m[s][u8]") ++ self.assertEqual(translate._MapKind("uint8{string}"), "m[s][u8]") + + def testLeftToRightAssociativeArray(self): + """Makes sure that parsing is done from right to left on the internal kinds + in the presence of an associative array.""" + 
# pylint: disable=W0212 +- self.assertEquals(translate._MapKind("uint8[]{string}"), "m[s][a:u8]") ++ self.assertEqual(translate._MapKind("uint8[]{string}"), "m[s][a:u8]") + + def testTranslateSimpleUnions(self): + """Makes sure that a simple union is translated correctly.""" +@@ -67,7 +67,7 @@ + def testAssociatedKinds(self): + """Tests type spec translation of associated interfaces and requests.""" + # pylint: disable=W0212 +- self.assertEquals( ++ self.assertEqual( + translate._MapKind("asso?"), "?asso:x:SomeInterface") +- self.assertEquals( ++ self.assertEqual( + translate._MapKind("asso?"), "?asso:r:x:SomeInterface") +--- a/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/parse/ast.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/parse/ast.py 2025-01-16 02:26:08.531763890 +0800 +@@ -14,7 +14,7 @@ + + def _IsStrOrUnicode(x): + if sys.version_info[0] < 3: +- return isinstance(x, (unicode, str)) ++ return isinstance(x, str) + return isinstance(x, str) + + +--- a/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/parse/ast_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/parse/ast_unittest.py 2025-01-16 02:26:08.531763890 +0800 +@@ -35,8 +35,8 @@ + # comparison by value and ignore filenames/line numbers (for convenience). + node1 = ast.NodeBase(filename="hello.mojom", lineno=123) + node2 = ast.NodeBase() +- self.assertEquals(node1, node2) +- self.assertEquals(node2, node1) ++ self.assertEqual(node1, node2) ++ self.assertEqual(node2, node1) + + # Check that |__ne__()| just defers to |__eq__()| properly. + self.assertFalse(node1 != node2) +@@ -44,8 +44,8 @@ + + # Check that |filename| and |lineno| are set properly (and are None by + # default). +- self.assertEquals(node1.filename, "hello.mojom") +- self.assertEquals(node1.lineno, 123) ++ self.assertEqual(node1.filename, "hello.mojom") ++ self.assertEqual(node1.lineno, 123) + self.assertIsNone(node2.filename) + self.assertIsNone(node2.lineno) + +@@ -59,9 +59,9 @@ + self.assertFalse(node3 == node1) + + node4 = _TestNode(123, filename="world.mojom", lineno=123) +- self.assertEquals(node4, node3) ++ self.assertEqual(node4, node3) + node5 = _TestNode(456) +- self.assertNotEquals(node5, node4) ++ self.assertNotEqual(node5, node4) + + def testNodeListBase(self): + node1 = _TestNode(1, filename="foo.mojom", lineno=1) +@@ -70,52 +70,52 @@ + node2 = _TestNode(2, filename="foo.mojom", lineno=2) + + nodelist1 = _TestNodeList() # Contains: (empty). +- self.assertEquals(nodelist1, nodelist1) +- self.assertEquals(nodelist1.items, []) ++ self.assertEqual(nodelist1, nodelist1) ++ self.assertEqual(nodelist1.items, []) + self.assertIsNone(nodelist1.filename) + self.assertIsNone(nodelist1.lineno) + + nodelist2 = _TestNodeList(node1) # Contains: 1. +- self.assertEquals(nodelist2, nodelist2) +- self.assertEquals(nodelist2.items, [node1]) ++ self.assertEqual(nodelist2, nodelist2) ++ self.assertEqual(nodelist2.items, [node1]) + self.assertNotEqual(nodelist2, nodelist1) +- self.assertEquals(nodelist2.filename, "foo.mojom") +- self.assertEquals(nodelist2.lineno, 1) ++ self.assertEqual(nodelist2.filename, "foo.mojom") ++ self.assertEqual(nodelist2.lineno, 1) + + nodelist3 = _TestNodeList([node2]) # Contains: 2. 
+- self.assertEquals(nodelist3.items, [node2]) ++ self.assertEqual(nodelist3.items, [node2]) + self.assertNotEqual(nodelist3, nodelist1) + self.assertNotEqual(nodelist3, nodelist2) +- self.assertEquals(nodelist3.filename, "foo.mojom") +- self.assertEquals(nodelist3.lineno, 2) ++ self.assertEqual(nodelist3.filename, "foo.mojom") ++ self.assertEqual(nodelist3.lineno, 2) + + nodelist1.Append(node1b) # Contains: 1. +- self.assertEquals(nodelist1.items, [node1]) +- self.assertEquals(nodelist1, nodelist2) ++ self.assertEqual(nodelist1.items, [node1]) ++ self.assertEqual(nodelist1, nodelist2) + self.assertNotEqual(nodelist1, nodelist3) +- self.assertEquals(nodelist1.filename, "foo.mojom") +- self.assertEquals(nodelist1.lineno, 1) ++ self.assertEqual(nodelist1.filename, "foo.mojom") ++ self.assertEqual(nodelist1.lineno, 1) + + nodelist1.Append(node2) # Contains: 1, 2. +- self.assertEquals(nodelist1.items, [node1, node2]) ++ self.assertEqual(nodelist1.items, [node1, node2]) + self.assertNotEqual(nodelist1, nodelist2) + self.assertNotEqual(nodelist1, nodelist3) +- self.assertEquals(nodelist1.lineno, 1) ++ self.assertEqual(nodelist1.lineno, 1) + + nodelist2.Append(node2) # Contains: 1, 2. +- self.assertEquals(nodelist2.items, [node1, node2]) +- self.assertEquals(nodelist2, nodelist1) ++ self.assertEqual(nodelist2.items, [node1, node2]) ++ self.assertEqual(nodelist2, nodelist1) + self.assertNotEqual(nodelist2, nodelist3) +- self.assertEquals(nodelist2.lineno, 1) ++ self.assertEqual(nodelist2.lineno, 1) + + nodelist3.Insert(node1) # Contains: 1, 2. +- self.assertEquals(nodelist3.items, [node1, node2]) +- self.assertEquals(nodelist3, nodelist1) +- self.assertEquals(nodelist3, nodelist2) +- self.assertEquals(nodelist3.lineno, 1) ++ self.assertEqual(nodelist3.items, [node1, node2]) ++ self.assertEqual(nodelist3, nodelist1) ++ self.assertEqual(nodelist3, nodelist2) ++ self.assertEqual(nodelist3.lineno, 1) + + # Test iteration: + i = 1 + for item in nodelist1: +- self.assertEquals(item.value, i) ++ self.assertEqual(item.value, i) + i += 1 +--- a/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/parse/conditional_features_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/parse/conditional_features_unittest.py 2025-01-16 02:26:08.531763890 +0800 +@@ -37,7 +37,7 @@ + definition = parser.Parse(source, "my_file.mojom") + conditional_features.RemoveDisabledDefinitions(definition, ENABLED_FEATURES) + expected = parser.Parse(expected_source, "my_file.mojom") +- self.assertEquals(definition, expected) ++ self.assertEqual(definition, expected) + + def testFilterConst(self): + """Test that Consts are correctly filtered.""" +--- a/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/parse/lexer_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/parse/lexer_unittest.py 2025-01-16 02:26:08.531763890 +0800 +@@ -63,117 +63,117 @@ + + def testValidKeywords(self): + """Tests valid keywords.""" +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("handle"), _MakeLexTokenForKeyword("handle")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("import"), _MakeLexTokenForKeyword("import")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("module"), _MakeLexTokenForKeyword("module")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("struct"), _MakeLexTokenForKeyword("struct")) +- self.assertEquals( ++ self.assertEqual( + 
self._SingleTokenForInput("union"), _MakeLexTokenForKeyword("union")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("interface"), + _MakeLexTokenForKeyword("interface")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("enum"), _MakeLexTokenForKeyword("enum")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("const"), _MakeLexTokenForKeyword("const")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("true"), _MakeLexTokenForKeyword("true")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("false"), _MakeLexTokenForKeyword("false")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("default"), + _MakeLexTokenForKeyword("default")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("array"), _MakeLexTokenForKeyword("array")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("map"), _MakeLexTokenForKeyword("map")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("associated"), + _MakeLexTokenForKeyword("associated")) + + def testValidIdentifiers(self): + """Tests identifiers.""" +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("abcd"), _MakeLexToken("NAME", "abcd")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("AbC_d012_"), + _MakeLexToken("NAME", "AbC_d012_")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("_0123"), _MakeLexToken("NAME", "_0123")) + + def testInvalidIdentifiers(self): +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + mojom.parse.lexer.LexError, + r"^my_file\.mojom:1: Error: Illegal character '\$'$"): + self._TokensForInput("$abc") +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + mojom.parse.lexer.LexError, + r"^my_file\.mojom:1: Error: Illegal character '\$'$"): + self._TokensForInput("a$bc") + + def testDecimalIntegerConstants(self): +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("0"), _MakeLexToken("INT_CONST_DEC", "0")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("1"), _MakeLexToken("INT_CONST_DEC", "1")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("123"), _MakeLexToken("INT_CONST_DEC", "123")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("10"), _MakeLexToken("INT_CONST_DEC", "10")) + + def testValidTokens(self): + """Tests valid tokens (which aren't tested elsewhere).""" + # Keywords tested in |testValidKeywords|. + # NAME tested in |testValidIdentifiers|. 
+- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("@123"), _MakeLexToken("ORDINAL", "@123")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("456"), _MakeLexToken("INT_CONST_DEC", "456")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("0x01aB2eF3"), + _MakeLexToken("INT_CONST_HEX", "0x01aB2eF3")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("123.456"), + _MakeLexToken("FLOAT_CONST", "123.456")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("\"hello\""), + _MakeLexToken("STRING_LITERAL", "\"hello\"")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("+"), _MakeLexToken("PLUS", "+")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("-"), _MakeLexToken("MINUS", "-")) +- self.assertEquals(self._SingleTokenForInput("&"), _MakeLexToken("AMP", "&")) +- self.assertEquals( ++ self.assertEqual(self._SingleTokenForInput("&"), _MakeLexToken("AMP", "&")) ++ self.assertEqual( + self._SingleTokenForInput("?"), _MakeLexToken("QSTN", "?")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("="), _MakeLexToken("EQUALS", "=")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("=>"), _MakeLexToken("RESPONSE", "=>")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("("), _MakeLexToken("LPAREN", "(")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput(")"), _MakeLexToken("RPAREN", ")")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("["), _MakeLexToken("LBRACKET", "[")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("]"), _MakeLexToken("RBRACKET", "]")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("{"), _MakeLexToken("LBRACE", "{")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("}"), _MakeLexToken("RBRACE", "}")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput("<"), _MakeLexToken("LANGLE", "<")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput(">"), _MakeLexToken("RANGLE", ">")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput(";"), _MakeLexToken("SEMI", ";")) +- self.assertEquals( ++ self.assertEqual( + self._SingleTokenForInput(","), _MakeLexToken("COMMA", ",")) +- self.assertEquals(self._SingleTokenForInput("."), _MakeLexToken("DOT", ".")) ++ self.assertEqual(self._SingleTokenForInput("."), _MakeLexToken("DOT", ".")) + + def _TokensForInput(self, input_string): + """Gets a list of tokens for the given input string.""" +--- a/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/parse/parser_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/mojo/public/tools/mojom/mojom/parse/parser_unittest.py 2025-01-16 02:26:08.531763890 +0800 +@@ -25,7 +25,7 @@ + """ + expected = ast.Mojom( + ast.Module(('IDENTIFIER', 'my_module'), None), ast.ImportList(), []) +- self.assertEquals(parser.Parse(source, "my_file.mojom"), expected) ++ self.assertEqual(parser.Parse(source, "my_file.mojom"), expected) + + def testSourceWithCrLfs(self): + """Tests a .mojom source with CR-LFs instead of LFs.""" +@@ -33,7 +33,7 @@ + source = "// This is a comment.\r\n\r\nmodule my_module;\r\n" + expected = ast.Mojom( + ast.Module(('IDENTIFIER', 'my_module'), None), ast.ImportList(), []) +- self.assertEquals(parser.Parse(source, "my_file.mojom"), expected) ++ self.assertEqual(parser.Parse(source, "my_file.mojom"), 
expected) + + def testUnexpectedEOF(self): + """Tests a "truncated" .mojom source.""" +@@ -43,7 +43,7 @@ + + module my_module + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom: Error: Unexpected end of file$"): + parser.Parse(source, "my_file.mojom") + +@@ -57,7 +57,7 @@ + // Foo. + asdf1 + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, + r"^my_file\.mojom:4: Error: Unexpected 'asdf1':\n *asdf1$"): + parser.Parse(source1, "my_file.mojom") +@@ -74,7 +74,7 @@ + + asdf2 + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, + r"^my_file\.mojom:10: Error: Unexpected 'asdf2':\n *asdf2$"): + parser.Parse(source2, "my_file.mojom") +@@ -86,7 +86,7 @@ + /* Baz. */ + asdf3 + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, + r"^my_file\.mojom:5: Error: Unexpected 'asdf3':\n *asdf3$"): + parser.Parse(source3, "my_file.mojom") +@@ -103,7 +103,7 @@ + Quux. */ + asdf4 + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, + r"^my_file\.mojom:10: Error: Unexpected 'asdf4':\n *asdf4$"): + parser.Parse(source4, "my_file.mojom") +@@ -128,7 +128,7 @@ + ast.StructField('b', None, None, 'double', None) + ])) + ]) +- self.assertEquals(parser.Parse(source, "my_file.mojom"), expected) ++ self.assertEqual(parser.Parse(source, "my_file.mojom"), expected) + + def testSimpleStructWithoutModule(self): + """Tests a simple struct without an explict module statement.""" +@@ -147,7 +147,7 @@ + ast.StructField('b', None, None, 'double', None) + ])) + ]) +- self.assertEquals(parser.Parse(source, "my_file.mojom"), expected) ++ self.assertEqual(parser.Parse(source, "my_file.mojom"), expected) + + def testValidStructDefinitions(self): + """Tests all types of definitions that can occur in a struct.""" +@@ -171,7 +171,7 @@ + ast.StructField('b', None, None, 'SomeOtherStruct', None) + ])) + ]) +- self.assertEquals(parser.Parse(source, "my_file.mojom"), expected) ++ self.assertEqual(parser.Parse(source, "my_file.mojom"), expected) + + def testInvalidStructDefinitions(self): + """Tests that definitions that aren't allowed in a struct are correctly +@@ -182,7 +182,7 @@ + MyMethod(int32 a); + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected '\(':\n" + r" *MyMethod\(int32 a\);$"): + parser.Parse(source1, "my_file.mojom") +@@ -194,7 +194,7 @@ + }; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected 'struct':\n" + r" *struct MyInnerStruct {$"): + parser.Parse(source2, "my_file.mojom") +@@ -206,7 +206,7 @@ + }; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, + r"^my_file\.mojom:2: Error: Unexpected 'interface':\n" + r" *interface MyInterface {$"): +@@ -222,7 +222,7 @@ + int32 a; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, + r"^my_file\.mojom:2: Error: Unexpected ';':\n *module ;$"): + parser.Parse(source1, "my_file.mojom") +@@ -237,7 +237,7 @@ + int32 a; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:4: Error: Unexpected 'struct':\n" + r" *struct MyStruct {$"): + parser.Parse(source2, "my_file.mojom") +@@ -249,7 +249,7 @@ + module foo; + module bar; + """ +- with self.assertRaisesRegexp( ++ 
with self.assertRaisesRegex( + parser.ParseError, + r"^my_file\.mojom:2: Error: Multiple \"module\" statements not " + r"allowed:\n *module bar;$"): +@@ -262,7 +262,7 @@ + import "foo.mojom"; + module foo; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, + r"^my_file\.mojom:2: Error: \"module\" statements must precede imports " + r"and definitions:\n *module foo;$"): +@@ -277,7 +277,7 @@ + }; + module foo; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, + r"^my_file\.mojom:4: Error: \"module\" statements must precede imports " + r"and definitions:\n *module foo;$"): +@@ -292,7 +292,7 @@ + }; + import "foo.mojom"; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, + r"^my_file\.mojom:4: Error: \"import\" statements must precede " + r"definitions:\n *import \"foo.mojom\";$"): +@@ -334,21 +334,21 @@ + ast.EnumValue('VALUE7', None, None) + ])) + ]) +- self.assertEquals(parser.Parse(source, "my_file.mojom"), expected) ++ self.assertEqual(parser.Parse(source, "my_file.mojom"), expected) + + def testInvalidEnumInitializers(self): + """Tests that invalid enum initializers are correctly detected.""" + + # Floating point value. + source2 = "enum MyEnum { VALUE = 0.123 };" +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:1: Error: Unexpected '0\.123':\n" + r"enum MyEnum { VALUE = 0\.123 };$"): + parser.Parse(source2, "my_file.mojom") + + # Boolean value. + source2 = "enum MyEnum { VALUE = true };" +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:1: Error: Unexpected 'true':\n" + r"enum MyEnum { VALUE = true };$"): + parser.Parse(source2, "my_file.mojom") +@@ -374,7 +374,7 @@ + ('IDENTIFIER', 'kNumber')) + ])) + ]) +- self.assertEquals(parser.Parse(source, "my_file.mojom"), expected) ++ self.assertEqual(parser.Parse(source, "my_file.mojom"), expected) + + def testNoConditionals(self): + """Tests that ?: is not allowed.""" +@@ -386,7 +386,7 @@ + MY_ENUM_1 = 1 ? 2 : 3 + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:4: Error: Unexpected '\?':\n" + r" *MY_ENUM_1 = 1 \? 
2 : 3$"): + parser.Parse(source, "my_file.mojom") +@@ -429,7 +429,7 @@ + ast.Ordinal(1234567890), 'int32', None) + ])) + ]) +- self.assertEquals(parser.Parse(source, "my_file.mojom"), expected) ++ self.assertEqual(parser.Parse(source, "my_file.mojom"), expected) + + def testInvalidOrdinals(self): + """Tests that (lexically) invalid ordinals are correctly detected.""" +@@ -441,7 +441,7 @@ + int32 a_missing@; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + lexer.LexError, r"^my_file\.mojom:4: Error: Missing ordinal value$"): + parser.Parse(source1, "my_file.mojom") + +@@ -452,7 +452,7 @@ + int32 a_octal@01; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + lexer.LexError, r"^my_file\.mojom:4: Error: " + r"Octal and hexadecimal ordinal values not allowed$"): + parser.Parse(source2, "my_file.mojom") +@@ -460,19 +460,19 @@ + source3 = """\ + module my_module; struct MyStruct { int32 a_invalid_octal@08; }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + lexer.LexError, r"^my_file\.mojom:1: Error: " + r"Octal and hexadecimal ordinal values not allowed$"): + parser.Parse(source3, "my_file.mojom") + + source4 = "module my_module; struct MyStruct { int32 a_hex@0x1aB9; };" +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + lexer.LexError, r"^my_file\.mojom:1: Error: " + r"Octal and hexadecimal ordinal values not allowed$"): + parser.Parse(source4, "my_file.mojom") + + source5 = "module my_module; struct MyStruct { int32 a_hex@0X0; };" +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + lexer.LexError, r"^my_file\.mojom:1: Error: " + r"Octal and hexadecimal ordinal values not allowed$"): + parser.Parse(source5, "my_file.mojom") +@@ -482,7 +482,7 @@ + int32 a_too_big@999999999999; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:2: Error: " + r"Ordinal value 999999999999 too large:\n" + r" *int32 a_too_big@999999999999;$"): +@@ -504,7 +504,7 @@ + 'MyStruct', None, + ast.StructBody(ast.StructField('a', None, None, 'int32', None))) + ]) +- self.assertEquals(parser.Parse(source, "my_file.mojom"), expected) ++ self.assertEqual(parser.Parse(source, "my_file.mojom"), expected) + + def testValidHandleTypes(self): + """Tests (valid) handle types.""" +@@ -538,7 +538,7 @@ + ast.StructField('f', None, None, 'handle', None) + ])) + ]) +- self.assertEquals(parser.Parse(source, "my_file.mojom"), expected) ++ self.assertEqual(parser.Parse(source, "my_file.mojom"), expected) + + def testInvalidHandleType(self): + """Tests an invalid (unknown) handle type.""" +@@ -548,7 +548,7 @@ + handle foo; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:2: Error: " + r"Invalid handle type 'wtf_is_this':\n" + r" *handle foo;$"): +@@ -613,7 +613,7 @@ + ast.StructField('a22', None, None, 'double', '+.123E10') + ])) + ]) +- self.assertEquals(parser.Parse(source, "my_file.mojom"), expected) ++ self.assertEqual(parser.Parse(source, "my_file.mojom"), expected) + + def testValidFixedSizeArray(self): + """Tests parsing a fixed size array.""" +@@ -639,7 +639,7 @@ + None) + ])) + ]) +- self.assertEquals(parser.Parse(source, "my_file.mojom"), expected) ++ self.assertEqual(parser.Parse(source, "my_file.mojom"), expected) + + def testValidNestedArray(self): + """Tests parsing a nested array.""" +@@ -651,7 +651,7 @@ + ast.StructBody( + ast.StructField('nested_array', None, None, 'int32[][]', None))) + ]) 
+- self.assertEquals(parser.Parse(source, "my_file.mojom"), expected) ++ self.assertEqual(parser.Parse(source, "my_file.mojom"), expected) + + def testInvalidFixedArraySize(self): + """Tests that invalid fixed array bounds are correctly detected.""" +@@ -661,7 +661,7 @@ + array zero_size_array; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, + r"^my_file\.mojom:2: Error: Fixed array size 0 invalid:\n" + r" *array zero_size_array;$"): +@@ -672,7 +672,7 @@ + array too_big_array; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, + r"^my_file\.mojom:2: Error: Fixed array size 999999999999 invalid:\n" + r" *array too_big_array;$"): +@@ -683,7 +683,7 @@ + array not_a_number; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected 'abcdefg':\n" + r" *array not_a_number;"): + parser.Parse(source3, "my_file.mojom") +@@ -698,7 +698,7 @@ + ast.StructBody( + [ast.StructField('data', None, None, 'uint8{string}', None)])) + ]) +- self.assertEquals(parser.Parse(source1, "my_file.mojom"), expected1) ++ self.assertEqual(parser.Parse(source1, "my_file.mojom"), expected1) + + source2 = "interface MyInterface { MyMethod(map a); };" + expected2 = ast.Mojom(None, ast.ImportList(), [ +@@ -711,7 +711,7 @@ + ast.Parameter('a', None, None, 'uint8{string}')), + None))) + ]) +- self.assertEquals(parser.Parse(source2, "my_file.mojom"), expected2) ++ self.assertEqual(parser.Parse(source2, "my_file.mojom"), expected2) + + source3 = "struct MyStruct { map> data; };" + expected3 = ast.Mojom(None, ast.ImportList(), [ +@@ -720,7 +720,7 @@ + ast.StructBody( + [ast.StructField('data', None, None, 'uint8[]{string}', None)])) + ]) +- self.assertEquals(parser.Parse(source3, "my_file.mojom"), expected3) ++ self.assertEqual(parser.Parse(source3, "my_file.mojom"), expected3) + + def testValidMethod(self): + """Tests parsing method declarations.""" +@@ -735,7 +735,7 @@ + ast.ParameterList(ast.Parameter('a', None, None, 'int32')), + None))) + ]) +- self.assertEquals(parser.Parse(source1, "my_file.mojom"), expected1) ++ self.assertEqual(parser.Parse(source1, "my_file.mojom"), expected1) + + source2 = """\ + interface MyInterface { +@@ -757,7 +757,7 @@ + ast.ParameterList(), ast.ParameterList()) + ])) + ]) +- self.assertEquals(parser.Parse(source2, "my_file.mojom"), expected2) ++ self.assertEqual(parser.Parse(source2, "my_file.mojom"), expected2) + + source3 = """\ + interface MyInterface { +@@ -776,7 +776,7 @@ + ast.Parameter('b', None, None, 'bool') + ])))) + ]) +- self.assertEquals(parser.Parse(source3, "my_file.mojom"), expected3) ++ self.assertEqual(parser.Parse(source3, "my_file.mojom"), expected3) + + def testInvalidMethods(self): + """Tests that invalid method declarations are correctly detected.""" +@@ -787,7 +787,7 @@ + MyMethod(string a,); + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected '\)':\n" + r" *MyMethod\(string a,\);$"): + parser.Parse(source1, "my_file.mojom") +@@ -798,7 +798,7 @@ + MyMethod(, string a); + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected ',':\n" + r" *MyMethod\(, string a\);$"): + parser.Parse(source2, "my_file.mojom") +@@ -826,7 +826,7 @@ + ast.ParameterList(ast.Parameter('y', None, None, 'MyEnum'))) + ])) + ]) +- self.assertEquals(parser.Parse(source, 
"my_file.mojom"), expected) ++ self.assertEqual(parser.Parse(source, "my_file.mojom"), expected) + + def testInvalidInterfaceDefinitions(self): + """Tests that definitions that aren't allowed in an interface are correctly +@@ -839,7 +839,7 @@ + }; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected 'struct':\n" + r" *struct MyStruct {$"): + parser.Parse(source1, "my_file.mojom") +@@ -851,7 +851,7 @@ + }; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, + r"^my_file\.mojom:2: Error: Unexpected 'interface':\n" + r" *interface MyInnerInterface {$"): +@@ -864,7 +864,7 @@ + """ + # The parser thinks that "int32" is a plausible name for a method, so it's + # "my_field" that gives it away. +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected 'my_field':\n" + r" *int32 my_field;$"): + parser.Parse(source3, "my_file.mojom") +@@ -879,7 +879,7 @@ + expected1 = ast.Mojom( + None, ast.ImportList(), + [ast.Struct('MyStruct', ast.AttributeList(), ast.StructBody())]) +- self.assertEquals(parser.Parse(source1, "my_file.mojom"), expected1) ++ self.assertEqual(parser.Parse(source1, "my_file.mojom"), expected1) + + # One-element attribute list, with name value. + source2 = "[MyAttribute=MyName] struct MyStruct {};" +@@ -888,7 +888,7 @@ + ast.AttributeList(ast.Attribute("MyAttribute", "MyName")), + ast.StructBody()) + ]) +- self.assertEquals(parser.Parse(source2, "my_file.mojom"), expected2) ++ self.assertEqual(parser.Parse(source2, "my_file.mojom"), expected2) + + # Two-element attribute list, with one string value and one integer value. + source3 = "[MyAttribute1 = \"hello\", MyAttribute2 = 5] struct MyStruct {};" +@@ -900,7 +900,7 @@ + ast.Attribute("MyAttribute2", 5) + ]), ast.StructBody()) + ]) +- self.assertEquals(parser.Parse(source3, "my_file.mojom"), expected3) ++ self.assertEqual(parser.Parse(source3, "my_file.mojom"), expected3) + + # Various places that attribute list is allowed. + source4 = """\ +@@ -966,7 +966,7 @@ + ast.Const('kMyConst', ast.AttributeList( + ast.Attribute("Attr12", 12)), 'double', '1.23') + ]) +- self.assertEquals(parser.Parse(source4, "my_file.mojom"), expected4) ++ self.assertEqual(parser.Parse(source4, "my_file.mojom"), expected4) + + # TODO(vtl): Boolean attributes don't work yet. (In fact, we just |eval()| + # literal (non-name) values, which is extremely dubious.) +@@ -977,21 +977,21 @@ + + # Trailing commas not allowed. + source1 = "[MyAttribute=MyName,] struct MyStruct {};" +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:1: Error: Unexpected '\]':\n" + r"\[MyAttribute=MyName,\] struct MyStruct {};$"): + parser.Parse(source1, "my_file.mojom") + + # Missing value. + source2 = "[MyAttribute=] struct MyStruct {};" +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:1: Error: Unexpected '\]':\n" + r"\[MyAttribute=\] struct MyStruct {};$"): + parser.Parse(source2, "my_file.mojom") + + # Missing key. 
+ source3 = "[=MyName] struct MyStruct {};" +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:1: Error: Unexpected '=':\n" + r"\[=MyName\] struct MyStruct {};$"): + parser.Parse(source3, "my_file.mojom") +@@ -1004,7 +1004,7 @@ + expected1 = ast.Mojom(None, + ast.ImportList(ast.Import(None, "somedir/my.mojom")), + []) +- self.assertEquals(parser.Parse(source1, "my_file.mojom"), expected1) ++ self.assertEqual(parser.Parse(source1, "my_file.mojom"), expected1) + + # Two imports (no module statement). + source2 = """\ +@@ -1017,7 +1017,7 @@ + ast.Import(None, "somedir/my1.mojom"), + ast.Import(None, "somedir/my2.mojom") + ]), []) +- self.assertEquals(parser.Parse(source2, "my_file.mojom"), expected2) ++ self.assertEqual(parser.Parse(source2, "my_file.mojom"), expected2) + + # Imports with module statement. + source3 = """\ +@@ -1031,7 +1031,7 @@ + ast.Import(None, "somedir/my1.mojom"), + ast.Import(None, "somedir/my2.mojom") + ]), []) +- self.assertEquals(parser.Parse(source3, "my_file.mojom"), expected3) ++ self.assertEqual(parser.Parse(source3, "my_file.mojom"), expected3) + + def testInvalidImports(self): + """Tests that invalid import statements are correctly detected.""" +@@ -1040,7 +1040,7 @@ + // Make the error occur on line 2. + import invalid + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected 'invalid':\n" + r" *import invalid$"): + parser.Parse(source1, "my_file.mojom") +@@ -1051,7 +1051,7 @@ + int32 a; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected 'struct':\n" + r" *struct MyStruct {$"): + parser.Parse(source2, "my_file.mojom") +@@ -1062,7 +1062,7 @@ + int32 a; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected 'struct':\n" + r" *struct MyStruct {$"): + parser.Parse(source3, "my_file.mojom") +@@ -1114,7 +1114,7 @@ + ast.StructField('o', None, None, 'handle?', None) + ])) + ]) +- self.assertEquals(parser.Parse(source, "my_file.mojom"), expected) ++ self.assertEqual(parser.Parse(source, "my_file.mojom"), expected) + + def testInvalidNullableTypes(self): + """Tests that invalid nullable types are correctly detected.""" +@@ -1123,7 +1123,7 @@ + string?? a; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected '\?':\n" + r" *string\?\? a;$"): + parser.Parse(source1, "my_file.mojom") +@@ -1133,7 +1133,7 @@ + handle? a; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected '<':\n" + r" *handle\? 
a;$"): + parser.Parse(source2, "my_file.mojom") +@@ -1143,7 +1143,7 @@ + some_interface?& a; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected '&':\n" + r" *some_interface\?& a;$"): + parser.Parse(source3, "my_file.mojom") +@@ -1168,7 +1168,7 @@ + ])) + ]) + actual = parser.Parse(source, "my_file.mojom") +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + + def testUnionWithOrdinals(self): + """Test that ordinals are assigned to fields.""" +@@ -1190,7 +1190,7 @@ + ])) + ]) + actual = parser.Parse(source, "my_file.mojom") +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + + def testUnionWithStructMembers(self): + """Test that struct members are accepted.""" +@@ -1208,7 +1208,7 @@ + ast.UnionBody([ast.UnionField('s', None, None, 'SomeStruct')])) + ]) + actual = parser.Parse(source, "my_file.mojom") +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + + def testUnionWithArrayMember(self): + """Test that array members are accepted.""" +@@ -1226,7 +1226,7 @@ + ast.UnionBody([ast.UnionField('a', None, None, 'int32[]')])) + ]) + actual = parser.Parse(source, "my_file.mojom") +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + + def testUnionWithMapMember(self): + """Test that map members are accepted.""" +@@ -1245,7 +1245,7 @@ + [ast.UnionField('m', None, None, 'string{int32}')])) + ]) + actual = parser.Parse(source, "my_file.mojom") +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + + def testUnionDisallowNestedStruct(self): + """Tests that structs cannot be nested in unions.""" +@@ -1258,7 +1258,7 @@ + }; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:4: Error: Unexpected 'struct':\n" + r" *struct MyStruct {$"): + parser.Parse(source, "my_file.mojom") +@@ -1274,7 +1274,7 @@ + }; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, + r"^my_file\.mojom:4: Error: Unexpected 'interface':\n" + r" *interface MyInterface {$"): +@@ -1291,7 +1291,7 @@ + }; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:4: Error: Unexpected 'union':\n" + r" *union MyOtherUnion {$"): + parser.Parse(source, "my_file.mojom") +@@ -1307,7 +1307,7 @@ + }; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:4: Error: Unexpected 'enum':\n" + r" *enum MyEnum {$"): + parser.Parse(source, "my_file.mojom") +@@ -1332,7 +1332,7 @@ + ast.StructField('d', None, None, 'asso?', None) + ])) + ]) +- self.assertEquals(parser.Parse(source1, "my_file.mojom"), expected1) ++ self.assertEqual(parser.Parse(source1, "my_file.mojom"), expected1) + + source2 = """\ + interface MyInterface { +@@ -1349,7 +1349,7 @@ + ast.ParameterList( + ast.Parameter('b', None, None, 'asso'))))) + ]) +- self.assertEquals(parser.Parse(source2, "my_file.mojom"), expected2) ++ self.assertEqual(parser.Parse(source2, "my_file.mojom"), expected2) + + def testInvalidAssociatedKinds(self): + """Tests that invalid associated interfaces and requests are correctly +@@ -1359,7 +1359,7 @@ + associated associated SomeInterface a; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, + r"^my_file\.mojom:2: Error: Unexpected 'associated':\n" + r" *associated associated SomeInterface 
a;$"): +@@ -1370,7 +1370,7 @@ + associated handle a; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected 'handle':\n" + r" *associated handle a;$"): + parser.Parse(source2, "my_file.mojom") +@@ -1380,7 +1380,7 @@ + associated? MyInterface& a; + }; + """ +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected '\?':\n" + r" *associated\? MyInterface& a;$"): + parser.Parse(source3, "my_file.mojom") +--- a/src/3rdparty/chromium/net/android/tools/proxy_test_cases.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/net/android/tools/proxy_test_cases.py 2025-01-16 02:26:08.531763890 +0800 +@@ -285,25 +285,25 @@ + + def Generate(self): + for test_case in test_cases: +- print ("TEST_F(ProxyConfigServiceAndroidTest, %s) {" % test_case["name"]) ++ print(("TEST_F(ProxyConfigServiceAndroidTest, %s) {" % test_case["name"])) + if "description" in test_case: + self._GenerateDescription(test_case["description"]); + self._GenerateConfiguration(test_case["properties"]) + self._GenerateMappings(test_case["mappings"]) +- print "}" +- print "" ++ print("}") ++ print("") + + def _GenerateDescription(self, description): +- print " // %s" % description ++ print(" // %s" % description) + + def _GenerateConfiguration(self, properties): +- for key in sorted(properties.iterkeys()): +- print " AddProperty(\"%s\", \"%s\");" % (key, properties[key]) +- print " ProxySettingsChanged();" ++ for key in sorted(properties.keys()): ++ print(" AddProperty(\"%s\", \"%s\");" % (key, properties[key])) ++ print(" ProxySettingsChanged();") + + def _GenerateMappings(self, mappings): +- for url in sorted(mappings.iterkeys()): +- print " TestMapping(\"%s\", \"%s\");" % (url, mappings[url]) ++ for url in sorted(mappings.keys()): ++ print(" TestMapping(\"%s\", \"%s\");" % (url, mappings[url])) + + + class GenerateJava: +@@ -315,32 +315,32 @@ + continue + if "description" in test_case: + self._GenerateDescription(test_case["description"]); +- print " @SmallTest" +- print " @Feature({\"AndroidWebView\"})" +- print " public void test%s() throws Exception {" % test_case["name"] ++ print(" @SmallTest") ++ print(" @Feature({\"AndroidWebView\"})") ++ print(" public void test%s() throws Exception {" % test_case["name"]) + self._GenerateConfiguration(test_case["properties"]) + self._GenerateMappings(test_case["mappings"]) +- print " }" +- print "" ++ print(" }") ++ print("") + + def _GenerateDescription(self, description): +- print " /**" +- print " * %s" % description +- print " *" +- print " * @throws Exception" +- print " */" ++ print(" /**") ++ print(" * %s" % description) ++ print(" *") ++ print(" * @throws Exception") ++ print(" */") + + def _GenerateConfiguration(self, properties): +- for key in sorted(properties.iterkeys()): +- print " System.setProperty(\"%s\", \"%s\");" % ( +- key, properties[key]) ++ for key in sorted(properties.keys()): ++ print(" System.setProperty(\"%s\", \"%s\");" % ( ++ key, properties[key])) + + def _GenerateMappings(self, mappings): +- for url in sorted(mappings.iterkeys()): ++ for url in sorted(mappings.keys()): + mapping = mappings[url] + if 'HTTPS' in mapping: + mapping = mapping.replace('HTTPS', 'PROXY') +- print " checkMapping(\"%s\", \"%s\");" % (url, mapping) ++ print(" checkMapping(\"%s\", \"%s\");" % (url, mapping)) + + + def main(): +--- a/src/3rdparty/chromium/net/data/gencerts/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ 
b/src/3rdparty/chromium/net/data/gencerts/__init__.py 2025-01-16 02:26:08.531763890 +0800
+@@ -17,7 +17,7 @@
+ import subprocess
+ import sys
+
+-import openssl_conf
++from . import openssl_conf
+
+ # Enum for the "type" of certificate that is to be created. This is used to
+ # select sane defaults for the .cnf file and command line flags, but they can
+--- a/src/3rdparty/chromium/net/data/ssl/root_stores/update_root_stores.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/net/data/ssl/root_stores/update_root_stores.py 2025-01-16 02:26:08.531763890 +0800
+@@ -80,7 +80,7 @@
+
+ def main():
+ if len(sys.argv) > 1:
+- print >>sys.stderr, 'No arguments expected!'
++ print('No arguments expected!', file=sys.stderr)
+ sys.stderr.write(__doc__)
+ return 1
+
+--- a/src/3rdparty/chromium/net/data/ssl/scripts/crlsetutil.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/net/data/ssl/scripts/crlsetutil.py 2025-01-16 02:26:08.531763890 +0800
+@@ -91,7 +91,7 @@
+ if length & 0x80:
+ num_length_bytes = length & 0x7f
+ length = 0
+- for i in xrange(2, 2 + num_length_bytes):
++ for i in range(2, 2 + num_length_bytes):
+ length <<= 8
+ length += ord(der_bytes[i])
+ header_length = 2 + num_length_bytes
+@@ -263,14 +263,14 @@
+ pem_cert_file_to_serial(issued_cert_file)
+ for issued_cert_file in issued_certs
+ ]
+- for pem_file, issued_certs in config.get('BlockedByHash', {}).iteritems()
++ for pem_file, issued_certs in config.get('BlockedByHash', {}).items()
+ }
+ limited_subjects = {
+ pem_cert_file_to_subject_hash(pem_file).encode('base64').strip(): [
+ pem_cert_file_to_spki_hash(filename).encode('base64').strip()
+ for filename in allowed_pems
+ ]
+- for pem_file, allowed_pems in config.get('LimitedSubjects', {}).iteritems()
++ for pem_file, allowed_pems in config.get('LimitedSubjects', {}).items()
+ }
+ known_interception_spkis = [
+ pem_cert_file_to_spki_hash(pem_file).encode('base64').strip()
+@@ -291,7 +291,7 @@
+ header = json.dumps(header_json)
+ outfile.write(struct.pack('<H', len(header)))
+
+
+-class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
++class RequestHandler(http.server.BaseHTTPRequestHandler):
+ keep_running = True
+ local_ip = ''
+ port = 0
+@@ -76,7 +76,7 @@
+ self.path[6:]))
+ return
+
+- params = urlparse.parse_qs(urlparse.urlparse(self.path).query)
++ params = urllib.parse.parse_qs(urllib.parse.urlparse(self.path).query)
+
+ if not params or not 'code' in params or params['code'][0] == '200':
+ self.send_response(200)
+@@ -92,13 +92,13 @@
+
+ def main():
+ if len(sys.argv) != 3:
+- print "Usage: %s LOCAL_IP PORT" % sys.argv[0]
++ print("Usage: %s LOCAL_IP PORT" % sys.argv[0])
+ sys.exit(1)
+ RequestHandler.local_ip = sys.argv[1]
+ port = int(sys.argv[2])
+ RequestHandler.port = port
+- print "To stop the server, go to http://localhost:%d/quitquitquit" % port
+- httpd = BaseHTTPServer.HTTPServer(('', port), RequestHandler)
++ print("To stop the server, go to http://localhost:%d/quitquitquit" % port)
++ httpd = http.server.HTTPServer(('', port), RequestHandler)
+ while RequestHandler.keep_running:
+ httpd.handle_request()
+
+--- a/src/3rdparty/chromium/net/tools/testserver/echo_message.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/net/tools/testserver/echo_message.py 2025-01-16 02:26:08.531763890 +0800
+@@ -26,7 +26,7 @@
+
+
+ from itertools import cycle
+-from itertools import izip
++
+ import random
+
+
+@@ -261,7 +261,7 @@
+ Returns:
+ An encoded/decoded string. 
+ """ +- return ''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(payload, cycle(key))) ++ return ''.join(chr(ord(x) ^ ord(y)) for (x, y) in zip(payload, cycle(key))) + + + def Checksum(payload, payload_size): +--- a/src/3rdparty/chromium/net/tools/testserver/minica.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/net/tools/testserver/minica.py 2025-01-16 02:26:08.532847204 +0800 +@@ -406,7 +406,7 @@ + single_responses = [ + MakeOCSPSingleResponse(issuer_name_hash, issuer_key_hash, serial, + ocsp_state, ocsp_date) +- for ocsp_state, ocsp_date in itertools.izip(ocsp_states, ocsp_dates) ++ for ocsp_state, ocsp_date in zip(ocsp_states, ocsp_dates) + ] + + basic_resp_data_der = asn1.ToDER(asn1.SEQUENCE([ +@@ -557,13 +557,13 @@ + + der_root = MakeCertificate(ROOT_CN, ROOT_CN, 1, ROOT_KEY, ROOT_KEY, + is_ca=True, path_len=1) +- print 'ocsp-test-root.pem:' +- print DERToPEM(der_root) ++ print('ocsp-test-root.pem:') ++ print(DERToPEM(der_root)) + +- print +- print 'kOCSPTestCertFingerprint:' +- print bin_to_array(hashlib.sha1(der_root).digest()) +- +- print +- print 'kOCSPTestCertSPKI:' +- print bin_to_array(crlsetutil.der_cert_to_spki_hash(der_root)) ++ print() ++ print('kOCSPTestCertFingerprint:') ++ print(bin_to_array(hashlib.sha1(der_root).digest())) ++ ++ print() ++ print('kOCSPTestCertSPKI:') ++ print(bin_to_array(crlsetutil.der_cert_to_spki_hash(der_root))) +--- a/src/3rdparty/chromium/net/tools/testserver/testserver.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/net/tools/testserver/testserver.py 2025-01-16 02:26:08.532847204 +0800 +@@ -15,7 +15,7 @@ + """ + + import base64 +-import BaseHTTPServer ++import http.server + import cgi + import hashlib + import logging +@@ -26,14 +26,14 @@ + import re + import select + import socket +-import SocketServer ++import socketserver + import ssl + import struct + import sys + import threading + import time +-import urllib +-import urlparse ++import urllib.request, urllib.parse, urllib.error ++import urllib.parse + import zlib + + BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +@@ -123,7 +123,7 @@ + + pass + +-class ThreadingHTTPServer(SocketServer.ThreadingMixIn, ++class ThreadingHTTPServer(socketserver.ThreadingMixIn, + HTTPServer): + """This variant of HTTPServer creates a new thread for every connection. It + should only be used with handlers that are known to be threadsafe.""" +@@ -132,7 +132,7 @@ + + class OCSPServer(testserver_base.ClientRestrictingServerMixIn, + testserver_base.BrokenPipeHandlerMixIn, +- BaseHTTPServer.HTTPServer): ++ http.server.HTTPServer): + """This is a specialization of HTTPServer that serves an + OCSP response""" + +@@ -248,8 +248,8 @@ + except tlslite.api.TLSAbruptCloseError: + # Ignore abrupt close. 
+ return True +- except tlslite.api.TLSError, error: +- print "Handshake failure:", str(error) ++ except tlslite.api.TLSError as error: ++ print("Handshake failure:", str(error)) + return False + + +@@ -261,13 +261,13 @@ + + + class TCPEchoServer(testserver_base.ClientRestrictingServerMixIn, +- SocketServer.TCPServer): ++ socketserver.TCPServer): + """A TCP echo server that echoes back what it has received.""" + + def server_bind(self): + """Override server_bind to store the server name.""" + +- SocketServer.TCPServer.server_bind(self) ++ socketserver.TCPServer.server_bind(self) + host, port = self.socket.getsockname()[:2] + self.server_name = socket.getfqdn(host) + self.server_port = port +@@ -281,13 +281,13 @@ + + + class UDPEchoServer(testserver_base.ClientRestrictingServerMixIn, +- SocketServer.UDPServer): ++ socketserver.UDPServer): + """A UDP echo server that echoes back what it has received.""" + + def server_bind(self): + """Override server_bind to store the server name.""" + +- SocketServer.UDPServer.server_bind(self) ++ socketserver.UDPServer.server_bind(self) + host, port = self.socket.getsockname()[:2] + self.server_name = socket.getfqdn(host) + self.server_port = port +@@ -684,7 +684,7 @@ + if not self._ShouldHandleRequest("/echo"): + return False + +- _, _, _, _, query, _ = urlparse.urlparse(self.path) ++ _, _, _, _, query, _ = urllib.parse.urlparse(self.path) + query_params = cgi.parse_qs(query, True) + if 'status' in query_params: + self.send_response(int(query_params['status'][0])) +@@ -760,7 +760,7 @@ + + # Since the data can be binary, we encode them by base64. + post_multipart_base64_encoded = {} +- for field, values in post_multipart.items(): ++ for field, values in list(post_multipart.items()): + post_multipart_base64_encoded[field] = [base64.b64encode(value) + for value in values] + +@@ -871,7 +871,7 @@ + if self.command == 'POST' or self.command == 'PUT' : + self.ReadRequestBody() + +- _, _, url_path, _, query, _ = urlparse.urlparse(self.path) ++ _, _, url_path, _, query, _ = urllib.parse.urlparse(self.path) + + if not query in ('C', 'U', 'S', 'M', 'L'): + return False +@@ -883,7 +883,7 @@ + file_path = os.path.join(file_path, 'index.html') + + if not os.path.isfile(file_path): +- print "File not found " + sub_path + " full path:" + file_path ++ print("File not found " + sub_path + " full path:" + file_path) + self.send_error(404) + return True + +@@ -930,7 +930,7 @@ + def PostOnlyFileHandler(self): + """This handler sends the contents of the requested file on a POST.""" + +- prefix = urlparse.urljoin(self.server.file_root_url, 'post/') ++ prefix = urllib.parse.urljoin(self.server.file_root_url, 'post/') + if not self.path.startswith(prefix): + return False + return self._FileHandlerHelper(prefix) +@@ -941,7 +941,7 @@ + # Consume a request body if present. 
+ request_body = self.ReadRequestBody() + +- _, _, url_path, _, query, _ = urlparse.urlparse(self.path) ++ _, _, url_path, _, query, _ = urllib.parse.urlparse(self.path) + query_dict = cgi.parse_qs(query) + + expected_body = query_dict.get('expected_body', []) +@@ -967,7 +967,7 @@ + file_path = os.path.join(file_path, 'index.html') + + if not os.path.isfile(file_path): +- print "File not found " + sub_path + " full path:" + file_path ++ print("File not found " + sub_path + " full path:" + file_path) + self.send_error(404) + return True + +@@ -1066,7 +1066,7 @@ + if not self._ShouldHandleRequest("/expect-and-set-cookie"): + return False + +- _, _, _, _, query, _ = urlparse.urlparse(self.path) ++ _, _, _, _, query, _ = urllib.parse.urlparse(self.path) + query_dict = cgi.parse_qs(query) + cookies = set() + if 'Cookie' in self.headers: +@@ -1101,7 +1101,7 @@ + self.send_response(200) + self.send_header('Content-Type', 'text/html') + for header_value in headers_values: +- header_value = urllib.unquote(header_value) ++ header_value = urllib.parse.unquote(header_value) + (key, value) = header_value.split(': ', 1) + self.send_header(key, value) + self.end_headers() +@@ -1121,7 +1121,7 @@ + realm = 'testrealm' + set_cookie_if_challenged = False + +- _, _, url_path, _, query, _ = urlparse.urlparse(self.path) ++ _, _, url_path, _, query, _ = urllib.parse.urlparse(self.path) + query_params = cgi.parse_qs(query, True) + if 'set-cookie-if-challenged' in query_params: + set_cookie_if_challenged = True +@@ -1139,7 +1139,7 @@ + username, password = re.findall(r'([^:]+):(\S+)', userpass)[0] + if password != expected_password: + raise Exception('wrong password') +- except Exception, e: ++ except Exception as e: + # Authentication failed. + self.send_response(401) + self.send_header('WWW-Authenticate', 'Basic realm="%s"' % realm) +@@ -1265,7 +1265,7 @@ + + if pairs['response'] != response: + raise Exception('wrong password') +- except Exception, e: ++ except Exception as e: + # Authentication failed. 
+ self.send_response(401) + hdr = ('Digest ' +@@ -1387,7 +1387,7 @@ + if query_char < 0 or len(self.path) <= query_char + 1: + self.sendRedirectHelp(test_name) + return True +- dest = urllib.unquote(self.path[query_char + 1:]) ++ dest = urllib.parse.unquote(self.path[query_char + 1:]) + + self.send_response(301) # moved permanently + self.send_header('Location', dest) +@@ -1408,7 +1408,7 @@ + if not self._ShouldHandleRequest(test_name): + return False + +- params = urllib.unquote(self.path[(len(test_name) + 1):]) ++ params = urllib.parse.unquote(self.path[(len(test_name) + 1):]) + slash = params.find('/') + if slash < 0: + self.sendRedirectHelp(test_name) +@@ -1440,7 +1440,7 @@ + if query_char < 0 or len(self.path) <= query_char + 1: + self.sendRedirectHelp(test_name) + return True +- dest = urllib.unquote(self.path[query_char + 1:]) ++ dest = urllib.parse.unquote(self.path[query_char + 1:]) + + self.send_response(200) + self.send_header('Content-Type', 'text/html') +@@ -1484,7 +1484,7 @@ + self.end_headers() + + # Write ~26K of data, in 1350 byte chunks +- for i in xrange(20): ++ for i in range(20): + self.wfile.write('*' * 1350) + self.wfile.flush() + return True +@@ -1639,7 +1639,7 @@ + response = self.ocsp_response_intermediate + else: + return False +- print 'handling ocsp request' ++ print('handling ocsp request') + self.send_response(200) + self.send_header('Content-Type', 'application/ocsp-response') + self.send_header('Content-Length', str(len(response))) +@@ -1650,7 +1650,7 @@ + def CaIssuersResponse(self): + if not self._ShouldHandleRequest("/ca_issuers"): + return False +- print 'handling ca_issuers request' ++ print('handling ca_issuers request') + self.send_response(200) + self.send_header('Content-Type', 'application/pkix-cert') + self.send_header('Content-Length', str(len(self.ca_issuers_response))) +@@ -1659,7 +1659,7 @@ + self.wfile.write(self.ca_issuers_response) + + +-class TCPEchoHandler(SocketServer.BaseRequestHandler): ++class TCPEchoHandler(socketserver.BaseRequestHandler): + """The RequestHandler class for TCP echo server. + + It is instantiated once per connection to the server, and overrides the +@@ -1682,7 +1682,7 @@ + self.request.send(return_data) + + +-class UDPEchoHandler(SocketServer.BaseRequestHandler): ++class UDPEchoHandler(socketserver.BaseRequestHandler): + """The RequestHandler class for UDP echo server. + + It is instantiated once per connection to the server, and overrides the +@@ -1705,7 +1705,7 @@ + request_socket.sendto(return_data, self.client_address) + + +-class ProxyRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): ++class ProxyRequestHandler(http.server.BaseHTTPRequestHandler): + """A request handler that behaves as a proxy server. Only CONNECT, GET and + HEAD methods are supported. + """ +@@ -1737,7 +1737,7 @@ + other.send(received) + + def _do_common_method(self): +- url = urlparse.urlparse(self.path) ++ url = urllib.parse.urlparse(self.path) + port = url.port + if not port: + if url.scheme == 'http': +@@ -1962,8 +1962,8 @@ + pem_cert_and_key = file(self.options.cert_and_key_file, 'r').read() + elif self.options.aia_intermediate: + self.__ocsp_server = OCSPServer((host, 0), OCSPHandler) +- print ('AIA server started on %s:%d...' % +- (host, self.__ocsp_server.server_port)) ++ print(('AIA server started on %s:%d...' 
% ++ (host, self.__ocsp_server.server_port))) + + ocsp_server_port = self.__ocsp_server.server_port + if self.options.ocsp_proxy_port_number != 0: +@@ -1984,8 +1984,8 @@ + else: + # generate a new certificate and run an OCSP server for it. + self.__ocsp_server = OCSPServer((host, 0), OCSPHandler) +- print ('OCSP server started on %s:%d...' % +- (host, self.__ocsp_server.server_port)) ++ print(('OCSP server started on %s:%d...' % ++ (host, self.__ocsp_server.server_port))) + + ocsp_states, ocsp_dates, ocsp_produced = self.__parse_ocsp_options( + self.options.ocsp, +@@ -2063,12 +2063,12 @@ + self.options.simulate_tls13_downgrade, + self.options.simulate_tls12_downgrade, + self.options.tls_max_version) +- print 'HTTPS server started on https://%s:%d...' % \ +- (host, server.server_port) ++ print('HTTPS server started on https://%s:%d...' % \ ++ (host, server.server_port)) + else: + server = HTTPServer((host, port), TestPageHandler) +- print 'HTTP server started on http://%s:%d...' % \ +- (host, server.server_port) ++ print('HTTP server started on http://%s:%d...' % \ ++ (host, server.server_port)) + + server.data_dir = self.__make_data_dir() + server.file_root_url = self.options.file_root_url +@@ -2101,11 +2101,11 @@ + 'specified trusted client CA file not found: ' + + self.options.ssl_client_ca[0] + ' exiting...') + websocket_options.tls_client_ca = self.options.ssl_client_ca[0] +- print 'Trying to start websocket server on %s://%s:%d...' % \ +- (scheme, websocket_options.server_host, websocket_options.port) ++ print('Trying to start websocket server on %s://%s:%d...' % \ ++ (scheme, websocket_options.server_host, websocket_options.port)) + server = WebSocketServer(websocket_options) +- print 'WebSocket server started on %s://%s:%d...' % \ +- (scheme, host, server.server_port) ++ print('WebSocket server started on %s://%s:%d...' % \ ++ (scheme, host, server.server_port)) + server_data['port'] = server.server_port + websocket_options.use_basic_auth = self.options.ws_basic_auth + elif self.options.server_type == SERVER_TCP_ECHO: +@@ -2113,26 +2113,26 @@ + # message. + random.seed() + server = TCPEchoServer((host, port), TCPEchoHandler) +- print 'Echo TCP server started on port %d...' % server.server_port ++ print('Echo TCP server started on port %d...' % server.server_port) + server_data['port'] = server.server_port + elif self.options.server_type == SERVER_UDP_ECHO: + # Used for generating the key (randomly) that encodes the "echo request" + # message. + random.seed() + server = UDPEchoServer((host, port), UDPEchoHandler) +- print 'Echo UDP server started on port %d...' % server.server_port ++ print('Echo UDP server started on port %d...' % server.server_port) + server_data['port'] = server.server_port + elif self.options.server_type == SERVER_PROXY: + ProxyRequestHandler.redirect_connect_to_localhost = \ + self.options.redirect_connect_to_localhost + server = ThreadingHTTPServer((host, port), ProxyRequestHandler) +- print 'Proxy server started on port %d...' % server.server_port ++ print('Proxy server started on port %d...' % server.server_port) + server_data['port'] = server.server_port + elif self.options.server_type == SERVER_BASIC_AUTH_PROXY: + ProxyRequestHandler.redirect_connect_to_localhost = \ + self.options.redirect_connect_to_localhost + server = ThreadingHTTPServer((host, port), BasicAuthProxyRequestHandler) +- print 'BasicAuthProxy server started on port %d...' % server.server_port ++ print('BasicAuthProxy server started on port %d...' 
% server.server_port) + server_data['port'] = server.server_port + elif self.options.server_type == SERVER_FTP: + my_data_dir = self.__make_data_dir() +@@ -2158,7 +2158,7 @@ + # Instantiate FTP server class and listen to address:port + server = pyftpdlib.ftpserver.FTPServer((host, port), ftp_handler) + server_data['port'] = server.socket.getsockname()[1] +- print 'FTP server started on port %d...' % server_data['port'] ++ print('FTP server started on port %d...' % server_data['port']) + else: + raise testserver_base.OptionError('unknown server type' + + self.options.server_type) +--- a/src/3rdparty/chromium/net/tools/testserver/testserver_base.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/net/tools/testserver/testserver_base.py 2025-01-16 02:26:08.532847204 +0800 +@@ -258,8 +258,8 @@ + if self.options.startup_pipe is not None: + server_data_json = json.dumps(server_data).encode() + server_data_len = len(server_data_json) +- print('sending server_data: %s (%d bytes)' % +- (server_data_json, server_data_len)) ++ print(('sending server_data: %s (%d bytes)' % ++ (server_data_json, server_data_len))) + if sys.platform == 'win32': + fd = msvcrt.open_osfhandle(self.options.startup_pipe, 0) + else: +--- a/src/3rdparty/chromium/ppapi/generate_ppapi_include_tests.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/ppapi/generate_ppapi_include_tests.py 2025-01-16 02:26:08.532847204 +0800 +@@ -17,7 +17,7 @@ + # tests to some 'generated' area, and remove them from version + # control. + +-from __future__ import print_function ++ + + import re + import os +--- a/src/3rdparty/chromium/ppapi/generate_ppapi_size_checks.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/ppapi/generate_ppapi_size_checks.py 2025-01-16 02:26:08.532847204 +0800 +@@ -7,7 +7,7 @@ + have appropriate size checking. + """ + +-from __future__ import print_function ++ + + import optparse + import os +@@ -130,7 +130,7 @@ + # contain any number of lines (0 or more) delimited by carriage returns. + for linenum_to_delete in self.linenums_to_delete: + file_lines[linenum_to_delete] = ""; +- for linenum, sourcelines in self.lines_to_add.items(): ++ for linenum, sourcelines in list(self.lines_to_add.items()): + # Sort the lines we're adding so we get relatively consistent results. + sourcelines.sort() + # Prepend the new lines. When we output +@@ -331,7 +331,7 @@ + # their structure. If we find types which could easily be consistent but + # aren't, spit out an error and exit. + types_independent = {} +- for typename, typeinfo32 in types32.items(): ++ for typename, typeinfo32 in list(types32.items()): + if (typename in types64): + typeinfo64 = types64[typename] + if (typeinfo64.size == typeinfo32.size): +@@ -377,7 +377,7 @@ + # to be arch-independent has changed to now be arch-dependent (e.g., because + # a pointer was added), and we want to delete the old check in that case. + for name, typeinfo in \ +- types_independent.items() + types32.items() + types64.items(): ++ list(types_independent.items()) + list(types32.items()) + list(types64.items()): + if IsMacroDefinedName(name): + sourcefile = typeinfo.source_location.filename + if sourcefile not in file_patches: +@@ -387,7 +387,7 @@ + + # Add a compile-time assertion for each type whose size is independent of + # architecture. These assertions go immediately after the class definition. 
+- for name, typeinfo in types_independent.items():
++ for name, typeinfo in list(types_independent.items()):
+ # Ignore dummy types that were defined by macros and also ignore types that
+ # are 0 bytes (i.e., typedefs to void).
+ if not IsMacroDefinedName(name) and typeinfo.size > 0:
+@@ -405,17 +405,17 @@
+
+ # Apply our patches. This actually edits the files containing the definitions
+ # for the types in types_independent.
+- for filename, patch in file_patches.items():
++ for filename, patch in list(file_patches.items()):
+ patch.Apply()
+
+ # Write out a file of checks for 32-bit architectures and a separate file for
+ # 64-bit architectures. These only have checks for types that are
+ # architecture-dependent.
+ c_source_root = os.path.join(options.ppapi_root, "tests")
+- WriteArchSpecificCode(types32.values(),
++ WriteArchSpecificCode(list(types32.values()),
+ c_source_root,
+ "arch_dependent_sizes_32.h")
+- WriteArchSpecificCode(types64.values(),
++ WriteArchSpecificCode(list(types64.values()),
+ c_source_root,
+ "arch_dependent_sizes_64.h")
+
+--- a/src/3rdparty/chromium/ppapi/c/documentation/doxy_cleanup.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/ppapi/c/documentation/doxy_cleanup.py 2025-01-16 02:26:08.532847204 +0800
+@@ -8,7 +8,7 @@
+ that they are suitable for publication on a Google documentation site.
+ '''
+
+-from __future__ import print_function
++
+
+ import optparse
+ import os
+@@ -60,7 +60,7 @@
+ for tag in self.soup.findAll('tr'):
+ if tag.td and tag.td.h2 and tag.td.h2.a and tag.td.h2.a['name']:
+ #tag['id'] = tag.td.h2.a['name']
+- tag.string = tag.td.h2.a.next
++ tag.string = tag.td.h2.a.next
+ tag.name = 'h2'
+ table_headers.append(tag)
+
+--- a/src/3rdparty/chromium/ppapi/cpp/documentation/doxy_cleanup.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/ppapi/cpp/documentation/doxy_cleanup.py 2025-01-16 02:26:08.532847204 +0800
+@@ -7,7 +7,7 @@
+ that they are suitable for publication on a Google documentation site.
+ '''
+
+-from __future__ import print_function
++
+
+ import optparse
+ import os
+@@ -59,7 +59,7 @@
+ for tag in self.soup.findAll('tr'):
+ if tag.td and tag.td.h2 and tag.td.h2.a and tag.td.h2.a['name']:
+ #tag['id'] = tag.td.h2.a['name']
+- tag.string = tag.td.h2.a.next
++ tag.string = tag.td.h2.a.next
+ tag.name = 'h2'
+ table_headers.append(tag)
+
+--- a/src/3rdparty/chromium/ppapi/generators/generator.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/ppapi/generators/generator.py 2025-01-16 02:26:08.532847204 +0800
+@@ -3,7 +3,7 @@
+ # Use of this source code is governed by a BSD-style license that can be
+ # found in the LICENSE file. 
+ +-from __future__ import print_function ++ + + import os + import sys +--- a/src/3rdparty/chromium/ppapi/generators/idl_ast.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/ppapi/generators/idl_ast.py 2025-01-16 02:26:08.532847204 +0800 +@@ -4,7 +4,7 @@ + + """Nodes for PPAPI IDL AST.""" + +-from __future__ import print_function ++ + + from idl_namespace import IDLNamespace + from idl_node import IDLNode +--- a/src/3rdparty/chromium/ppapi/generators/idl_c_header.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/ppapi/generators/idl_c_header.py 2025-01-16 02:26:08.532847204 +0800 +@@ -5,7 +5,7 @@ + + """ Generator for C style prototypes and definitions """ + +-from __future__ import print_function ++ + + import glob + import os +--- a/src/3rdparty/chromium/ppapi/generators/idl_c_proto.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/ppapi/generators/idl_c_proto.py 2025-01-16 02:26:08.533930519 +0800 +@@ -5,7 +5,7 @@ + + """ Generator for C style prototypes and definitions """ + +-from __future__ import print_function ++ + + import glob + import os +@@ -665,7 +665,7 @@ + def Copyright(self, node, cpp_style=False): + lines = node.GetName().split('\n') + if cpp_style: +- return '//' + '\n//'.join(filter(lambda f: f != '', lines)) + '\n' ++ return '//' + '\n//'.join([f for f in lines if f != '']) + '\n' + return CommentLines(lines) + + +--- a/src/3rdparty/chromium/ppapi/generators/idl_diff.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/ppapi/generators/idl_diff.py 2025-01-16 02:26:08.533930519 +0800 +@@ -3,7 +3,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import glob + import os +@@ -49,7 +49,7 @@ + print('src: >>%s<<' % line) + for line in self.now: + print('gen: >>%s<<' % line) +- print ++ print() + + # + # IsCopyright +--- a/src/3rdparty/chromium/ppapi/generators/idl_generator.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/ppapi/generators/idl_generator.py 2025-01-16 02:26:08.533930519 +0800 +@@ -3,7 +3,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import sys + +--- a/src/3rdparty/chromium/ppapi/generators/idl_lexer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/ppapi/generators/idl_lexer.py 2025-01-16 02:26:08.533930519 +0800 +@@ -16,7 +16,7 @@ + # PLY can be found at: + # http://www.dabeaz.com/ply/ + +-from __future__ import print_function ++ + + import os.path + import re +--- a/src/3rdparty/chromium/ppapi/generators/idl_namespace.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/ppapi/generators/idl_namespace.py 2025-01-16 02:26:08.533930519 +0800 +@@ -10,7 +10,7 @@ + a symbol as one or more AST nodes given a release or range of releases. 
+ """ + +-from __future__ import print_function ++ + + import sys + +--- a/src/3rdparty/chromium/ppapi/generators/idl_propertynode.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/ppapi/generators/idl_propertynode.py 2025-01-16 02:26:08.533930519 +0800 +@@ -46,7 +46,7 @@ + return self.property_map.get(name, None) + + def GetPropertyList(self): +- return self.property_map.keys() ++ return list(self.property_map.keys()) + + # + # Testing functions +--- a/src/3rdparty/chromium/ppapi/generators/idl_release.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/ppapi/generators/idl_release.py 2025-01-16 02:26:08.533930519 +0800 +@@ -10,7 +10,7 @@ + a symbol as one or more AST nodes given a Release or range of Releases. + """ + +-from __future__ import print_function ++ + + import sys + +--- a/src/3rdparty/chromium/ppapi/generators/idl_tests.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/ppapi/generators/idl_tests.py 2025-01-16 02:26:08.533930519 +0800 +@@ -5,7 +5,7 @@ + + """ Test runner for IDL Generator changes """ + +-from __future__ import print_function ++ + + import subprocess + import sys +--- a/src/3rdparty/chromium/ppapi/generators/idl_thunk.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/ppapi/generators/idl_thunk.py 2025-01-16 02:26:08.533930519 +0800 +@@ -5,7 +5,7 @@ + + """ Generator for C++ style thunks """ + +-from __future__ import print_function ++ + + import glob + import os +@@ -405,8 +405,7 @@ + """ + build_list = member.GetUniqueReleases(releases) + release = build_list[0] # Pick the oldest release. +- same_name_siblings = filter( +- lambda n: str(n) == str(member) and n != member, members) ++ same_name_siblings = [n for n in members if str(n) == str(member) and n != member] + + for s in same_name_siblings: + sibling_build_list = s.GetUniqueReleases(releases) +@@ -504,7 +503,7 @@ + for child in members: + build_list = child.GetUniqueReleases(releases) + # We have to filter out releases this node isn't in. +- build_list = filter(lambda r: child.InReleases([r]), build_list) ++ build_list = [r for r in build_list if child.InReleases([r])] + if len(build_list) == 0: + continue + release = build_list[-1] +--- a/src/3rdparty/chromium/ppapi/native_client/tools/browser_tester/browser_tester.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/ppapi/native_client/tools/browser_tester/browser_tester.py 2025-01-16 02:26:08.533930519 +0800 +@@ -3,16 +3,16 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import glob + import optparse + import os.path + import socket + import sys +-import thread ++import _thread + import time +-import urllib ++import urllib.request, urllib.parse, urllib.error + + # Allow the import of third party modules + script_dir = os.path.dirname(os.path.abspath(__file__)) +@@ -228,7 +228,7 @@ + file_mapping = dict(options.map_files) + for filename in options.files: + file_mapping[os.path.basename(filename)] = filename +- for _, real_path in file_mapping.items(): ++ for _, real_path in list(file_mapping.items()): + if not os.path.exists(real_path): + raise AssertionError('\'%s\' does not exist.' % real_path) + mime_types = {} +@@ -254,7 +254,7 @@ + + full_url = 'http://%s:%d/%s' % (host, port, url) + if len(options.test_args) > 0: +- full_url += '?' + urllib.urlencode(options.test_args) ++ full_url += '?' 
+ urllib.parse.urlencode(options.test_args) + browser.Run(full_url, host, port) + server.TestingBegun(0.125) + +@@ -263,7 +263,7 @@ + def Serve(): + while server.test_in_progress or options.interactive: + server.handle_request() +- thread.start_new_thread(Serve, ()) ++ _thread.start_new_thread(Serve, ()) + + tool_failed = False + time_started = time.time() +--- a/src/3rdparty/chromium/ppapi/native_client/tools/browser_tester/browsertester/browserlauncher.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/ppapi/native_client/tools/browser_tester/browsertester/browserlauncher.py 2025-01-16 02:26:08.533930519 +0800 +@@ -3,7 +3,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import os.path + import re +@@ -11,9 +11,9 @@ + import sys + import tempfile + import time +-import urlparse ++import urllib.parse + +-import browserprocess ++from . import browserprocess + + class LaunchFailure(Exception): + pass +@@ -195,7 +195,7 @@ + self.options.nacl_exe_stdout, True) + self.SetStandardStream(env, 'NACL_EXE_STDERR', + self.options.nacl_exe_stderr, True) +- print('ENV:', ' '.join(['='.join(pair) for pair in env.items()])) ++ print('ENV:', ' '.join(['='.join(pair) for pair in list(env.items())])) + print('LAUNCHING: %s' % ' '.join(cmd)) + sys.stdout.flush() + self.browser_process = RunCommand(cmd, env=env) +--- a/src/3rdparty/chromium/ppapi/native_client/tools/browser_tester/browsertester/browserprocess.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/ppapi/native_client/tools/browser_tester/browsertester/browserprocess.py 2025-01-16 02:26:08.533930519 +0800 +@@ -3,7 +3,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import os + import signal +--- a/src/3rdparty/chromium/ppapi/native_client/tools/browser_tester/browsertester/server.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/ppapi/native_client/tools/browser_tester/browsertester/server.py 2025-01-16 02:26:08.533930519 +0800 +@@ -2,25 +2,25 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ +-import BaseHTTPServer ++import http.server + import cgi + import mimetypes + import os + import os.path + import posixpath +-import SimpleHTTPServer +-import SocketServer ++import http.server ++import socketserver + import threading + import time +-import urllib +-import urlparse ++import urllib.request, urllib.parse, urllib.error ++import urllib.parse + +-class RequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): ++class RequestHandler(http.server.SimpleHTTPRequestHandler): + + def NormalizePath(self, path): + path = path.split('?', 1)[0] + path = path.split('#', 1)[0] +- path = posixpath.normpath(urllib.unquote(path)) ++ path = posixpath.normpath(urllib.parse.unquote(path)) + words = path.split('/') + + bad = set((os.curdir, os.pardir, '')) +@@ -84,7 +84,7 @@ + + def HandleRPC(self, name, query): + kargs = {} +- for k, v in query.items(): ++ for k, v in list(query.items()): + assert len(v) == 1, k + kargs[k] = v[0] + +@@ -110,13 +110,13 @@ + new_value_in_secs = old_value_in_secs - 360 + value = time.strftime(last_mod_format, + time.localtime(new_value_in_secs)) +- SimpleHTTPServer.SimpleHTTPRequestHandler.send_header(self, ++ http.server.SimpleHTTPRequestHandler.send_header(self, + keyword, + value) + + def do_POST(self): + # Backwards compatible - treat result as tuple without named fields. +- _, _, path, _, query, _ = urlparse.urlparse(self.path) ++ _, _, path, _, query, _ = urllib.parse.urlparse(self.path) + + self.server.listener.Log('POST %s (%s)' % (self.path, path)) + if path == '/echo': +@@ -166,7 +166,7 @@ + + def do_GET(self): + # Backwards compatible - treat result as tuple without named fields. +- _, _, path, _, query, _ = urlparse.urlparse(self.path) ++ _, _, path, _, query, _ = urllib.parse.urlparse(self.path) + + tester = '/TESTER/' + if path.startswith(tester): +@@ -211,7 +211,7 @@ + def copyfile(self, source, outputfile): + # Bandwidth values <= 0.0 are considered infinite + if self.server.bandwidth <= 0.0: +- return SimpleHTTPServer.SimpleHTTPRequestHandler.copyfile( ++ return http.server.SimpleHTTPRequestHandler.copyfile( + self, source, outputfile) + + self.server.listener.Log('Simulating %f mbps server BW' % +@@ -246,7 +246,7 @@ + # helps reduce the chance this will happen. + # There were apparently some problems using this Mixin with Python 2.5, but we + # are no longer using anything older than 2.6. +-class Server(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer): ++class Server(socketserver.ThreadingMixIn, http.server.HTTPServer): + + def Configure( + self, file_mapping, redirect_mapping, extensions_mapping, allow_404, +--- a/src/3rdparty/chromium/printing/cups_config_helper.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/printing/cups_config_helper.py 2025-01-16 02:26:08.533930519 +0800 +@@ -17,7 +17,7 @@ + is fixed. 
+ """ + +-from __future__ import print_function ++ + + import os + import subprocess +--- a/src/3rdparty/chromium/printing/backend/tools/code_generator.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/printing/backend/tools/code_generator.py 2025-01-16 02:26:08.533930519 +0800 +@@ -189,7 +189,7 @@ + continue + + if not KEYWORD_PATTERN.match(attr_name): +- print('Warning: attribute name %s is invalid' % attr_name) ++ print(('Warning: attribute name %s is invalid' % attr_name)) + continue + + syntax = attr[4] +--- a/src/3rdparty/chromium/sandbox/policy/mac/package_sb_file.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/sandbox/policy/mac/package_sb_file.py 2025-01-16 02:26:08.533930519 +0800 +@@ -28,7 +28,7 @@ + + def pack_file(argv): + if len(argv) != 2: +- print >> sys.stderr, 'usage: package_sb_file.py input_filename output_dir' ++ print('usage: package_sb_file.py input_filename output_dir', file=sys.stderr) + return 1 + input_filename = argv[0] + output_directory = argv[1] +@@ -56,7 +56,7 @@ + outfile.write(cc_definition_end) + outfile.write(namespace_end) + except IOError: +- print >> sys.stderr, 'Failed to process %s' % input_filename ++ print('Failed to process %s' % input_filename, file=sys.stderr) + return 1 + return 0 + +--- a/src/3rdparty/chromium/services/device/public/cpp/usb/tools/usb_ids.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/services/device/public/cpp/usb/tools/usb_ids.py 2025-01-16 02:26:08.533930519 +0800 +@@ -64,7 +64,7 @@ + return output + + def GenerateVendorDefinitions(table): +- output = "const size_t UsbIds::vendor_size_ = %d;\n" % len(table.keys()) ++ output = "const size_t UsbIds::vendor_size_ = %d;\n" % len(list(table.keys())) + output += "const UsbVendor UsbIds::vendors_[] = {\n" + + for vendor_id in sorted(table.keys()): +--- a/src/3rdparty/chromium/testing/run_with_dummy_home.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/run_with_dummy_home.py 2025-01-16 02:26:08.533930519 +0800 +@@ -44,7 +44,7 @@ + def main(): + try: + dummy_home = tempfile.mkdtemp() +- print 'Creating dummy home in %s' % dummy_home ++ print('Creating dummy home in %s' % dummy_home) + + original_home = os.environ['HOME'] + os.environ['HOME'] = dummy_home +--- a/src/3rdparty/chromium/testing/test_env.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/test_env.py 2025-01-16 02:26:08.533930519 +0800 +@@ -5,7 +5,7 @@ + + """Sets environment variables needed to run a chromium unit test.""" + +-from __future__ import print_function ++ + import io + import os + import signal +--- a/src/3rdparty/chromium/testing/test_env_test_script.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/test_env_test_script.py 2025-01-16 02:26:08.533930519 +0800 +@@ -5,7 +5,7 @@ + + """Script for use in test_env unittests.""" + +-from __future__ import print_function ++ + import signal + import sys + import time +--- a/src/3rdparty/chromium/testing/xvfb.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/xvfb.py 2025-01-16 02:26:08.533930519 +0800 +@@ -40,13 +40,12 @@ + + thread.join(timeout_in_seconds) + if thread.is_alive(): +- print >> sys.stderr, '%s running after SIGTERM, trying SIGKILL.' % name ++ print('%s running after SIGTERM, trying SIGKILL.' % name, file=sys.stderr) + proc.kill() + + thread.join(timeout_in_seconds) + if thread.is_alive(): +- print >> sys.stderr, \ +- '%s running after SIGTERM and SIGKILL; good luck!' 
% name ++ print('%s running after SIGTERM and SIGKILL; good luck!' % name, file=sys.stderr) + + + def launch_dbus(env): +@@ -79,7 +78,7 @@ + env[m.group(1)] = m.group(2) + return int(env['DBUS_SESSION_BUS_PID']) + except (subprocess.CalledProcessError, OSError, KeyError, ValueError) as e: +- print 'Exception while running dbus_launch: %s' % e ++ print('Exception while running dbus_launch: %s' % e) + + + # TODO(crbug.com/949194): Encourage setting flags to False. +@@ -124,7 +123,7 @@ + use_weston = False + if '--use-weston' in cmd: + if use_xvfb: +- print >> sys.stderr, 'Unable to use Weston with xvfb.' ++ print('Unable to use Weston with xvfb.', file=sys.stderr) + return 1 + use_weston = True + cmd.remove('--use-weston') +@@ -217,10 +216,10 @@ + + return test_env.run_executable(cmd, env, stdoutfile) + except OSError as e: +- print >> sys.stderr, 'Failed to start Xvfb or Openbox: %s' % str(e) ++ print('Failed to start Xvfb or Openbox: %s' % str(e), file=sys.stderr) + return 1 + except _XvfbProcessError as e: +- print >> sys.stderr, 'Xvfb fail: %s' % str(e) ++ print('Xvfb fail: %s' % str(e), file=sys.stderr) + return 1 + finally: + kill(openbox_proc, 'openbox') +@@ -278,10 +277,10 @@ + env['WAYLAND_DISPLAY'] = weston_proc_display + return test_env.run_executable(cmd, env, stdoutfile) + except OSError as e: +- print >> sys.stderr, 'Failed to start Weston: %s' % str(e) ++ print('Failed to start Weston: %s' % str(e), file=sys.stderr) + return 1 + except _WestonProcessError as e: +- print >> sys.stderr, 'Weston fail: %s' % str(e) ++ print('Weston fail: %s' % str(e), file=sys.stderr) + return 1 + finally: + kill(weston_proc, 'weston') +@@ -379,22 +378,22 @@ + if not runtime_dir: + runtime_dir = '/tmp/xdg-tmp-dir/' + if not os.path.exists(runtime_dir): +- os.makedirs(runtime_dir, 0700) ++ os.makedirs(runtime_dir, 0o700) + env['XDG_RUNTIME_DIR'] = runtime_dir + + + def main(): + usage = 'Usage: xvfb.py [command [--no-xvfb or --use-weston] args...]' + if len(sys.argv) < 2: +- print >> sys.stderr, usage ++ print(usage, file=sys.stderr) + return 2 + + # If the user still thinks the first argument is the execution directory then + # print a friendly error message and quit. + if os.path.isdir(sys.argv[1]): +- print >> sys.stderr, ( +- 'Invalid command: \"%s\" is a directory' % sys.argv[1]) +- print >> sys.stderr, usage ++ print(( ++ 'Invalid command: \"%s\" is a directory' % sys.argv[1]), file=sys.stderr) ++ print(usage, file=sys.stderr) + return 3 + + return run_executable(sys.argv[1:], os.environ.copy()) +--- a/src/3rdparty/chromium/testing/xvfb_test_script.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/xvfb_test_script.py 2025-01-16 02:26:08.533930519 +0800 +@@ -16,7 +16,7 @@ + + + def print_signal(sig, *_): +- print 'Signal :{}'.format(sig) ++ print('Signal :{}'.format(sig)) + + + if __name__ == '__main__': +@@ -24,10 +24,10 @@ + signal.signal(signal.SIGINT, print_signal) + + # test if inside xvfb flag is set. +- print 'Inside_xvfb :{}'.format( +- os.environ.get('_CHROMIUM_INSIDE_XVFB', 'None')) ++ print('Inside_xvfb :{}'.format( ++ os.environ.get('_CHROMIUM_INSIDE_XVFB', 'None'))) + # test the subprocess display number. +- print 'Display :{}'.format(os.environ.get('DISPLAY', 'None')) ++ print('Display :{}'.format(os.environ.get('DISPLAY', 'None'))) + + if len(sys.argv) > 1 and sys.argv[1] == '--sleep': + time.sleep(2) # gives process time to receive signal. 
+--- a/src/3rdparty/chromium/testing/chromoting/browser_tests_launcher.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/chromoting/browser_tests_launcher.py 2025-01-16 02:26:08.533930519 +0800 +@@ -67,10 +67,10 @@ + # record instances where a test passed despite a JID mismatch. + if jids_used and host_jid.rstrip() not in jids_used: + host_jid_mismatch = True +- print 'Host JID mismatch. JID in host log = %s.' % host_jid.rstrip() +- print 'Host JIDs used by test:' ++ print('Host JID mismatch. JID in host log = %s.' % host_jid.rstrip()) ++ print('Host JIDs used by test:') + for jid in jids_used: +- print jid ++ print(jid) + + if host_jid_mismatch: + # The JID for the remote-host did not match the JID that was used for this +@@ -83,7 +83,7 @@ + time.sleep(30) + continue + elif jids_used: +- print 'JID used by test matched me2me host JID: %s' % host_jid ++ print('JID used by test matched me2me host JID: %s' % host_jid) + else: + # There wasn't a mismatch and no JIDs were returned. If no JIDs were + # returned, that means the test didn't use any JIDs, so there is nothing +@@ -102,9 +102,9 @@ + # and, because sometimes that line gets logged even if the test + # eventually passes, we'll also look for "(TIMED OUT)", before retrying. + if BROWSER_NOT_STARTED_ERROR in results and TIME_OUT_INDICATOR in results: +- print 'Browser-instance not started (http://crbug/480025). Retrying.' ++ print('Browser-instance not started (http://crbug/480025). Retrying.') + else: +- print 'Test failed for unknown reason. Retrying.' ++ print('Test failed for unknown reason. Retrying.') + + retries += 1 + +@@ -156,9 +156,9 @@ + try: + host_logs = main(command_line_args) + if TEST_FAILURE: +- print '++++++++++AT LEAST 1 TEST FAILED++++++++++' +- print FAILING_TESTS.rstrip('\n') +- print '++++++++++++++++++++++++++++++++++++++++++' ++ print('++++++++++AT LEAST 1 TEST FAILED++++++++++') ++ print(FAILING_TESTS.rstrip('\n')) ++ print('++++++++++++++++++++++++++++++++++++++++++') + raise Exception('At least one test failed.') + finally: + # Stop host and cleanup user-profile-dir. +--- a/src/3rdparty/chromium/testing/chromoting/chromoting_test_driver_launcher.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/chromoting/chromoting_test_driver_launcher.py 2025-01-16 02:26:08.533930519 +0800 +@@ -38,7 +38,7 @@ + + if not host_jid: + # Host-JID not found in log. Let's not attempt to run this test. +- print 'Host-JID not found in log %s.' % host_log_file_names[-1] ++ print('Host-JID not found in log %s.' % host_log_file_names[-1]) + return '[Command failed]: %s, %s' % (command, host_log_file_names) + + retries = 0 +@@ -123,9 +123,9 @@ + try: + failing_tests, host_logs = main(command_line_args) + if failing_tests: +- print '++++++++++FAILED TESTS++++++++++' +- print failing_tests.rstrip('\n') +- print '++++++++++++++++++++++++++++++++' ++ print('++++++++++FAILED TESTS++++++++++') ++ print(failing_tests.rstrip('\n')) ++ print('++++++++++++++++++++++++++++++++') + raise Exception('At least one test failed.') + finally: + # Stop host and cleanup user-profile-dir. 
+--- a/src/3rdparty/chromium/testing/chromoting/chromoting_test_utilities.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/chromoting/chromoting_test_utilities.py 2025-01-16 02:26:08.533930519 +0800 +@@ -47,13 +47,13 @@ + + cmd_line = [command] + try: +- print 'Going to run:\n%s' % command ++ print('Going to run:\n%s' % command) + results = subprocess.check_output(cmd_line, stderr=subprocess.STDOUT, + shell=True) +- except subprocess.CalledProcessError, e: ++ except subprocess.CalledProcessError as e: + results = e.output + finally: +- print results ++ print(results) + return results + + +@@ -133,7 +133,7 @@ + # Stop chromoting host. + RunCommandInSubProcess(CHROMOTING_HOST_PATH + ' --stop') + # Start chromoting host. +- print 'Starting chromoting host from %s' % CHROMOTING_HOST_PATH ++ print('Starting chromoting host from %s' % CHROMOTING_HOST_PATH) + results = RunCommandInSubProcess(CHROMOTING_HOST_PATH + ' --start') + + os.chdir(previous_directory) +@@ -148,7 +148,7 @@ + if HOST_READY_INDICATOR not in results: + # Host start failed. Print out host-log. Don't run any tests. + with open(log_file, 'r') as f: +- print f.read() ++ print(f.read()) + raise HostOperationFailedException('Host restart failed.') + + return log_file +@@ -194,9 +194,9 @@ + processes = psutil.get_process_list() + processes = sorted(processes, key=lambda process: process.name) + +- print 'List of running processes:\n' ++ print('List of running processes:\n') + for process in processes: +- print process.name ++ print(process.name) + + + def PrintHostLogContents(host_log_files=None): +@@ -206,7 +206,7 @@ + with open(log_file, 'r') as log: + host_log_contents += '\nHOST LOG %s\n CONTENTS:\n%s' % ( + log_file, log.read()) +- print host_log_contents ++ print(host_log_contents) + + + def TestCaseSetup(args): +--- a/src/3rdparty/chromium/testing/chromoting/download_test_files.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/chromoting/download_test_files.py 2025-01-16 02:26:08.533930519 +0800 +@@ -49,8 +49,8 @@ + cp_cmd = ['gsutil.py', 'cp', line, output_file] + try: + subprocess.check_call(cp_cmd) +- except subprocess.CalledProcessError, e: +- print e.output ++ except subprocess.CalledProcessError as e: ++ print(e.output) + sys.exit(1) + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/testing/clusterfuzz/common/fuzzy_types.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/clusterfuzz/common/fuzzy_types.py 2025-01-16 02:26:08.533930519 +0800 +@@ -19,8 +19,8 @@ + """Returns an integer derived from the input by one of several mutations.""" + int_sizes = [8, 16, 32, 64, 128] + mutations = [ +- lambda n: utils.UniformExpoInteger(0, sys.maxint.bit_length() + 1), +- lambda n: -utils.UniformExpoInteger(0, sys.maxint.bit_length()), ++ lambda n: utils.UniformExpoInteger(0, sys.maxsize.bit_length() + 1), ++ lambda n: -utils.UniformExpoInteger(0, sys.maxsize.bit_length()), + lambda n: 2 ** random.choice(int_sizes) - 1, + lambda n: 2 ** random.choice(int_sizes), + lambda n: 0, +@@ -55,9 +55,9 @@ + # If we're still here, apply a more generic mutation + mutations = [ + lambda s: "".join(random.choice(string.printable) for i in +- xrange(utils.UniformExpoInteger(0, 14))), +- lambda s: "".join(unichr(random.randint(0, sys.maxunicode)) for i in +- xrange(utils.UniformExpoInteger(0, 14))).encode("utf-8"), ++ range(utils.UniformExpoInteger(0, 14))), ++ lambda s: "".join(chr(random.randint(0, sys.maxunicode)) for i in ++ range(utils.UniformExpoInteger(0, 
14))).encode("utf-8"), + lambda s: os.urandom(utils.UniformExpoInteger(0, 14)), + lambda s: s * utils.UniformExpoInteger(1, 5), + lambda s: s + "A" * utils.UniformExpoInteger(0, 14), +@@ -124,7 +124,7 @@ + if amount is None: + amount = utils.RandomLowInteger(min(1, len(self)), len(self) - location) + if hasattr(value, "__call__"): +- new_elements = (value() for i in xrange(amount)) ++ new_elements = (value() for i in range(amount)) + else: + new_elements = itertools.repeat(value, amount) + self[location:location+amount] = new_elements +@@ -140,7 +140,7 @@ + if amount is None: + amount = utils.UniformExpoInteger(0, max_exponent) + if hasattr(value, "__call__"): +- new_elements = (value() for i in xrange(amount)) ++ new_elements = (value() for i in range(amount)) + else: + new_elements = itertools.repeat(value, amount) + self[location:location] = new_elements +@@ -171,7 +171,7 @@ + ] + if count is None: + count = utils.RandomLowInteger(1, 5, beta=3.0) +- for _ in xrange(count): ++ for _ in range(count): + random.choice(mutations)() + + +@@ -185,7 +185,7 @@ + """Flip num_bits bits in the buffer at random.""" + if num_bits is None: + num_bits = utils.RandomLowInteger(min(1, len(self)), len(self) * 8) +- for bit in random.sample(xrange(len(self) * 8), num_bits): ++ for bit in random.sample(range(len(self) * 8), num_bits): + self[bit / 8] ^= 1 << (bit % 8) + + def RandomMutation(self, count=None): +@@ -203,5 +203,5 @@ + ] + if count is None: + count = utils.RandomLowInteger(1, 5, beta=3.0) +- for _ in xrange(count): ++ for _ in range(count): + utils.WeightedChoice(mutations)() +--- a/src/3rdparty/chromium/testing/clusterfuzz/common/utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/clusterfuzz/common/utils.py 2025-01-16 02:26:08.533930519 +0800 +@@ -6,6 +6,7 @@ + import functools + import math + import random ++from functools import reduce + + + def RandomLowInteger(low, high, beta=31.0): +--- a/src/3rdparty/chromium/testing/libfuzzer/archive_corpus.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/libfuzzer/archive_corpus.py 2025-01-16 02:26:08.533930519 +0800 +@@ -9,7 +9,7 @@ + Invoked by GN from fuzzer_test.gni. + """ + +-from __future__ import print_function ++ + import argparse + import os + import sys +--- a/src/3rdparty/chromium/testing/libfuzzer/dictionary_generator.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/libfuzzer/dictionary_generator.py 2025-01-16 02:26:08.533930519 +0800 +@@ -11,7 +11,7 @@ + """ + + import argparse +-import HTMLParser ++import html.parser + import io + import logging + import os +@@ -29,7 +29,7 @@ + + def DecodeHTML(html_data): + """HTML-decoding of the data.""" +- html_parser = HTMLParser.HTMLParser() ++ html_parser = html.parser.HTMLParser() + data = html_parser.unescape(html_data.decode('ascii', 'ignore')) + return data.encode('ascii', 'ignore') + +@@ -54,7 +54,7 @@ + for encoding in ENCODING_TYPES: + data = rodata.decode(encoding, 'ignore').encode('ascii', 'ignore') + raw_strings = strings_re.findall(data) +- for splitted_line in map(lambda line: line.split(), raw_strings): ++ for splitted_line in [line.split() for line in raw_strings]: + words += splitted_line + + return set(words) +@@ -85,7 +85,7 @@ + previous_number_of_spaces = 0 + + # Go through every line and concatenate space-indented blocks into lines. +- for i in xrange(0, len(lines), 1): ++ for i in range(0, len(lines), 1): + if not lines[i]: + # Ignore empty lines. 
+ continue +--- a/src/3rdparty/chromium/testing/libfuzzer/gen_fuzzer_config.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/libfuzzer/gen_fuzzer_config.py 2025-01-16 02:26:08.533930519 +0800 +@@ -8,7 +8,7 @@ + Invoked by GN from fuzzer_test.gni. + """ + +-import ConfigParser ++import configparser + import argparse + import os + import sys +@@ -54,7 +54,7 @@ + args.grammar_options): + return + +- config = ConfigParser.ConfigParser() ++ config = configparser.ConfigParser() + libfuzzer_options = [] + if args.dict: + libfuzzer_options.append(('dict', os.path.basename(args.dict))) +--- a/src/3rdparty/chromium/testing/libfuzzer/zip_sources.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/libfuzzer/zip_sources.py 2025-01-16 02:26:08.536097149 +0800 +@@ -9,7 +9,7 @@ + Invoked by libfuzzer buildbots. Executes dwarfdump to parse debug info. + """ + +-from __future__ import print_function ++ + + import argparse + import os +--- a/src/3rdparty/chromium/testing/merge_scripts/common_merge_script_tests.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/merge_scripts/common_merge_script_tests.py 2025-01-16 02:26:08.536097149 +0800 +@@ -28,9 +28,9 @@ + summary_json = os.path.join(task_output_dir, 'summary.json') + with open(summary_json, 'w') as summary_file: + summary_contents = { +- u'shards': [ ++ 'shards': [ + { +- u'state': u'COMPLETED', ++ 'state': 'COMPLETED', + }, + ], + } +@@ -48,4 +48,4 @@ + '--output-json', output_json, + shard0_json, + ] +- self.assertEquals(0, self._module.main(raw_args)) ++ self.assertEqual(0, self._module.main(raw_args)) +--- a/src/3rdparty/chromium/testing/merge_scripts/noop_merge.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/merge_scripts/noop_merge.py 2025-01-16 02:26:08.536097149 +0800 +@@ -21,8 +21,8 @@ + jsons_to_merge: A list of paths to JSON files. + """ + if len(jsons_to_merge) > 1: +- print >> sys.stderr, ( +- 'Multiple JSONs provided: %s' % ','.join(jsons_to_merge)) ++ print(( ++ 'Multiple JSONs provided: %s' % ','.join(jsons_to_merge)), file=sys.stderr) + return 1 + if jsons_to_merge: + shutil.copyfile(jsons_to_merge[0], output_json) +--- a/src/3rdparty/chromium/testing/merge_scripts/results_merger.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/merge_scripts/results_merger.py 2025-01-16 02:26:08.536097149 +0800 +@@ -179,7 +179,7 @@ + if result_json: + raise MergeException( # pragma: no cover (covered by + # results_merger_unittest). +- 'Unmergable values %s' % result_json.keys()) ++ 'Unmergable values %s' % list(result_json.keys())) + + return merged_results + +@@ -202,7 +202,7 @@ + pending_nodes = [('', dest, source)] + while pending_nodes: + prefix, dest_node, curr_node = pending_nodes.pop() +- for k, v in curr_node.iteritems(): ++ for k, v in curr_node.items(): + if k in dest_node: + if not isinstance(v, dict): + raise MergeException( +@@ -234,7 +234,7 @@ + + This is intended for use as a merge_func parameter to merge_value. 
+ """ +- for k, v in source.iteritems(): ++ for k, v in source.items(): + dest.setdefault(k, 0) + dest[k] += v + +@@ -274,7 +274,7 @@ + for f in files[1:]: + sys.stderr.write('Merging %s\n' % f) + result = merge_test_results([result, json.load(open(f))]) +- print json.dumps(result) ++ print(json.dumps(result)) + return 0 + + +--- a/src/3rdparty/chromium/testing/merge_scripts/results_merger_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/merge_scripts/results_merger_test.py 2025-01-16 02:26:08.536097149 +0800 +@@ -162,13 +162,13 @@ + maxDiff = None # Show full diff if assertion fail + + def test_merge_tries(self): +- self.assertEquals( ++ self.assertEqual( + {'a': 'A', 'b': {'c': 'C'}}, + results_merger.merge_tries( + {'a': 'A', 'b': {}}, {'b': {'c': 'C'}})) + + def test_merge_tries_unmergable(self): +- with self.assertRaisesRegexp(results_merger.MergeException, "a:b"): ++ with self.assertRaisesRegex(results_merger.MergeException, "a:b"): + results_merger.merge_tries( + {'a': {'b': 'A'}}, {'a': {'b': 'C'}}) + +@@ -178,7 +178,7 @@ + merged_results = results_merger.merge_test_results( + [extend(GOOD_JSON_TEST_RESULT_0, metadata1), + extend(GOOD_JSON_TEST_RESULT_1, metadata2)]) +- self.assertEquals( ++ self.assertEqual( + merged_results['metadata']['tags'], ['foo', 'bat']) + + def test_merge_json_test_results_nop(self): +@@ -190,8 +190,8 @@ + for j in good_json_results: + # Clone so we can check the input dictionaries are not modified + a = copy.deepcopy(j) +- self.assertEquals(results_merger.merge_test_results([a]), j) +- self.assertEquals(a, j) ++ self.assertEqual(results_merger.merge_test_results([a]), j) ++ self.assertEqual(a, j) + + def test_merge_json_test_results_invalid_version(self): + with self.assertRaises(results_merger.MergeException): +@@ -242,7 +242,7 @@ + ]) + + def test_merge_json_test_results_multiple(self): +- self.assertEquals( ++ self.assertEqual( + results_merger.merge_test_results([ + GOOD_JSON_TEST_RESULT_0, + GOOD_JSON_TEST_RESULT_1, +@@ -251,7 +251,7 @@ + GOOD_JSON_TEST_RESULT_MERGED) + + def test_merge_json_test_results_optional_matches(self): +- self.assertEquals( ++ self.assertEqual( + results_merger.merge_test_results([ + extend(GOOD_JSON_TEST_RESULT_0, {'path_delimiter': '.'}), + extend(GOOD_JSON_TEST_RESULT_1, {'path_delimiter': '.'}), +@@ -268,7 +268,7 @@ + ]) + + def test_merge_json_test_results_optional_count(self): +- self.assertEquals( ++ self.assertEqual( + results_merger.merge_test_results([ + extend(GOOD_JSON_TEST_RESULT_0, {'fixable': 1}), + extend(GOOD_JSON_TEST_RESULT_1, {'fixable': 2}), +@@ -277,7 +277,7 @@ + extend(GOOD_JSON_TEST_RESULT_MERGED, {'fixable': 6})) + + def test_merge_nothing(self): +- self.assertEquals( ++ self.assertEqual( + results_merger.merge_test_results([]), + {}) + +--- a/src/3rdparty/chromium/testing/merge_scripts/standard_gtest_merge.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/merge_scripts/standard_gtest_merge.py 2025-01-16 02:26:08.536097149 +0800 +@@ -27,13 +27,13 @@ + + + def emit_warning(title, log=None): +- print '@@@STEP_WARNINGS@@@' +- print title ++ print('@@@STEP_WARNINGS@@@') ++ print(title) + if log: + title = title.rstrip() + for line in log.splitlines(): +- print '@@@STEP_LOG_LINE@%s@%s@@@' % (title, line.rstrip()) +- print '@@@STEP_LOG_END@%s@@@' % title ++ print('@@@STEP_LOG_LINE@%s@%s@@@' % (title, line.rstrip())) ++ print('@@@STEP_LOG_END@%s@@@' % title) + + + def merge_shard_results(summary_json, jsons_to_merge): +@@ -72,16 +72,16 @@ + # 
client/swarming.py, which means the state enum is saved in its string + # name form, not in the number form. + state = result.get('state') +- if state == u'BOT_DIED': ++ if state == 'BOT_DIED': + emit_warning('Shard #%d had a Swarming internal failure' % index) +- elif state == u'EXPIRED': ++ elif state == 'EXPIRED': + emit_warning('There wasn\'t enough capacity to run your test') +- elif state == u'TIMED_OUT': ++ elif state == 'TIMED_OUT': + emit_warning( + 'Test runtime exceeded allocated time', + 'Either it ran for too long (hard timeout) or it didn\'t produce ' + 'I/O for an extended period of time (I/O timeout)') +- elif state != u'COMPLETED': ++ elif state != 'COMPLETED': + emit_warning('Invalid Swarming task state: %s' % state) + + json_data, err_msg = load_shard_json(index, result.get('task_id'), +@@ -145,10 +145,10 @@ + os.path.basename(os.path.dirname(j)) == task_id))] + + if not matching_json_files: +- print >> sys.stderr, 'shard %s test output missing' % index ++ print('shard %s test output missing' % index, file=sys.stderr) + return (None, 'shard %s test output was missing' % index) + elif len(matching_json_files) > 1: +- print >> sys.stderr, 'duplicate test output for shard %s' % index ++ print('duplicate test output for shard %s' % index, file=sys.stderr) + return (None, 'shard %s test output was duplicated' % index) + + path = matching_json_files[0] +@@ -156,15 +156,15 @@ + try: + filesize = os.stat(path).st_size + if filesize > OUTPUT_JSON_SIZE_LIMIT: +- print >> sys.stderr, 'output.json is %d bytes. Max size is %d' % ( +- filesize, OUTPUT_JSON_SIZE_LIMIT) ++ print('output.json is %d bytes. Max size is %d' % ( ++ filesize, OUTPUT_JSON_SIZE_LIMIT), file=sys.stderr) + return (None, 'shard %s test output exceeded the size limit' % index) + + with open(path) as f: + return (json.load(f), None) + except (IOError, ValueError, OSError) as e: +- print >> sys.stderr, 'Missing or invalid gtest JSON file: %s' % path +- print >> sys.stderr, '%s: %s' % (type(e).__name__, e) ++ print('Missing or invalid gtest JSON file: %s' % path, file=sys.stderr) ++ print('%s: %s' % (type(e).__name__, e), file=sys.stderr) + + return (None, 'shard %s test output was missing or invalid' % index) + +@@ -172,7 +172,7 @@ + def merge_list_of_dicts(left, right): + """Merges dicts left[0] with right[0], left[1] with right[1], etc.""" + output = [] +- for i in xrange(max(len(left), len(right))): ++ for i in range(max(len(left), len(right))): + left_dict = left[i] if i < len(left) else {} + right_dict = right[i] if i < len(right) else {} + merged_dict = left_dict.copy() +--- a/src/3rdparty/chromium/testing/merge_scripts/standard_gtest_merge_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/merge_scripts/standard_gtest_merge_test.py 2025-01-16 02:26:08.536097149 +0800 +@@ -3,7 +3,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
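
For reviewers cross-checking the load_shard_json() conversions above: the Python 3 form writes to the same stream as the old statement syntax, e.g. (the index value is a hypothetical example):

    import sys

    index = 3
    # Python 2: print >> sys.stderr, 'shard %s test output missing' % index
    print('shard %s test output missing' % index, file=sys.stderr)
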
+ +-import cStringIO ++import io + import json + import logging + import os +@@ -200,11 +200,11 @@ + }], + }], + 'swarming_summary': { +- u'shards': [ ++ 'shards': [ + { +- u'state': u'COMPLETED', +- u'outputs_ref': { +- u'view_url': u'blah', ++ 'state': 'COMPLETED', ++ 'outputs_ref': { ++ 'view_url': 'blah', + }, + } + ], +@@ -326,12 +326,12 @@ + }], + }], + 'swarming_summary': { +- u'shards': [ ++ 'shards': [ + { +- u'state': u'COMPLETED', ++ 'state': 'COMPLETED', + }, + { +- u'state': u'TIMED_OUT', ++ 'state': 'TIMED_OUT', + }, + ], + }, +@@ -381,7 +381,7 @@ + + def test_double_digit_jsons(self): + jsons_to_merge = [] +- for i in xrange(15): ++ for i in range(15): + json_dir = os.path.join(self.temp_dir, str(i)) + json_path = os.path.join(json_dir, 'output.json') + if not os.path.exists(json_dir): +@@ -402,7 +402,7 @@ + + def test_double_task_id_jsons(self): + jsons_to_merge = [] +- for i in xrange(15): ++ for i in range(15): + json_dir = os.path.join(self.temp_dir, 'deadbeef%d' % i) + json_path = os.path.join(json_dir, 'output.json') + if not os.path.exists(json_dir): +@@ -434,12 +434,12 @@ + + def stage(self, summary, files): + self.summary = self._write_temp_file('summary.json', summary) +- for path, content in files.iteritems(): ++ for path, content in files.items(): + abs_path = self._write_temp_file(path, content) + self.test_files.append(abs_path) + + def call(self): +- stdout = cStringIO.StringIO() ++ stdout = io.StringIO() + with mock.patch('sys.stdout', stdout): + merged = standard_gtest_merge.merge_shard_results( + self.summary, self.test_files) +@@ -448,27 +448,27 @@ + def assertUnicodeEquals(self, expectation, result): + def convert_to_unicode(key_or_value): + if isinstance(key_or_value, str): +- return unicode(key_or_value) ++ return str(key_or_value) + if isinstance(key_or_value, dict): + return {convert_to_unicode(k): convert_to_unicode(v) +- for k, v in key_or_value.items()} ++ for k, v in list(key_or_value.items())} + if isinstance(key_or_value, list): + return [convert_to_unicode(x) for x in key_or_value] + return key_or_value + + unicode_expectations = convert_to_unicode(expectation) + unicode_result = convert_to_unicode(result) +- self.assertEquals(unicode_expectations, unicode_result) ++ self.assertEqual(unicode_expectations, unicode_result) + + def test_ok(self): + # Two shards, both successfully finished. + self.stage({ +- u'shards': [ ++ 'shards': [ + { +- u'state': u'COMPLETED', ++ 'state': 'COMPLETED', + }, + { +- u'state': u'COMPLETED', ++ 'state': 'COMPLETED', + }, + ], + }, +@@ -480,9 +480,9 @@ + merged['swarming_summary'] = { + 'shards': [ + { +- u'state': u'COMPLETED', +- u'outputs_ref': { +- u'view_url': u'blah', ++ 'state': 'COMPLETED', ++ 'outputs_ref': { ++ 'view_url': 'blah', + }, + } + ], +@@ -523,15 +523,15 @@ + def test_unfinished_shards(self): + # Only one shard (#1) finished. Shard #0 did not. + self.stage({ +- u'shards': [ ++ 'shards': [ + None, + { +- u'state': u'COMPLETED', ++ 'state': 'COMPLETED', + }, + ], + }, + { +- u'1/output.json': GOOD_GTEST_JSON_1, ++ '1/output.json': GOOD_GTEST_JSON_1, + }) + merged, stdout = self.call() + merged.pop('swarming_summary') +@@ -545,17 +545,17 @@ + def test_missing_output_json(self): + # Shard #0 output json is missing. 
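
The cStringIO-to-io swap above carries one behavioural difference worth remembering: io.StringIO accepts only text, never bytes. A quick sketch of the stdout-capture pattern the test uses, with contextlib shown as an alternative to mock.patch:

    import io
    from contextlib import redirect_stdout

    buf = io.StringIO()
    with redirect_stdout(buf):
        print('captured')
    assert buf.getvalue() == 'captured\n'
    # buf.write(b'raw bytes') would raise TypeError, unlike cStringIO.StringIO.
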
+ self.stage({ +- u'shards': [ ++ 'shards': [ + { +- u'state': u'COMPLETED', ++ 'state': 'COMPLETED', + }, + { +- u'state': u'COMPLETED', ++ 'state': 'COMPLETED', + }, + ], + }, + { +- u'1/output.json': GOOD_GTEST_JSON_1, ++ '1/output.json': GOOD_GTEST_JSON_1, + }) + merged, stdout = self.call() + merged.pop('swarming_summary') +@@ -567,12 +567,12 @@ + def test_large_output_json(self): + # a shard is too large. + self.stage({ +- u'shards': [ ++ 'shards': [ + { +- u'state': u'COMPLETED', ++ 'state': 'COMPLETED', + }, + { +- u'state': u'COMPLETED', ++ 'state': 'COMPLETED', + }, + ], + }, +--- a/src/3rdparty/chromium/testing/merge_scripts/standard_isolated_script_merge.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/merge_scripts/standard_isolated_script_merge.py 2025-01-16 02:26:08.536097149 +0800 +@@ -26,9 +26,9 @@ + with open(summary_json) as f: + summary = json.load(f) + except (IOError, ValueError): +- print >> sys.stderr, ( ++ print(( + 'summary.json is missing or can not be read', +- 'Something is seriously wrong with swarming client or the bot.') ++ 'Something is seriously wrong with swarming client or the bot.'), file=sys.stderr) + return 1 + + missing_shards = [] +@@ -81,10 +81,10 @@ + os.path.basename(os.path.dirname(j)) == task_id))] + + if not matching_json_files: +- print >> sys.stderr, 'shard %s test output missing' % index ++ print('shard %s test output missing' % index, file=sys.stderr) + return None + elif len(matching_json_files) > 1: +- print >> sys.stderr, 'duplicate test output for shard %s' % index ++ print('duplicate test output for shard %s' % index, file=sys.stderr) + return None + + return matching_json_files[0] +--- a/src/3rdparty/chromium/testing/merge_scripts/standard_isolated_script_merge_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/merge_scripts/standard_isolated_script_merge_test.py 2025-01-16 02:26:08.536097149 +0800 +@@ -21,12 +21,12 @@ + + + TWO_COMPLETED_SHARDS = { +- u'shards': [ ++ 'shards': [ + { +- u'state': u'COMPLETED', ++ 'state': 'COMPLETED', + }, + { +- u'state': u'COMPLETED', ++ 'state': 'COMPLETED', + }, + ], + } +@@ -55,7 +55,7 @@ + + def _stage(self, summary, files): + self.summary = self._write_temp_file('summary.json', summary) +- for path, content in files.iteritems(): ++ for path, content in files.items(): + abs_path = self._write_temp_file(path, content) + self.test_files.append(abs_path) + +@@ -80,8 +80,8 @@ + + with open(output_json_file, 'r') as f: + results = json.load(f) +- self.assertEquals(results['successes'], ['fizz', 'baz', 'buzz', 'bar']) +- self.assertEquals(results['failures'], ['failing_test_one']) ++ self.assertEqual(results['successes'], ['fizz', 'baz', 'buzz', 'bar']) ++ self.assertEqual(results['failures'], ['failing_test_one']) + self.assertTrue(results['valid']) + + def test_missing_shard(self): +@@ -98,11 +98,11 @@ + + with open(output_json_file, 'r') as f: + results = json.load(f) +- self.assertEquals(results['successes'], ['fizz', 'baz']) +- self.assertEquals(results['failures'], []) ++ self.assertEqual(results['successes'], ['fizz', 'baz']) ++ self.assertEqual(results['failures'], []) + self.assertTrue(results['valid']) +- self.assertEquals(results['global_tags'], ['UNRELIABLE_RESULTS']) +- self.assertEquals(results['missing_shards'], [1]) ++ self.assertEqual(results['global_tags'], ['UNRELIABLE_RESULTS']) ++ self.assertEqual(results['missing_shards'], [1]) + + class InputParsingTest(StandardIsolatedScriptMergeTest): + def setUp(self): +@@ -141,8 
+141,8 @@ + exit_code = standard_isolated_script_merge.StandardIsolatedScriptMerge( + output_json_file, self.summary, self.test_files) + +- self.assertEquals(0, exit_code) +- self.assertEquals( ++ self.assertEqual(0, exit_code) ++ self.assertEqual( + [ + [ + { +@@ -161,7 +161,7 @@ + + def test_no_jsons(self): + self._stage({ +- u'shards': [], ++ 'shards': [], + }, {}) + + json_files = [] +@@ -169,8 +169,8 @@ + exit_code = standard_isolated_script_merge.StandardIsolatedScriptMerge( + output_json_file, self.summary, json_files) + +- self.assertEquals(0, exit_code) +- self.assertEquals([[]], self.merge_test_results_args) ++ self.assertEqual(0, exit_code) ++ self.assertEqual([[]], self.merge_test_results_args) + + + class CommandLineTest(common_merge_script_tests.CommandLineTest): +--- a/src/3rdparty/chromium/testing/scripts/blink_python_tests.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/scripts/blink_python_tests.py 2025-01-16 02:26:08.536097149 +0800 +@@ -29,7 +29,7 @@ + json.dump({ + 'valid': bool(rc <= common.MAX_FAILURES_EXIT_STATUS and + ((rc == 0) or failures)), +- 'failures': failures.keys(), ++ 'failures': list(failures.keys()), + }, args.output) + + return rc +--- a/src/3rdparty/chromium/testing/scripts/check_static_initializers.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/scripts/check_static_initializers.py 2025-01-16 02:26:08.536097149 +0800 +@@ -121,12 +121,12 @@ + if re.match('0x[0-9a-f]+', line) and not any( + f in line for f in _MAC_SI_FILE_ALLOWLIST): + ret = 1 +- print 'Found invalid static initializer: {}'.format(line) +- print stdout ++ print('Found invalid static initializer: {}'.format(line)) ++ print(stdout) + elif si_count > FALLBACK_EXPECTED_MAC_SI_COUNT: +- print('Expected <= %d static initializers in %s, but found %d' % ++ print(('Expected <= %d static initializers in %s, but found %d' % + (FALLBACK_EXPECTED_MAC_SI_COUNT, chromium_framework_executable, +- si_count)) ++ si_count))) + ret = 1 + show_mod_init_func = os.path.join(mac_tools_path, + 'show_mod_init_func.py') +@@ -134,14 +134,14 @@ + if os.path.exists(framework_unstripped_name): + args.append(framework_unstripped_name) + else: +- print '# Warning: Falling back to potentially stripped output.' 
++ print('# Warning: Falling back to potentially stripped output.') + args.append(chromium_framework_executable) + + if os.path.exists(hermetic_xcode_path): + args.extend(['--xcode-path', hermetic_xcode_path]) + + stdout = run_process(args) +- print stdout ++ print(stdout) + return ret + + +@@ -175,11 +175,11 @@ + for f in files_with_si: + if f not in allowlist[binary_name]: + ret = 1 +- print('Error: file "%s" is not expected to have static initializers in' +- ' binary "%s"') % (f, binary_name) ++ print(('Error: file "%s" is not expected to have static initializers in' ++ ' binary "%s"') % (f, binary_name)) + +- print '\n# Static initializers in %s:' % binary_name +- print stdout ++ print('\n# Static initializers in %s:' % binary_name) ++ print(stdout) + + return ret + +--- a/src/3rdparty/chromium/testing/scripts/common.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/scripts/common.py 2025-01-16 02:26:08.536097149 +0800 +@@ -74,9 +74,9 @@ + + + def run_command(argv, env=None, cwd=None): +- print 'Running %r in %r (env: %r)' % (argv, cwd, env) ++ print('Running %r in %r (env: %r)' % (argv, cwd, env)) + rc = test_env.run_command(argv, env=env, cwd=cwd) +- print 'Command %r returned exit code %d' % (argv, rc) ++ print('Command %r returned exit code %d' % (argv, rc)) + return rc + + +@@ -94,7 +94,7 @@ + def convert_trie_to_flat_paths(trie, prefix=None): + # Also see blinkpy.web_tests.layout_package.json_results_generator + result = {} +- for name, data in trie.iteritems(): ++ for name, data in trie.items(): + if prefix: + name = prefix + test_separator + name + if len(data) and not 'actual' in data and not 'expected' in data: +@@ -118,7 +118,7 @@ + passing_statuses = ('PASS', 'SLOW', 'NEEDSREBASELINE') + + for test, result in convert_trie_to_flat_paths( +- json_results['tests']).iteritems(): ++ json_results['tests']).items(): + key = 'unexpected_' if result.get('is_unexpected') else '' + data = result['actual'] + actual_results = data.split() +@@ -177,7 +177,7 @@ + mapping = {} + + for cur_iteration_data in output.get('per_iteration_data', []): +- for test_fullname, results in cur_iteration_data.iteritems(): ++ for test_fullname, results in cur_iteration_data.items(): + # Results is a list with one entry per test try. Last one is the final + # result. 
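
A cosmetic pattern in the two files above: wherever the Python 2 source already called print with parentheses, the automated conversion (this looks like 2to3 output) wraps the argument a second time, producing print((...)). The doubled parentheses are redundant but harmless; both forms below are equivalent (the message is a hypothetical example):

    count = 7
    print(('Expected <= %d static initializers' % count))  # converter output
    print('Expected <= %d static initializers' % count)    # equivalent, tidier
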
+ last_result = results[-1] +@@ -345,13 +345,13 @@ + valid = True + try: + env['CHROME_HEADLESS'] = '1' +- print 'Running command: %s\nwith env: %r' % ( +- ' '.join(cmd), env) ++ print('Running command: %s\nwith env: %r' % ( ++ ' '.join(cmd), env)) + if self.options.xvfb: + exit_code = xvfb.run_executable(cmd, env) + else: + exit_code = test_env.run_command(cmd, env=env) +- print 'Command returned exit code %d' % exit_code ++ print('Command returned exit code %d' % exit_code) + self.do_post_test_run_tasks() + return exit_code + except Exception: +--- a/src/3rdparty/chromium/testing/scripts/headless_python_unittests.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/scripts/headless_python_unittests.py 2025-01-16 02:26:08.536097149 +0800 +@@ -36,7 +36,7 @@ + json.dump({ + 'valid': bool(rc <= common.MAX_FAILURES_EXIT_STATUS and + ((rc == 0) or failures)), +- 'failures': failures.keys(), ++ 'failures': list(failures.keys()), + }, args.output) + + return rc +--- a/src/3rdparty/chromium/testing/scripts/host_info.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/scripts/host_info.py 2025-01-16 02:26:08.536097149 +0800 +@@ -97,7 +97,7 @@ + 'build_types': unique_build_details(2), + } + +- for k, v in parsed_details.iteritems(): ++ for k, v in parsed_details.items(): + if len(v) == 1: + results[k] = v[0] + else: +--- a/src/3rdparty/chromium/testing/scripts/run_performance_tests.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/scripts/run_performance_tests.py 2025-01-16 02:26:08.536097149 +0800 +@@ -160,7 +160,7 @@ + + + def print_duration(step, start): +- print 'Duration of %s: %d seconds' % (step, time.time() - start) ++ print('Duration of %s: %d seconds' % (step, time.time() - start)) + + + def IsWindows(): +@@ -295,8 +295,8 @@ + return_code = test_env.run_command_output_to_handle( + command, handle, env=env) + except OSError as e: +- print('Command to run gtest perf test %s failed with an OSError: %s' % +- (output_paths.name, e)) ++ print(('Command to run gtest perf test %s failed with an OSError: %s' % ++ (output_paths.name, e))) + return_code = 1 + if (not os.path.exists(output_paths.perf_results) and + os.path.exists(output_paths.logs)): +@@ -319,8 +319,8 @@ + # pylint: enable=no-name-in-module + gtest_json_converter.ConvertGtestJsonFile(output_paths.perf_results) + else: +- print('ERROR: gtest perf test %s did not generate perf output' % +- output_paths.name) ++ print(('ERROR: gtest perf test %s did not generate perf output' % ++ output_paths.name)) + return_code = 1 + write_simple_test_results(return_code, output_paths.test_results, + output_paths.name) +@@ -460,7 +460,7 @@ + except Exception: + print ('The following exception may have prevented the code from ' + 'outputing structured test results and perf results output:') +- print traceback.format_exc() ++ print(traceback.format_exc()) + finally: + # Add ignore_errors=True because otherwise rmtree may fail due to leaky + # processes of tests are still holding opened handles to files under +@@ -475,8 +475,8 @@ + # TODO(crbug.com/1019139): Make 111 be the exit code that means + # "no stories were run.". + if return_code in (111, -1, 255): +- print ('Exit code %s indicates that no stories were run, so we are marking ' +- 'this as a success.' % return_code) ++ print(('Exit code %s indicates that no stories were run, so we are marking ' ++ 'this as a success.' 
% return_code)) + return 0 + if return_code: + return return_code +@@ -564,7 +564,7 @@ + if not benchmark_name: + benchmark_name = options.executable + output_paths = OutputFilePaths(isolated_out_dir, benchmark_name).SetUp() +- print('\n### {folder} ###'.format(folder=benchmark_name)) ++ print(('\n### {folder} ###'.format(folder=benchmark_name))) + overall_return_code = execute_gtest_perf_test( + command_generator, output_paths, options.xvfb) + test_results_files.append(output_paths.test_results) +@@ -577,7 +577,7 @@ + output_paths = OutputFilePaths(isolated_out_dir, benchmark).SetUp() + command_generator = TelemetryCommandGenerator( + benchmark, options) +- print('\n### {folder} ###'.format(folder=benchmark)) ++ print(('\n### {folder} ###'.format(folder=benchmark))) + return_code = execute_telemetry_benchmark( + command_generator, output_paths, options.xvfb) + overall_return_code = return_code or overall_return_code +@@ -620,14 +620,14 @@ + if 'benchmarks' in shard_configuration: + benchmarks_and_configs = shard_configuration['benchmarks'] + for (benchmark, story_selection_config +- ) in benchmarks_and_configs.iteritems(): ++ ) in benchmarks_and_configs.items(): + # Need to run the benchmark on both latest browser and reference + # build. + output_paths = OutputFilePaths(isolated_out_dir, benchmark).SetUp() + command_generator = TelemetryCommandGenerator( + benchmark, options, + story_selection_config=story_selection_config) +- print('\n### {folder} ###'.format(folder=benchmark)) ++ print(('\n### {folder} ###'.format(folder=benchmark))) + return_code = execute_telemetry_benchmark( + command_generator, output_paths, options.xvfb) + overall_return_code = return_code or overall_return_code +@@ -640,8 +640,8 @@ + benchmark, options, + story_selection_config=story_selection_config, + is_reference=True) +- print('\n### {folder} ###'.format( +- folder=reference_benchmark_foldername)) ++ print(('\n### {folder} ###'.format( ++ folder=reference_benchmark_foldername))) + # We intentionally ignore the return code and test results of the + # reference build. + execute_telemetry_benchmark( +@@ -650,7 +650,7 @@ + if 'executables' in shard_configuration: + names_and_configs = shard_configuration['executables'] + for (name, configuration +- ) in names_and_configs.iteritems(): ++ ) in names_and_configs.items(): + additional_flags = [] + if 'arguments' in configuration: + additional_flags = configuration['arguments'] +@@ -658,7 +658,7 @@ + options, override_executable=configuration['path'], + additional_flags=additional_flags, ignore_shard_env_vars=True) + output_paths = OutputFilePaths(isolated_out_dir, name).SetUp() +- print('\n### {folder} ###'.format(folder=name)) ++ print(('\n### {folder} ###'.format(folder=name))) + return_code = execute_gtest_perf_test( + command_generator, output_paths, options.xvfb) + overall_return_code = return_code or overall_return_code +--- a/src/3rdparty/chromium/testing/scripts/run_rendering_benchmark_with_gated_performance.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/scripts/run_rendering_benchmark_with_gated_performance.py 2025-01-16 02:26:08.536097149 +0800 +@@ -15,7 +15,7 @@ + use with other benchmarks. 
+ """ + +-from __future__ import print_function ++ + + import argparse + import csv +--- a/src/3rdparty/chromium/testing/scripts/test_buildbucket_api_gpu_use_cases.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/scripts/test_buildbucket_api_gpu_use_cases.py 2025-01-16 02:26:08.536097149 +0800 +@@ -95,11 +95,11 @@ + error_msg = test() + if error_msg is not None: + result = '%s: %s' % (test_name, error_msg) +- print 'FAIL: %s' % result ++ print('FAIL: %s' % result) + failures.append(result) + + if not failures: +- print 'PASS: test_buildbucket_api_gpu_use_cases ran successfully.' ++ print('PASS: test_buildbucket_api_gpu_use_cases ran successfully.') + retval = 0 + + with open(args.isolated_script_test_output, 'w') as json_file: +--- a/src/3rdparty/chromium/testing/scripts/wpt_common.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/scripts/wpt_common.py 2025-01-16 02:26:08.536097149 +0800 +@@ -143,7 +143,7 @@ + # UnicodeDecodeErrors when writing to file. This can happen if + # the diff contains unicode characters but the file is written + # as ascii because of the default system-level encoding. +- html_diff_content = unicode(html_diff_content, 'utf-8') ++ html_diff_content = str(html_diff_content, 'utf-8') + html_diff_subpath = self._write_text_artifact( + test_failures.FILENAME_SUFFIX_HTML_DIFF, results_dir, + path_so_far, html_diff_content, extension=".html") +@@ -154,7 +154,7 @@ + if screenshot_artifact: + screenshot_paths_dict = self._write_screenshot_artifact( + results_dir, path_so_far, screenshot_artifact) +- for screenshot_key, path in screenshot_paths_dict.items(): ++ for screenshot_key, path in list(screenshot_paths_dict.items()): + root_node["artifacts"][screenshot_key] = [path] + + crashlog_artifact = root_node["artifacts"].pop("wpt_crash_log", +--- a/src/3rdparty/chromium/testing/trigger_scripts/base_test_triggerer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/trigger_scripts/base_test_triggerer.py 2025-01-16 02:26:08.536097149 +0800 +@@ -20,7 +20,7 @@ + import subprocess + import sys + import tempfile +-import urllib ++import urllib.request, urllib.parse, urllib.error + import logging + + SRC_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath( +@@ -31,14 +31,14 @@ + def strip_unicode(obj): + """Recursively re-encodes strings as utf-8 inside |obj|. Returns the result. + """ +- if isinstance(obj, unicode): ++ if isinstance(obj, str): + return obj.encode('utf-8', 'replace') + if isinstance(obj, list): + return list(map(strip_unicode, obj)) + + if isinstance(obj, dict): + new_obj = type(obj)( +- (strip_unicode(k), strip_unicode(v)) for k, v in obj.iteritems() ) ++ (strip_unicode(k), strip_unicode(v)) for k, v in obj.items() ) + return new_obj + return obj + +@@ -78,7 +78,7 @@ + bot_args.append('GTEST_TOTAL_SHARDS') + bot_args.append(str(total_shards)) + if self._bot_configs: +- for key, val in sorted(self._bot_configs[bot_index].iteritems()): ++ for key, val in sorted(self._bot_configs[bot_index].items()): + bot_args.append('--dimension') + bot_args.append(key) + bot_args.append(val) +@@ -121,7 +121,7 @@ + try: + temp_file = self.make_temp_file(prefix='base_trigger_dimensions', + suffix='.json') +- encoded_args = urllib.urlencode(query_args) ++ encoded_args = urllib.parse.urlencode(query_args) + args =['query', + '-S', + server, +@@ -146,7 +146,7 @@ + # Query Swarming to figure out which bots are available. 
+ for config in self._bot_configs: + values = [] +- for key, value in sorted(config.iteritems()): ++ for key, value in sorted(config.items()): + values.append(('dimensions', '%s:%s' % (key, value))) + # Ignore dead and quarantined bots. + values.append(('is_dead', 'FALSE')) +@@ -169,7 +169,7 @@ + 'Total bots: %d' % (self._total_bots)) + + def remove_swarming_dimension(self, args, dimension): +- for i in xrange(len(args)): ++ for i in range(len(args)): + if args[i] == '--dimension' and args[i+1] == dimension: + return args[:i] + args[i+3:] + return args +@@ -216,7 +216,7 @@ + def indices_to_trigger(self, args): + """Returns the indices of the swarming shards that should be triggered.""" + if args.shard_index is None: +- return range(args.shards) ++ return list(range(args.shards)) + else: + return [args.shard_index] + +@@ -244,7 +244,7 @@ + # dimensions on the command line. + filtered_remaining_args = copy.deepcopy(remaining) + for config in self._bot_configs: +- for k in config.iterkeys(): ++ for k in config.keys(): + filtered_remaining_args = self.remove_swarming_dimension( + filtered_remaining_args, k) + +@@ -276,7 +276,7 @@ + # However, reset the "tasks" entry to an empty dictionary, + # which will be handled specially. + merged_json['tasks'] = {} +- for k, v in result_json['tasks'].items(): ++ for k, v in list(result_json['tasks'].items()): + v['shard_index'] = shard_index + merged_json['tasks'][k + ':%d:%d' % (shard_index, args.shards)] = v + finally: +--- a/src/3rdparty/chromium/testing/trigger_scripts/perf_device_trigger.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/trigger_scripts/perf_device_trigger.py 2025-01-16 02:26:08.536097149 +0800 +@@ -44,7 +44,7 @@ + + """ + +-from __future__ import print_function ++ + + import argparse + import copy +@@ -53,7 +53,7 @@ + import subprocess + import sys + import tempfile +-import urllib ++import urllib.request, urllib.parse, urllib.error + import logging + + import base_test_triggerer +@@ -113,7 +113,7 @@ + self._bot_configs = [] + # For each eligible bot, append the dimension + # to the eligible bot_configs +- for _, bot in self._eligible_bots_by_ids.iteritems(): ++ for _, bot in self._eligible_bots_by_ids.items(): + self._bot_configs.append(bot.as_json_config()) + + def select_config_indices(self, args, verbose): +@@ -154,12 +154,12 @@ + existing_shard_bot_to_shard_map = copy.deepcopy(shard_to_bot_assignment_map) + # Now create sets of remaining healthy and bad bots + unallocated_healthy_bots = { +- b for b in unallocated_bots_by_ids.values() if b.is_alive()} ++ b for b in list(unallocated_bots_by_ids.values()) if b.is_alive()} + unallocated_bad_bots = { +- b for b in unallocated_bots_by_ids.values() if not b.is_alive()} ++ b for b in list(unallocated_bots_by_ids.values()) if not b.is_alive()} + + # Try assigning healthy bots for new shards first. 
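
The list(range(...)) wrapper added to indices_to_trigger() above is needed because Python 3's range() returns a lazy sequence object rather than a list, so callers that compare against or concatenate lists would otherwise break:

    shards = 4  # hypothetical shard count
    assert range(shards) != [0, 1, 2, 3]        # a range never equals a list
    assert list(range(shards)) == [0, 1, 2, 3]  # the wrapper restores py2 behaviour
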
+- for shard_index, bot in sorted(shard_to_bot_assignment_map.iteritems()): ++ for shard_index, bot in sorted(shard_to_bot_assignment_map.items()): + if not bot and unallocated_healthy_bots: + shard_to_bot_assignment_map[shard_index] = \ + unallocated_healthy_bots.pop() +@@ -169,7 +169,7 @@ + shard_to_bot_assignment_map[shard_index] = unallocated_bad_bots.pop() + + # Handle the rest of shards that were assigned dead bots: +- for shard_index, bot in sorted(shard_to_bot_assignment_map.iteritems()): ++ for shard_index, bot in sorted(shard_to_bot_assignment_map.items()): + if not bot.is_alive() and unallocated_healthy_bots: + dead_bot = bot + healthy_bot = unallocated_healthy_bots.pop() +@@ -195,7 +195,7 @@ + def _print_device_affinity_info( + self, new_map, existing_map, health_map, num_shards): + print() +- for shard_index in xrange(num_shards): ++ for shard_index in range(num_shards): + existing = existing_map.get(shard_index, None) + new = new_map.get(shard_index, None) + existing_id = '' +@@ -209,7 +209,7 @@ + + healthy_bots = [] + dead_bots = [] +- for _, b in health_map.iteritems(): ++ for _, b in health_map.items(): + if b.is_alive(): + healthy_bots.append(b.id()) + else: +@@ -230,7 +230,7 @@ + of the bots. + """ + values = [] +- for key, value in sorted(dimensions.iteritems()): ++ for key, value in sorted(dimensions.items()): + values.append(('dimensions', '%s:%s' % (key, value))) + + query_result = self.query_swarming( +@@ -257,7 +257,7 @@ + # Example: swarming.py query -S server-url.com --limit 1 \\ + # 'tasks/list?tags=os:Windows&tags=pool:chrome.tests.perf&tags=shard:12' + values = [ +- ('tags', '%s:%s' % (k, v)) for k, v in self._dimensions.iteritems() ++ ('tags', '%s:%s' % (k, v)) for k, v in self._dimensions.items() + ] + # Append the shard as a tag + values.append(('tags', '%s:%s' % ('shard', str(shard_index)))) +@@ -286,13 +286,13 @@ + + def _get_swarming_dimensions(self, args): + dimensions = {} +- for i in xrange(len(args) - 2): ++ for i in range(len(args) - 2): + if args[i] == '--dimension': + dimensions[args[i+1]] = args[i+2] + return dimensions + + def _get_swarming_server(self, args): +- for i in xrange(len(args)): ++ for i in range(len(args)): + if '--swarming' in args[i]: + server = args[i+1] + slashes_index = server.index('//') + 2 +@@ -300,7 +300,7 @@ + return server[slashes_index:] + + def _get_service_account(self, args): +- for i in xrange(len(args) - 1): ++ for i in range(len(args) - 1): + if '--auth-service-account-json' in args[i]: + return args[i+1] + +--- a/src/3rdparty/chromium/testing/trigger_scripts/perf_device_trigger_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/trigger_scripts/perf_device_trigger_unittest.py 2025-01-16 02:26:08.536097149 +0800 +@@ -98,12 +98,12 @@ + # the last build that ran the shard that corresponds to that + # index. If that shard hasn't been run before the entry + # should be an empty string. 
+- for i in xrange(num_shards): ++ for i in range(num_shards): + bot_id = previous_task_assignment_map.get(i) + files['base_trigger_dimensions%d.json' % file_index] = ( + self.generate_last_task_to_shard_query_response(i, bot_id)) + file_index = file_index + 1 +- for i in xrange(num_shards): ++ for i in range(num_shards): + task = { + 'base_task_name': 'webgl_conformance_tests', + 'request': { +@@ -154,7 +154,7 @@ + + def list_contains_sublist(self, main_list, sub_list): + return any(sub_list == main_list[offset:offset + len(sub_list)] +- for offset in xrange(len(main_list) - (len(sub_list) - 1))) ++ for offset in range(len(main_list) - (len(sub_list) - 1))) + + def assert_query_swarming_args(self, triggerer, num_shards): + # Assert the calls to query swarming send the right args +@@ -186,13 +186,13 @@ + dead_bots=['build1', 'build2']) + expected_task_assignment = self.get_triggered_shard_to_bot( + triggerer, num_shards=3) +- self.assertEquals(len(set(expected_task_assignment.values())), 3) ++ self.assertEqual(len(set(expected_task_assignment.values())), 3) + + # All three bots were healthy so we should expect the task assignment to + # stay the same +- self.assertEquals(expected_task_assignment.get(0), 'build3') +- self.assertEquals(expected_task_assignment.get(1), 'build4') +- self.assertEquals(expected_task_assignment.get(2), 'build5') ++ self.assertEqual(expected_task_assignment.get(0), 'build3') ++ self.assertEqual(expected_task_assignment.get(1), 'build4') ++ self.assertEqual(expected_task_assignment.get(2), 'build5') + + def test_no_bot_returned(self): + with self.assertRaises(ValueError) as context: +@@ -212,13 +212,13 @@ + dead_bots=['build1', 'build2']) + expected_task_assignment = self.get_triggered_shard_to_bot( + triggerer, num_shards=3) +- self.assertEquals(len(set(expected_task_assignment.values())), 3) ++ self.assertEqual(len(set(expected_task_assignment.values())), 3) + + # The first two should be assigned to one of the unassigned healthy bots + new_healthy_bots = ['build4', 'build5'] + self.assertIn(expected_task_assignment.get(0), new_healthy_bots) + self.assertIn(expected_task_assignment.get(1), new_healthy_bots) +- self.assertEquals(expected_task_assignment.get(2), 'build3') ++ self.assertEqual(expected_task_assignment.get(2), 'build3') + + def test_not_enough_healthy_bots(self): + triggerer = self.setup_and_trigger( +@@ -228,17 +228,17 @@ + dead_bots=['build1', 'build2']) + expected_task_assignment = self.get_triggered_shard_to_bot( + triggerer, num_shards=5) +- self.assertEquals(len(set(expected_task_assignment.values())), 5) ++ self.assertEqual(len(set(expected_task_assignment.values())), 5) + + # We have 5 shards and 5 bots that ran them, but two + # are now dead and there aren't any other healthy bots + # to swap out to. Make sure they still assign to the + # same shards. 
+- self.assertEquals(expected_task_assignment.get(0), 'build1') +- self.assertEquals(expected_task_assignment.get(1), 'build2') +- self.assertEquals(expected_task_assignment.get(2), 'build3') +- self.assertEquals(expected_task_assignment.get(3), 'build4') +- self.assertEquals(expected_task_assignment.get(4), 'build5') ++ self.assertEqual(expected_task_assignment.get(0), 'build1') ++ self.assertEqual(expected_task_assignment.get(1), 'build2') ++ self.assertEqual(expected_task_assignment.get(2), 'build3') ++ self.assertEqual(expected_task_assignment.get(3), 'build4') ++ self.assertEqual(expected_task_assignment.get(4), 'build5') + + def test_not_enough_healthy_bots_shard_not_seen(self): + triggerer = self.setup_and_trigger( +@@ -248,18 +248,18 @@ + dead_bots=['build1', 'build2']) + expected_task_assignment = self.get_triggered_shard_to_bot( + triggerer, num_shards=5) +- self.assertEquals(len(set(expected_task_assignment.values())), 5) ++ self.assertEqual(len(set(expected_task_assignment.values())), 5) + + # Not enough healthy bots so make sure shard 0 is still assigned to its + # same dead bot. +- self.assertEquals(expected_task_assignment.get(0), 'build1') ++ self.assertEqual(expected_task_assignment.get(0), 'build1') + # Shard 1 had not been triggered yet, but there weren't enough + # healthy bots. Make sure it got assigned to the other dead bot. +- self.assertEquals(expected_task_assignment.get(1), 'build2') ++ self.assertEqual(expected_task_assignment.get(1), 'build2') + # The rest of the assignments should stay the same. +- self.assertEquals(expected_task_assignment.get(2), 'build3') +- self.assertEquals(expected_task_assignment.get(3), 'build4') +- self.assertEquals(expected_task_assignment.get(4), 'build5') ++ self.assertEqual(expected_task_assignment.get(2), 'build3') ++ self.assertEqual(expected_task_assignment.get(3), 'build4') ++ self.assertEqual(expected_task_assignment.get(4), 'build5') + + def test_shards_not_triggered_yet(self): + # First time this configuration has been seen. Choose three +@@ -270,7 +270,7 @@ + dead_bots=['build1', 'build2']) + expected_task_assignment = self.get_triggered_shard_to_bot( + triggerer, num_shards=3) +- self.assertEquals(len(set(expected_task_assignment.values())), 3) ++ self.assertEqual(len(set(expected_task_assignment.values())), 3) + new_healthy_bots = ['build3', 'build4', 'build5'] + self.assertIn(expected_task_assignment.get(0), new_healthy_bots) + self.assertIn(expected_task_assignment.get(1), new_healthy_bots) +@@ -288,7 +288,7 @@ + # Test that the new assignment will add a new bot to avoid + # assign 'build3' to both shard 0 & shard 1 as before. + # It also replaces the dead 'build6' bot. 
+- self.assertEquals(set(expected_task_assignment.values()), ++ self.assertEqual(set(expected_task_assignment.values()), + {'build3', 'build4', 'build5', 'build7'}) + + +--- a/src/3rdparty/chromium/testing/variations/PRESUBMIT.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/testing/variations/PRESUBMIT.py 2025-01-16 02:26:08.536097149 +0800 +@@ -77,7 +77,7 @@ + ('experiments', [])]) + for experiment in experiment_config['experiments']: + ordered_experiment = OrderedDict() +- for index in xrange(0, 10): ++ for index in range(0, 10): + comment_key = '//' + str(index) + if comment_key in experiment: + ordered_experiment[comment_key] = experiment[comment_key] +@@ -86,7 +86,7 @@ + ordered_experiment['forcing_flag'] = experiment['forcing_flag'] + if 'params' in experiment: + ordered_experiment['params'] = OrderedDict( +- sorted(experiment['params'].items(), key=lambda t: t[0])) ++ sorted(list(experiment['params'].items()), key=lambda t: t[0])) + if 'enable_features' in experiment: + ordered_experiment['enable_features'] = \ + sorted(experiment['enable_features']) +@@ -127,7 +127,7 @@ + + if not isinstance(json_data, dict): + return _CreateMessage('Expecting dict') +- for (study, experiment_configs) in json_data.iteritems(): ++ for (study, experiment_configs) in json_data.items(): + warnings = _ValidateEntry(study, experiment_configs, _CreateMessage) + if warnings: + return warnings +@@ -137,7 +137,7 @@ + + def _ValidateEntry(study, experiment_configs, create_message_fn): + """Validates one entry of the field trial configuration.""" +- if not isinstance(study, unicode): ++ if not isinstance(study, str): + return create_message_fn('Expecting keys to be string, got %s', type(study)) + if not isinstance(experiment_configs, list): + return create_message_fn('Expecting list for study %s', study) +@@ -185,7 +185,7 @@ + def _ValidateExperimentGroup(experiment_group, create_message_fn): + """Validates one group of one config in a configuration entry.""" + name = experiment_group.get('name', '') +- if not name or not isinstance(name, unicode): ++ if not name or not isinstance(name, str): + return create_message_fn('Missing valid name for experiment') + + # Add context to other messages. +@@ -197,10 +197,10 @@ + params = experiment_group['params'] + if not isinstance(params, dict): + return _CreateGroupMessage('Expected dict for params') +- for (key, value) in params.iteritems(): +- if not isinstance(key, unicode) or not isinstance(value, unicode): ++ for (key, value) in params.items(): ++ if not isinstance(key, str) or not isinstance(value, str): + return _CreateGroupMessage('Invalid param (%s: %s)', key, value) +- for key in experiment_group.keys(): ++ for key in list(experiment_group.keys()): + if key not in VALID_EXPERIMENT_KEYS: + return _CreateGroupMessage('Key[%s] is not a valid key', key) + return [] +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/__init__.py 2025-01-16 02:26:08.537180464 +0800 +@@ -30,8 +30,8 @@ + License: BSD (see LICENSE for details). 
+ """ + +-from __future__ import absolute_import +-from __future__ import unicode_literals ++ ++ + from .__version__ import version, version_info # noqa + import codecs + import sys +@@ -126,7 +126,7 @@ + DeprecationWarning) + + # Loop through kwargs and assign defaults +- for option, default in self.option_defaults.items(): ++ for option, default in list(self.option_defaults.items()): + setattr(self, option, kwargs.get(option, default)) + + self.safeMode = kwargs.get('safe_mode', False) +@@ -364,14 +364,14 @@ + + # Split into lines and run the line preprocessors. + self.lines = source.split("\n") +- for prep in self.preprocessors.values(): ++ for prep in list(self.preprocessors.values()): + self.lines = prep.run(self.lines) + + # Parse the high-level elements. + root = self.parser.parseDocument(self.lines).getroot() + + # Run the tree-processors +- for treeprocessor in self.treeprocessors.values(): ++ for treeprocessor in list(self.treeprocessors.values()): + newRoot = treeprocessor.run(root) + if newRoot is not None: + root = newRoot +@@ -394,7 +394,7 @@ + 'tags. Document=%r' % output.strip()) + + # Run the text post-processors +- for pp in self.postprocessors.values(): ++ for pp in list(self.postprocessors.values()): + output = pp.run(output) + + return output.strip() +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/blockparser.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/blockparser.py 2025-01-16 02:26:08.537180464 +0800 +@@ -1,5 +1,5 @@ +-from __future__ import unicode_literals +-from __future__ import absolute_import ++ ++ + from . import util + from . import odict + +@@ -93,7 +93,7 @@ + + """ + while blocks: +- for processor in self.blockprocessors.values(): ++ for processor in list(self.blockprocessors.values()): + if processor.test(parent, blocks[0]): + if processor.run(parent, blocks) is not False: + # run returns True or None +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/blockprocessors.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/blockprocessors.py 2025-01-16 02:26:08.537180464 +0800 +@@ -11,9 +11,9 @@ + as they need to alter how markdown blocks are parsed. + """ + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import unicode_literals ++ ++ ++ + import logging + import re + from . import util +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/inlinepatterns.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/inlinepatterns.py 2025-01-16 02:26:08.537180464 +0800 +@@ -41,19 +41,19 @@ + * finally we apply strong and emphasis + """ + +-from __future__ import absolute_import +-from __future__ import unicode_literals ++ ++ + from . import util + from . 
import odict + import re + try: # pragma: no cover + from urllib.parse import urlparse, urlunparse + except ImportError: # pragma: no cover +- from urlparse import urlparse, urlunparse ++ from urllib.parse import urlparse, urlunparse + try: # pragma: no cover + from html import entities + except ImportError: # pragma: no cover +- import htmlentitydefs as entities ++ import html.entities as entities + + + def build_inlinepatterns(md_instance, **kwargs): +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/odict.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/odict.py 2025-01-16 02:26:08.537180464 +0800 +@@ -1,5 +1,5 @@ +-from __future__ import unicode_literals +-from __future__ import absolute_import ++ ++ + from . import util + from copy import deepcopy + +@@ -33,7 +33,7 @@ + + def __deepcopy__(self, memo): + return self.__class__([(key, deepcopy(value, memo)) +- for key, value in self.items()]) ++ for key, value in list(self.items())]) + + def __copy__(self): + # The Python's default copy implementation will alter the state +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/postprocessors.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/postprocessors.py 2025-01-16 02:26:08.537180464 +0800 +@@ -8,8 +8,8 @@ + + """ + +-from __future__ import absolute_import +-from __future__ import unicode_literals ++ ++ + from . import util + from . import odict + import re +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/preprocessors.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/preprocessors.py 2025-01-16 02:26:08.537180464 +0800 +@@ -6,8 +6,8 @@ + complicated. + """ + +-from __future__ import absolute_import +-from __future__ import unicode_literals ++ ++ + from . import util + from . import odict + import re +@@ -166,7 +166,7 @@ + left_tag, left_index, ''.join(items[i:])) + right_listindex = \ + self._stringindex_to_listindex(data_index, items[i:]) + i +- if 'markdown' in attrs.keys(): ++ if 'markdown' in list(attrs.keys()): + items[i] = items[i][left_index:] # remove opening tag + placeholder = self.markdown.htmlStash.store_tag( + left_tag, attrs, i + 1, right_listindex + 1) +@@ -230,7 +230,7 @@ + + if block.rstrip().endswith(">") \ + and self._equal_tags(left_tag, right_tag): +- if self.markdown_in_raw and 'markdown' in attrs.keys(): ++ if self.markdown_in_raw and 'markdown' in list(attrs.keys()): + block = block[left_index:-len(right_tag) - 2] + new_blocks.append(self.markdown.htmlStash. 
+ store_tag(left_tag, attrs, 0, 2)) +@@ -268,7 +268,7 @@ + text.insert(0, block[data_index:]) + + in_tag = False +- if self.markdown_in_raw and 'markdown' in attrs.keys(): ++ if self.markdown_in_raw and 'markdown' in list(attrs.keys()): + items[0] = items[0][left_index:] + items[-1] = items[-1][:-len(right_tag) - 2] + if items[len(items) - 1]: # not a newline/empty string +@@ -290,7 +290,7 @@ + items = [] + + if items: +- if self.markdown_in_raw and 'markdown' in attrs.keys(): ++ if self.markdown_in_raw and 'markdown' in list(attrs.keys()): + items[0] = items[0][left_index:] + items[-1] = items[-1][:-len(right_tag) - 2] + if items[len(items) - 1]: # not a newline/empty string +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/serializers.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/serializers.py 2025-01-16 02:26:08.537180464 +0800 +@@ -37,8 +37,8 @@ + # -------------------------------------------------------------------- + + +-from __future__ import absolute_import +-from __future__ import unicode_literals ++ ++ + from . import util + ElementTree = util.etree.ElementTree + QName = util.etree.QName +@@ -153,7 +153,7 @@ + _serialize_html(write, e, qnames, None, format) + else: + write("<" + tag) +- items = elem.items() ++ items = list(elem.items()) + if items or namespaces: + items = sorted(items) # lexical order + for k, v in items: +@@ -169,7 +169,7 @@ + else: + write(" %s=\"%s\"" % (qnames[k], v)) + if namespaces: +- items = namespaces.items() ++ items = list(namespaces.items()) + items.sort(key=lambda x: x[1]) # sort on prefix + for v, k in items: + if k: +@@ -261,7 +261,7 @@ + add_qname(tag) + elif tag is not None and tag is not Comment and tag is not PI: + _raise_serialization_error(tag) +- for key, value in elem.items(): ++ for key, value in list(elem.items()): + if isinstance(key, QName): + key = key.text + if key not in qnames: +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/treeprocessors.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/treeprocessors.py 2025-01-16 02:26:08.537180464 +0800 +@@ -1,5 +1,5 @@ +-from __future__ import unicode_literals +-from __future__ import absolute_import ++ ++ + from . import util + from . import odict + from . import inlinepatterns +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/util.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/util.py 2025-01-16 02:26:08.537180464 +0800 +@@ -1,5 +1,5 @@ + # -*- coding: utf-8 -*- +-from __future__ import unicode_literals ++ + import re + import sys + +@@ -15,9 +15,9 @@ + text_type = str + int2str = chr + else: # pragma: no cover +- string_type = basestring # noqa +- text_type = unicode # noqa +- int2str = unichr # noqa ++ string_type = str # noqa ++ text_type = str # noqa ++ int2str = chr # noqa + + + """ +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/__init__.py 2025-01-16 02:26:08.537180464 +0800 +@@ -3,7 +3,7 @@ + ----------------------------------------------------------------------------- + """ + +-from __future__ import unicode_literals ++ + from ..util import parseBoolValue + import warnings + +@@ -36,7 +36,7 @@ + 'Python-Markdown version 2.6 for more info.', + DeprecationWarning) + # check for configs kwarg for backward compat. 
+- if 'configs' in kwargs.keys(): ++ if 'configs' in list(kwargs.keys()): + if kwargs['configs'] is not None: + self.setConfigs(kwargs.pop('configs', {})) + warnings.warn('Extension classes accepting a dict on the single ' +@@ -59,11 +59,11 @@ + + def getConfigs(self): + """ Return all configs settings as a dict. """ +- return dict([(key, self.getConfig(key)) for key in self.config.keys()]) ++ return dict([(key, self.getConfig(key)) for key in list(self.config.keys())]) + + def getConfigInfo(self): + """ Return all config descriptions as a list of tuples. """ +- return [(key, self.config[key][1]) for key in self.config.keys()] ++ return [(key, self.config[key][1]) for key in list(self.config.keys())] + + def setConfig(self, key, value): + """ Set a config setting for `key` with the given `value`. """ +@@ -77,7 +77,7 @@ + """ Set multiple config settings given a dict or list of tuples. """ + if hasattr(items, 'items'): + # it's a dict +- items = items.items() ++ items = list(items.items()) + for key, value in items: + self.setConfig(key, value) + +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/abbr.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/abbr.py 2025-01-16 02:26:08.537180464 +0800 +@@ -16,8 +16,8 @@ + + ''' + +-from __future__ import absolute_import +-from __future__ import unicode_literals ++ ++ + from . import Extension + from ..preprocessors import Preprocessor + from ..inlinepatterns import Pattern +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/admonition.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/admonition.py 2025-01-16 02:26:08.537180464 +0800 +@@ -17,8 +17,8 @@ + + """ + +-from __future__ import absolute_import +-from __future__ import unicode_literals ++ ++ + from . import Extension + from ..blockprocessors import BlockProcessor + from ..util import etree +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/attr_list.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/attr_list.py 2025-01-16 02:26:08.537180464 +0800 +@@ -17,8 +17,8 @@ + + """ + +-from __future__ import absolute_import +-from __future__ import unicode_literals ++ ++ + from . import Extension + from ..treeprocessors import Treeprocessor + from ..util import isBlockLevel +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/codehilite.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/codehilite.py 2025-01-16 02:26:08.538263779 +0800 +@@ -15,8 +15,8 @@ + + """ + +-from __future__ import absolute_import +-from __future__ import unicode_literals ++ ++ + from . import Extension + from ..treeprocessors import Treeprocessor + +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/def_list.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/def_list.py 2025-01-16 02:26:08.538263779 +0800 +@@ -15,8 +15,8 @@ + + """ + +-from __future__ import absolute_import +-from __future__ import unicode_literals ++ ++ + from . 
import Extension + from ..blockprocessors import BlockProcessor, ListIndentProcessor + from ..util import etree +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/extra.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/extra.py 2025-01-16 02:26:08.538263779 +0800 +@@ -29,8 +29,8 @@ + + """ + +-from __future__ import absolute_import +-from __future__ import unicode_literals ++ ++ + from . import Extension + from ..blockprocessors import BlockProcessor + from .. import util +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/fenced_code.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/fenced_code.py 2025-01-16 02:26:08.538263779 +0800 +@@ -15,8 +15,8 @@ + License: [BSD](http://www.opensource.org/licenses/bsd-license.php) + """ + +-from __future__ import absolute_import +-from __future__ import unicode_literals ++ ++ + from . import Extension + from ..preprocessors import Preprocessor + from .codehilite import CodeHilite, CodeHiliteExtension, parse_hl_lines +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/footnotes.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/footnotes.py 2025-01-16 02:26:08.538263779 +0800 +@@ -13,8 +13,8 @@ + + """ + +-from __future__ import absolute_import +-from __future__ import unicode_literals ++ ++ + from . import Extension + from ..preprocessors import Preprocessor + from ..inlinepatterns import Pattern +@@ -137,7 +137,7 @@ + etree.SubElement(div, "hr") + ol = etree.SubElement(div, "ol") + +- for id in self.footnotes.keys(): ++ for id in list(self.footnotes.keys()): + li = etree.SubElement(ol, "li") + li.set("id", self.makeFootnoteId(id)) + self.parser.parseChunk(li, self.footnotes[id]) +@@ -265,7 +265,7 @@ + + def handleMatch(self, m): + id = m.group(2) +- if id in self.footnotes.footnotes.keys(): ++ if id in list(self.footnotes.footnotes.keys()): + sup = etree.Element("sup") + a = etree.SubElement(sup, "a") + sup.set('id', self.footnotes.makeFootnoteRefId(id)) +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/headerid.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/headerid.py 2025-01-16 02:26:08.538263779 +0800 +@@ -15,8 +15,8 @@ + + """ + +-from __future__ import absolute_import +-from __future__ import unicode_literals ++ ++ + from . import Extension + from ..treeprocessors import Treeprocessor + from ..util import parseBoolValue +@@ -82,7 +82,7 @@ + self.processor = HeaderIdTreeprocessor() + self.processor.md = md + self.processor.config = self.getConfigs() +- if 'attr_list' in md.treeprocessors.keys(): ++ if 'attr_list' in list(md.treeprocessors.keys()): + # insert after attr_list treeprocessor + md.treeprocessors.add('headerid', self.processor, '>attr_list') + else: +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/meta.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/meta.py 2025-01-16 02:26:08.538263779 +0800 +@@ -15,8 +15,8 @@ + + """ + +-from __future__ import absolute_import +-from __future__ import unicode_literals ++ ++ + from . 
import Extension + from ..preprocessors import Preprocessor + import re +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/nl2br.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/nl2br.py 2025-01-16 02:26:08.538263779 +0800 +@@ -16,8 +16,8 @@ + + """ + +-from __future__ import absolute_import +-from __future__ import unicode_literals ++ ++ + from . import Extension + from ..inlinepatterns import SubstituteTagPattern + +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/sane_lists.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/sane_lists.py 2025-01-16 02:26:08.538263779 +0800 +@@ -15,8 +15,8 @@ + + """ + +-from __future__ import absolute_import +-from __future__ import unicode_literals ++ ++ + from . import Extension + from ..blockprocessors import OListProcessor, UListProcessor + import re +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/smart_strong.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/smart_strong.py 2025-01-16 02:26:08.538263779 +0800 +@@ -15,8 +15,8 @@ + + ''' + +-from __future__ import absolute_import +-from __future__ import unicode_literals ++ ++ + from . import Extension + from ..inlinepatterns import SimpleTagPattern + +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/smarty.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/smarty.py 2025-01-16 02:26:08.538263779 +0800 +@@ -81,7 +81,7 @@ + ''' + + +-from __future__ import unicode_literals ++ + from . import Extension + from ..inlinepatterns import HtmlPattern + from ..odict import OrderedDict +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/tables.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/tables.py 2025-01-16 02:26:08.538263779 +0800 +@@ -15,8 +15,8 @@ + + """ + +-from __future__ import absolute_import +-from __future__ import unicode_literals ++ ++ + from . import Extension + from ..blockprocessors import BlockProcessor + from ..util import etree +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/toc.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/toc.py 2025-01-16 02:26:08.538263779 +0800 +@@ -13,8 +13,8 @@ + + """ + +-from __future__ import absolute_import +-from __future__ import unicode_literals ++ ++ + from . import Extension + from ..treeprocessors import Treeprocessor + from ..util import etree, parseBoolValue, AMP_SUBSTITUTE, HTML_PLACEHOLDER_RE, string_type +@@ -257,7 +257,7 @@ + + # serialize and attach to markdown instance. + toc = self.markdown.serializer(div) +- for pp in self.markdown.postprocessors.values(): ++ for pp in list(self.markdown.postprocessors.values()): + toc = pp.run(toc) + self.markdown.toc = toc + +--- a/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/wikilinks.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/Python-Markdown/markdown/extensions/wikilinks.py 2025-01-16 02:26:08.538263779 +0800 +@@ -15,8 +15,8 @@ + + ''' + +-from __future__ import absolute_import +-from __future__ import unicode_literals ++ ++ + from . 
import Extension + from ..inlinepatterns import Pattern + from ..util import etree +--- a/src/3rdparty/chromium/third_party/SPIRV-Tools/src/utils/check_copyright.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/SPIRV-Tools/src/utils/check_copyright.py 2025-01-16 02:26:08.538263779 +0800 +@@ -212,8 +212,8 @@ + + if args.author: + if args.author not in AUTHORS: +- print('error: --update argument must be in the AUTHORS list in ' +- 'check_copyright.py: {}'.format(AUTHORS)) ++ print(('error: --update argument must be in the AUTHORS list in ' ++ 'check_copyright.py: {}'.format(AUTHORS))) + sys.exit(1) + for pair in glob_comment_pairs: + insert_copyright(args.author, *pair) +--- a/src/3rdparty/chromium/third_party/SPIRV-Tools/src/utils/check_symbol_exports.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/SPIRV-Tools/src/utils/check_symbol_exports.py 2025-01-16 02:26:08.538263779 +0800 +@@ -71,7 +71,7 @@ + seen.add(symbol) + #print("look at '{}'".format(symbol)) + if not (symbol_allowlist_pattern.match(symbol) or symbol_ok_pattern.match(symbol)): +- print('{}: error: Unescaped exported symbol: {}'.format(PROG, symbol)) ++ print(('{}: error: Unescaped exported symbol: {}'.format(PROG, symbol))) + result = 1 + return result + +@@ -83,7 +83,7 @@ + args = parser.parse_args() + + if not os.path.isfile(args.library): +- print('{}: error: {} does not exist'.format(PROG, args.library)) ++ print(('{}: error: {} does not exist'.format(PROG, args.library))) + sys.exit(1) + + if os.name == 'posix': +--- a/src/3rdparty/chromium/third_party/SPIRV-Tools/src/utils/generate_grammar_tables.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/SPIRV-Tools/src/utils/generate_grammar_tables.py 2025-01-16 02:26:08.538263779 +0800 +@@ -447,7 +447,7 @@ + exts = [] + params = entry.get('parameters', []) + params = [p.get('kind') for p in params] +- params = zip(params, [''] * len(params)) ++ params = list(zip(params, [''] * len(params))) + version = entry.get('version', None) + max_version = entry.get('lastVersion', None) + +@@ -492,7 +492,7 @@ + for ext in exts: + if ext not in extension_map[value]: + extension_map[value].append(ext) +- synthetic_exts_list.extend(extension_map.values()) ++ synthetic_exts_list.extend(list(extension_map.values())) + + name = '{}_{}Entries'.format(PYGEN_VARIABLE_PREFIX, kind) + entries = [' {}'.format(generate_enum_operand_kind_entry(e, extension_map)) +@@ -529,14 +529,14 @@ + three_optional_enums = [e for e in enums if e[0] in three_optional_enums] + enums.extend(three_optional_enums) + +- enum_kinds, enum_names, enum_entries = zip(*enums) ++ enum_kinds, enum_names, enum_entries = list(zip(*enums)) + # Mark the last three as optional ones. + enum_quantifiers = [''] * (len(enums) - 3) + ['?'] * 3 + # And we don't want redefinition of them. 
+ enum_entries = enum_entries[:-3]
+ enum_kinds = [convert_operand_kind(e)
+ for e in zip(enum_kinds, enum_quantifiers)]
+- table_entries = zip(enum_kinds, enum_names, enum_names)
++ table_entries = list(zip(enum_kinds, enum_names, enum_names))
+ table_entries = [' {{{}, ARRAY_SIZE({}), {}}}'.format(*e)
+ for e in table_entries]
+
+--- a/src/3rdparty/chromium/third_party/SPIRV-Tools/src/utils/update_build_version.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/SPIRV-Tools/src/utils/update_build_version.py 2025-01-16 02:26:08.538263779 +0800
+@@ -125,7 +125,7 @@
+
+ def main():
+ if len(sys.argv) != 3:
+- print('usage: {} '.format(sys.argv[0]))
++ print(('usage: {} '.format(sys.argv[0])))
+ sys.exit(1)
+
+ output_file = sys.argv[2]
+--- a/src/3rdparty/chromium/third_party/abseil-cpp/absl/abseil.podspec.gen.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/abseil-cpp/absl/abseil.podspec.gen.py 2025-01-16 02:26:08.538263779 +0800
+@@ -199,7 +199,7 @@
+
+ def generate(args):
+ """Generates a podspec file from all BUILD files under absl directory."""
+- rules = filter(relevant_rule, collect_rules("absl"))
++ rules = list(filter(relevant_rule, collect_rules("absl")))
+ with open(args.output, "wt") as f:
+ write_podspec(f, rules, vars(args))
+
+--- a/src/3rdparty/chromium/third_party/angle/PRESUBMIT.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/angle/PRESUBMIT.py 2025-01-16 02:26:08.538263779 +0800
+@@ -240,7 +240,7 @@
+ cmd = [input_api.python_executable, code_gen_path, '--verify-no-dirty']
+ test_cmd = input_api.Command(name=cmd_name, cmd=cmd, kwargs={}, message=Msg)
+ if input_api.verbose:
+- print('Running ' + cmd_name)
++ print(('Running ' + cmd_name))
+ return input_api.RunTests([test_cmd])
+
+
+--- a/src/3rdparty/chromium/third_party/angle/scripts/apply_clang_format_on_all_sources.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/angle/scripts/apply_clang_format_on_all_sources.py 2025-01-16 02:26:08.538263779 +0800
+@@ -9,7 +9,7 @@
+ # example usage:
+ # ./scripts/apply_clang_format_on_all_sources.py src
+
+-from __future__ import print_function
++
+
+ import os
+ import sys
+--- a/src/3rdparty/chromium/third_party/angle/scripts/bootstrap.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/angle/scripts/bootstrap.py 2025-01-16 02:26:08.538263779 +0800
+@@ -18,11 +18,11 @@
+ try:
+ rc = subprocess.call(gclient_cmd, shell=True)
+ except OSError:
+- print 'could not run "%s" via shell' % gclient_cmd
++ print('could not run "%s" via shell' % gclient_cmd)
+ sys.exit(1)
+
+ if rc:
+- print 'failed command: "%s"' % gclient_cmd
++ print('failed command: "%s"' % gclient_cmd)
+ sys.exit(1)
+
+ with open('.gclient') as gclient_file:
+@@ -35,7 +35,7 @@
+ with open('.gclient', 'w') as gclient_file:
gclient_file.write(content) + +- print 'created .gclient' ++ print('created .gclient') + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/third_party/angle/scripts/file_exists.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/scripts/file_exists.py 2025-01-16 02:26:08.538263779 +0800 +@@ -5,7 +5,7 @@ + # + # Simple helper for use in 'gn' files to check if a file exists. + +-from __future__ import print_function ++ + + import os, shutil, sys + +--- a/src/3rdparty/chromium/third_party/angle/scripts/gen_angle_gn_info_json.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/scripts/gen_angle_gn_info_json.py 2025-01-16 02:26:08.538263779 +0800 +@@ -108,7 +108,7 @@ + fh.write(json.dumps(desc, indent=4, sort_keys=True)) + fh.close() + +- print("Output written to: %s" % args.output) ++ print(("Output written to: %s" % args.output)) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/third_party/angle/scripts/gen_gl_enum_utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/scripts/gen_gl_enum_utils.py 2025-01-16 02:26:08.539347094 +0800 +@@ -118,9 +118,9 @@ + + def dump_value_to_string_mapping(gl_enum_in_groups, exporting_enums): + exporting_groups = list() +- for group_name, inner_mapping in gl_enum_in_groups.iteritems(): ++ for group_name, inner_mapping in gl_enum_in_groups.items(): + string_value_pairs = list( +- filter(lambda x: x[0] in exporting_enums, inner_mapping.iteritems())) ++ [x for x in iter(inner_mapping.items()) if x[0] in exporting_enums]) + if not string_value_pairs: + continue + +@@ -211,7 +211,7 @@ + script_name=os.path.basename(sys.argv[0]), + data_source_name="gl.xml and gl_angle_ext.xml", + year=date.today().year, +- gl_enum_groups=',\n'.join(sorted(gl_enum_in_groups.iterkeys()))) ++ gl_enum_groups=',\n'.join(sorted(gl_enum_in_groups.keys()))) + + header_output_path = registry_xml.script_relative(header_output_path) + with open(header_output_path, 'w') as f: +@@ -249,9 +249,9 @@ + + if len(sys.argv) > 1: + if sys.argv[1] == 'inputs': +- print ','.join(inputs) ++ print(','.join(inputs)) + elif sys.argv[1] == 'outputs': +- print ','.join(outputs) ++ print(','.join(outputs)) + else: + sys.exit( + main( +--- a/src/3rdparty/chromium/third_party/angle/scripts/gen_proc_table.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/scripts/gen_proc_table.py 2025-01-16 02:26:08.539347094 +0800 +@@ -94,9 +94,9 @@ + inputs = [source for source in registry_xml.xml_inputs] + outputs = [out_file_name_gles, out_file_name_gl] + if sys.argv[1] == 'inputs': +- print ','.join(inputs) ++ print(','.join(inputs)) + elif sys.argv[1] == 'outputs': +- print ','.join(outputs) ++ print(','.join(outputs)) + else: + print('Invalid script parameters') + return 1 +@@ -116,7 +116,7 @@ + + # Also don't add GLES extension commands to libGL proc table + extension_commands = [] +- for extension_name, ext_cmd_names in sorted(glesxml.ext_data.iteritems()): ++ for extension_name, ext_cmd_names in sorted(glesxml.ext_data.items()): + extension_commands.extend(glesxml.ext_data[extension_name]) + for name in extension_commands: + name_no_suffix = name +@@ -155,7 +155,7 @@ + all_functions[function] = function + + proc_data = [(' {"%s", P(%s)}' % (func, angle_func)) +- for func, angle_func in sorted(all_functions.iteritems())] ++ for func, angle_func in sorted(all_functions.items())] + + with open(out_file_name_gles, 'w') as out_file: + output_cpp = 
template_cpp.format( +@@ -204,7 +204,7 @@ + all_functions[function] = function + + proc_data = [(' {"%s", P(%s)}' % (func, angle_func)) +- for func, angle_func in sorted(all_functions.iteritems())] ++ for func, angle_func in sorted(all_functions.items())] + + with open(out_file_name_gl, 'w') as out_file: + output_cpp = template_cpp.format( +--- a/src/3rdparty/chromium/third_party/angle/scripts/gen_vk_gl_cts_build.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/scripts/gen_vk_gl_cts_build.py 2025-01-16 02:26:08.539347094 +0800 +@@ -119,9 +119,9 @@ + outputs = [dataGniFilename, buildGnPath] + + if sys.argv[1] == 'inputs': +- print(','.join(inputs)) ++ print((','.join(inputs))) + elif sys.argv[1] == 'outputs': +- print(','.join(outputs)) ++ print((','.join(outputs))) + else: + print('Invalid script parameters') + return 1 +--- a/src/3rdparty/chromium/third_party/angle/scripts/generate_android_bp.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/scripts/generate_android_bp.py 2025-01-16 02:26:08.539347094 +0800 +@@ -42,7 +42,7 @@ + if isinstance(value, list): + return len(value) > 0 + if isinstance(value, dict): +- for (item, item_value) in value.items(): ++ for (item, item_value) in list(value.items()): + if has_child_values(item_value): + return True + return False +@@ -68,7 +68,7 @@ + if not value: + return + output.append(tabs(indent) + '%s: {' % name) +- for (item, item_value) in value.items(): ++ for (item, item_value) in list(value.items()): + write_blueprint_key_value(output, item, item_value, indent + 1) + output.append(tabs(indent) + '},') + return +@@ -80,7 +80,7 @@ + + def write_blueprint(output, target_type, values): + output.append('%s {' % target_type) +- for (key, value) in values.items(): ++ for (key, value) in list(values.items()): + write_blueprint_key_value(output, key, value) + output.append('}') + +@@ -288,20 +288,20 @@ + if key == 'defaults': + # arch-specific defaults are not supported + break +- value_in_all_abis = value_in_all_abis and (key in bps_for_abis[abi2].keys( +- )) and (value in bps_for_abis[abi2][key]) ++ value_in_all_abis = value_in_all_abis and (key in list(bps_for_abis[abi2].keys( ++ ))) and (value in bps_for_abis[abi2][key]) + if value_in_all_abis: +- if key in common_bp.keys(): ++ if key in list(common_bp.keys()): + common_bp[key].append(value) + else: + common_bp[key] = [value] + else: +- if 'arch' not in common_bp.keys(): ++ if 'arch' not in list(common_bp.keys()): + # Make sure there is an 'arch' entry to hold ABI-specific values + common_bp['arch'] = {} + for abi3 in abi_targets: + common_bp['arch'][abi3] = {} +- if key in common_bp['arch'][abi].keys(): ++ if key in list(common_bp['arch'][abi].keys()): + common_bp['arch'][abi][key].append(value) + else: + common_bp['arch'][abi][key] = [value] +@@ -316,7 +316,7 @@ + bps_for_abis = {} + blueprint_type = "" + for abi in abi_targets: +- if target not in build_info[abi].keys(): ++ if target not in list(build_info[abi].keys()): + bps_for_abis[abi] = {} + continue + +@@ -423,7 +423,7 @@ + bp_outputs = [] + for gn_output in target_info['outputs']: + output = os.path.basename(gn_output) +- if output in outputs_remap.keys(): ++ if output in list(outputs_remap.keys()): + output = outputs_remap[output] + bp_outputs.append(output) + +@@ -567,7 +567,7 @@ + for (blueprint_type, blueprint_data) in blueprint_targets: + write_blueprint(output, blueprint_type, blueprint_data) + +- print('\n'.join(output)) ++ print(('\n'.join(output))) + + + if 
__name__ == '__main__': +--- a/src/3rdparty/chromium/third_party/angle/scripts/generate_entry_points.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/scripts/generate_entry_points.py 2025-01-16 02:26:08.539347094 +0800 +@@ -738,7 +738,7 @@ + + def just_the_type_packed(param, entry): + name = just_the_name(param) +- if entry.has_key(name): ++ if name in entry: + return entry[name] + else: + return just_the_type(param) +@@ -1239,7 +1239,7 @@ + + + def write_context_api_decls(template, decls, api): +- for ver in decls['core'].keys(): ++ for ver in list(decls['core'].keys()): + interface_lines = [] + + for i in decls['core'][ver]: +@@ -1263,9 +1263,9 @@ + out.write(content) + out.close() + +- if 'exts' in decls.keys(): ++ if 'exts' in list(decls.keys()): + interface_lines = [] +- for annotation in decls['exts'].keys(): ++ for annotation in list(decls['exts'].keys()): + interface_lines.append("\\\n /* " + annotation + " */ \\\n\\") + + for extname in sorted(decls['exts'][annotation].keys()): +@@ -1437,7 +1437,7 @@ + + + def get_resource_id_types(all_param_types): +- return [t[:-2] for t in filter(lambda t: t.endswith("ID"), all_param_types)] ++ return [t[:-2] for t in [t for t in all_param_types if t.endswith("ID")]] + + + def format_resource_id_types(all_param_types): +@@ -1499,9 +1499,7 @@ + + + def format_param_type_resource_id_cases(all_param_types): +- id_types = filter( +- lambda t: t.endswith("ID") or t.endswith("IDConstPointer") or t.endswith("IDPointer"), +- all_param_types) ++ id_types = [t for t in all_param_types if t.endswith("ID") or t.endswith("IDConstPointer") or t.endswith("IDPointer")] + return "\n".join([format_param_type_to_resource_id_type_case(t) for t in id_types]) + + +@@ -1672,7 +1670,7 @@ + + egl.AddExtensionCommands(registry_xml.supported_egl_extensions, ['egl']) + +- for extension_name, ext_cmd_names in sorted(egl.ext_data.iteritems()): ++ for extension_name, ext_cmd_names in sorted(egl.ext_data.items()): + + if len(ext_cmd_names) == 0: + continue +@@ -1815,9 +1813,9 @@ + ] + + if sys.argv[1] == 'inputs': +- print ','.join(inputs) ++ print(','.join(inputs)) + elif sys.argv[1] == 'outputs': +- print ','.join(outputs) ++ print(','.join(outputs)) + else: + print('Invalid script parameters') + return 1 +@@ -1927,7 +1925,7 @@ + + xml.AddExtensionCommands(registry_xml.supported_extensions, ['gles2', 'gles1']) + +- for extension_name, ext_cmd_names in sorted(xml.ext_data.iteritems()): ++ for extension_name, ext_cmd_names in sorted(xml.ext_data.items()): + extension_commands.extend(xml.ext_data[extension_name]) + + # Detect and filter duplicate extensions. 
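For reference, the generate_stats.py fixes below hinge on a Python 2/3 difference in filter(): py2's filter() on a str returned a str, while py3's filter() always returns a lazy iterator, so wrapping it in list() yields a list of single characters instead of a string. A minimal standalone sketch (illustrative input only, not part of the patch):

    # Python 3: filter() returns a lazy iterator, never a str.
    raw = ' "3b68405a27f1f9590f83ae07757589dba862f141",'
    list(filter(str.isalnum, raw))       # ['3', 'b', '6', ...] - a char list
    ''.join(filter(str.isalnum, raw))    # '3b68405a...' - the py2 string result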
+--- a/src/3rdparty/chromium/third_party/angle/scripts/generate_loader.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/angle/scripts/generate_loader.py 2025-01-16 02:26:08.539347094 +0800
+@@ -290,9 +290,9 @@
+ ]
+
+ if sys.argv[1] == 'inputs':
+- print ','.join(inputs)
++ print(','.join(inputs))
+ elif sys.argv[1] == 'outputs':
+- print ','.join(outputs)
++ print(','.join(outputs))
+ else:
+ print('Invalid script parameters')
+ return 1
+--- a/src/3rdparty/chromium/third_party/angle/scripts/generate_new_renderer.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/angle/scripts/generate_new_renderer.py 2025-01-16 02:26:08.539347094 +0800
+@@ -18,7 +18,7 @@
+ import os, sys, re, string, datetime
+
+ if len(sys.argv) < 3:
+- print('Usage: ' + sys.argv[0] + ' ')
++ print(('Usage: ' + sys.argv[0] + ' '))
+ sys.exit(1)
+
+ renderer_name = sys.argv[1]
+@@ -283,5 +283,5 @@
+ print("Generated files:")
+ for impl_class in impl_classes:
+ path = "libANGLE/renderer/" + renderer_name + "/" + impl_class + renderer_suffix
+- print('\'' + path + ".cpp\',")
+- print('\'' + path + ".h\',")
++ print(('\'' + path + ".cpp\',"))
++ print(('\'' + path + ".h\',"))
+--- a/src/3rdparty/chromium/third_party/angle/scripts/generate_stats.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/angle/scripts/generate_stats.py 2025-01-16 02:26:08.539347094 +0800
+@@ -110,7 +110,7 @@
+ import re
+ import subprocess
+ import sys
+-import urllib
++import urllib.request, urllib.parse, urllib.error
+ from google.auth.transport.requests import Request
+ from googleapiclient.discovery import build
+ from google_auth_oauthlib.flow import InstalledAppFlow
+@@ -179,7 +179,7 @@
+ if 'build_name' not in info:
+ info['build_name'] = line.strip().split("'")[1]
+ # Remove the bot name and prepend the build link
+- info['build_link'] = BUILD_LINK_PREFIX + urllib.quote(
++ info['build_link'] = BUILD_LINK_PREFIX + urllib.parse.quote(
+ info['build_name'].split(BOT_NAME_PREFIX)[1])
+ if 'Created' in line:
+ # Example output of line with 'Created':
+@@ -194,13 +194,13 @@
+ # ...
+ # "parent_got_angle_revision": "8cbd321cafa92ffbf0495e6d0aeb9e1a97940fee",
+ # ...
+- info['angle_revision'] = filter(str.isalnum, line.split(':')[1])
++ info['angle_revision'] = ''.join(filter(str.isalnum, line.split(':')[1]))
+ if '"revision"' in line:
+ # Example output of line with chromium revision:
+ # ...
+ # "revision": "3b68405a27f1f9590f83ae07757589dba862f141",
+ # ...
+- info['revision'] = filter(str.isalnum, line.split(':')[1])
++ info['revision'] = ''.join(filter(str.isalnum, line.split(':')[1]))
+ if 'build_name' not in info:
+ raise ValueError("Could not find build_name from bot '" + bot_name + "'")
+ return info
+@@ -576,7 +576,7 @@
+ headers_stale = True
+ headers[sheet_name].append(req)
+ # Headers also must contain all the keys seen in this step
+- for key in info[bot_name][step_name].keys():
++ for key in list(info[bot_name][step_name].keys()):
+ if key not in headers[sheet_name]:
+ headers_stale = True
+ headers[sheet_name].append(key)
+--- a/src/3rdparty/chromium/third_party/angle/scripts/msvs_projects.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/angle/scripts/msvs_projects.py 2025-01-16 02:26:08.539347094 +0800
+@@ -26,7 +26,7 @@
+ # Generate the VS solutions for any valid directory.
+ def generate_projects(dirname):
+ args = ['gn.bat', 'gen', dirname, '--ide=' + target_ide, '--sln=' + solution_name]
+- print('Running "' + ' '.join(args) + '"')
++ print(('Running "' + ' '.join(args) + '"'))
+ subprocess.call(args)
+
+
+@@ -38,5 +38,5 @@
+
+ # Run the helper utility that merges the projects.
+ args = ['python', os.path.join('build', 'win', 'gn_meta_sln.py')]
+-print('Running "' + ' '.join(args) + '"')
++print(('Running "' + ' '.join(args) + '"'))
+ subprocess.call(args)
+--- a/src/3rdparty/chromium/third_party/angle/scripts/perf_test_runner.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/angle/scripts/perf_test_runner.py 2025-01-16 02:26:08.539347094 +0800
+@@ -89,7 +89,7 @@
+ perftests_path = newest_binary
+
+ if perftests_path == None or not os.path.exists(perftests_path):
+- print('Cannot find Release %s!' % binary_name)
++ print(('Cannot find Release %s!' % binary_name))
+ sys.exit(1)
+
+ if sys.platform == 'win32':
+@@ -100,8 +100,8 @@
+ if len(sys.argv) >= 2:
+ test_name = sys.argv[1]
+
+-print('Using test executable: ' + perftests_path)
+-print('Test name: ' + test_name)
++print(('Using test executable: ' + perftests_path))
++print(('Test name: ' + test_name))
+
+
+ def get_results(metric, extra_args=[]):
+@@ -122,7 +122,7 @@
+ pattern = r'\.' 
+ metric + r':.*= ([0-9.]+)' + m = re.findall(pattern, output) + if not m: +- print("Did not find the metric '%s' in the test output:" % metric) ++ print(("Did not find the metric '%s' in the test output:" % metric)) + print(output) + sys.exit(1) + +@@ -131,7 +131,7 @@ + + # Calibrate the number of steps + steps = get_results("steps", ["--calibration"])[0] +-print("running with %d steps." % steps) ++print(("running with %d steps." % steps)) + + # Loop 'max_experiments' times, running the tests. + for experiment in range(max_experiments): +--- a/src/3rdparty/chromium/third_party/angle/scripts/registry_xml.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/scripts/registry_xml.py 2025-01-16 02:26:08.540430409 +0800 +@@ -216,7 +216,7 @@ + def get_all_commands(self): + cmd_names = [] + # Combine all the version lists into a single list +- for version, version_cmd_names in sorted(self.command_names.iteritems()): ++ for version, version_cmd_names in sorted(self.command_names.items()): + cmd_names += version_cmd_names + + return cmd_names +@@ -314,7 +314,7 @@ + + self.ext_data[extension_name] = sorted(ext_cmd_names) + +- for extension_name, ext_cmd_names in sorted(self.ext_data.iteritems()): ++ for extension_name, ext_cmd_names in sorted(self.ext_data.items()): + + # Detect and filter duplicate extensions. + dupes = [] +--- a/src/3rdparty/chromium/third_party/angle/scripts/remove_files.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/scripts/remove_files.py 2025-01-16 02:26:08.540430409 +0800 +@@ -14,7 +14,7 @@ + import sys + + if len(sys.argv) < 3: +- print("Usage: " + sys.argv[0] + " ") ++ print(("Usage: " + sys.argv[0] + " ")) + + stamp_file = sys.argv[1] + +--- a/src/3rdparty/chromium/third_party/angle/scripts/roll_chromium_deps.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/scripts/roll_chromium_deps.py 2025-01-16 02:26:08.540430409 +0800 +@@ -21,7 +21,7 @@ + import re + import subprocess + import sys +-import urllib2 ++import urllib.request, urllib.error, urllib.parse + + + def FindSrcDirPath(): +@@ -147,7 +147,7 @@ + logging.debug('CMD: %s CWD: %s', ' '.join(command), working_dir) + env = os.environ.copy() + if extra_env: +- assert all(isinstance(value, str) for value in extra_env.values()) ++ assert all(isinstance(value, str) for value in list(extra_env.values())) + logging.debug('extra env: %s', extra_env) + env.update(extra_env) + p = subprocess.Popen( +@@ -209,7 +209,7 @@ + + def ReadUrlContent(url): + """Connect to a remote host and read the contents. Returns a list of lines.""" +- conn = urllib2.urlopen(url) ++ conn = urllib.request.urlopen(url) + try: + return conn.readlines() + except IOError as e: +@@ -232,7 +232,7 @@ + A list of DepsEntry objects. 
+ """ + result = [] +- for path, depsentry in depsentry_dict.iteritems(): ++ for path, depsentry in depsentry_dict.items(): + if path == dir_path: + result.append(depsentry) + else: +@@ -247,7 +247,7 @@ + result = {} + + def AddDepsEntries(deps_subdict): +- for path, dep in deps_subdict.iteritems(): ++ for path, dep in deps_subdict.items(): + if path in result: + continue + if not isinstance(dep, dict): +@@ -311,7 +311,7 @@ + result = [] + angle_entries = BuildDepsentryDict(angle_deps) + new_cr_entries = BuildDepsentryDict(new_cr_deps) +- for path, angle_deps_entry in angle_entries.iteritems(): ++ for path, angle_deps_entry in angle_entries.items(): + if path not in ANGLE_CHROMIUM_DEPS: + continue + +--- a/src/3rdparty/chromium/third_party/angle/scripts/run_code_generation.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/scripts/run_code_generation.py 2025-01-16 02:26:08.540430409 +0800 +@@ -145,26 +145,26 @@ + + for fname in filenames: + if not os.path.isfile(fname): +- print('File not found: "%s". Code gen dirty for %s' % (fname, name)) ++ print(('File not found: "%s". Code gen dirty for %s' % (fname, name))) + found_dirty_hash = True + else: + new_hashes[fname] = md5(fname) + if (not fname in old_hashes) or (old_hashes[fname] != new_hashes[fname]): +- print('Hash for "%s" dirty for %s generator.' % (fname, name)) ++ print(('Hash for "%s" dirty for %s generator.' % (fname, name))) + found_dirty_hash = True + return found_dirty_hash + + + def any_old_hash_missing(all_new_hashes, all_old_hashes): + result = False +- for file, old_hashes in all_old_hashes.iteritems(): ++ for file, old_hashes in all_old_hashes.items(): + if file not in all_new_hashes: +- print('"%s" does not exist. Code gen dirty.' % file) ++ print(('"%s" does not exist. Code gen dirty.' % file)) + result = True + else: +- for name, _ in old_hashes.iteritems(): ++ for name, _ in old_hashes.items(): + if name not in all_new_hashes[file]: +- print('Hash for %s is missing from "%s". Code gen is dirty.' % (name, file)) ++ print(('Hash for %s is missing from "%s". Code gen is dirty.' % (name, file))) + result = True + return result + +@@ -172,7 +172,7 @@ + def update_output_hashes(script, outputs, new_hashes): + for output in outputs: + if not os.path.isfile(output): +- print('Output is missing from %s: %s' % (script, output)) ++ print(('Output is missing from %s: %s' % (script, output))) + sys.exit(1) + new_hashes[output] = md5(output) + +@@ -197,7 +197,7 @@ + if len(sys.argv) > 1 and sys.argv[1] == '--verify-no-dirty': + verify_only = True + +- for name, script in sorted(generators.iteritems()): ++ for name, script in sorted(generators.items()): + info = auto_script(script) + fname = get_hash_file_name(name) + filenames = info['inputs'] + info['outputs'] + [script] +@@ -211,7 +211,7 @@ + # Set the CWD to the script directory. + os.chdir(get_child_script_dirname(script)) + +- print('Running ' + name + ' code generator') ++ print(('Running ' + name + ' code generator')) + + f = open(os.path.basename(script), "r") + if subprocess.call([get_executable_name(f.readline()), +@@ -236,14 +236,14 @@ + sys.exit(1) + + # Update the output hashes again since they can be formatted. 
+- for name, script in sorted(generators.iteritems()): ++ for name, script in sorted(generators.items()): + info = auto_script(script) + fname = get_hash_file_name(name) + update_output_hashes(name, info['outputs'], all_new_hashes[fname]) + + os.chdir(script_dir) + +- for fname, new_hashes in all_new_hashes.iteritems(): ++ for fname, new_hashes in all_new_hashes.items(): + hash_fname = os.path.join(hash_dir, fname) + json.dump( + new_hashes, +--- a/src/3rdparty/chromium/third_party/angle/scripts/trigger.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/scripts/trigger.py 2025-01-16 02:26:08.540430409 +0800 +@@ -49,7 +49,7 @@ + with open(isolated_file, 'rb') as f: + sha = hashlib.sha1(f.read()).hexdigest() + +- print('Got an isolated SHA of %s' % sha) ++ print(('Got an isolated SHA of %s' % sha)) + swarming_script_path = os.path.join('tools', 'luci-go', 'swarming') + + swarming_args = [ +@@ -78,7 +78,7 @@ + if unknown: + shard_args += ["--"] + unknown + +- print(' '.join(shard_args)) ++ print((' '.join(shard_args))) + subprocess.call(shard_args) + return 0 + +--- a/src/3rdparty/chromium/third_party/angle/scripts/update_canary_angle.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/scripts/update_canary_angle.py 2025-01-16 02:26:08.540430409 +0800 +@@ -51,7 +51,7 @@ + + dest_folder = os.path.join(chrome_folder, sorted_chrome_bins[0]) + +-print('Copying DLLs from ' + source_folder + ' to ' + dest_folder + '.') ++print(('Copying DLLs from ' + source_folder + ' to ' + dest_folder + '.')) + + for dll in ['libGLESv2.dll', 'libEGL.dll']: + src = os.path.join(source_folder, dll) +--- a/src/3rdparty/chromium/third_party/angle/src/commit_id.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/commit_id.py 2025-01-16 02:26:08.540430409 +0800 +@@ -42,7 +42,7 @@ + sys.exit(0) + elif operation == 'position': + if git_dir_exists: +- print(get_commit_position(cwd)) ++ print((get_commit_position(cwd))) + else: + print("0") + sys.exit(0) +--- a/src/3rdparty/chromium/third_party/angle/src/common/Float16ToFloat32.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/common/Float16ToFloat32.py 2025-01-16 02:26:08.540430409 +0800 +@@ -48,7 +48,7 @@ + return 1024 + + +-print """// ++print("""// + // Copyright 2012 The ANGLE Project Authors. All rights reserved. + // Use of this source code is governed by a BSD-style license that can be + // found in the LICENSE file. 
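The Float16ToFloat32.py hunks above and below convert py2 print statements into py3 print() calls. One pitfall worth noting in a minimal standalone sketch (illustrative values, not part of the patch): wrapping two print arguments in an extra pair of parentheses turns them into a single tuple argument, which prints the tuple repr instead of space-separated text.

    print("  %#010x," % 0x3c00)        # py3 form of: print "  %#010x," % 0x3c00
    print('syn keyword', 'OpNop')      # -> syn keyword OpNop
    print(('syn keyword', 'OpNop'))    # -> ('syn keyword', 'OpNop')  (tuple repr)

Extra parentheses around a single expression, as in print(('a' + b)), are redundant but harmless; only multi-argument prints change behavior when wrapped.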
+@@ -58,27 +58,27 @@ + + namespace gl + { +-""" ++""") + +-print "const static unsigned g_mantissa[2048] = {" ++print("const static unsigned g_mantissa[2048] = {") + for i in range(0, 2048): +- print " %#010x," % convertMantissa(i) +-print "};\n" ++ print(" %#010x," % convertMantissa(i)) ++print("};\n") + +-print "const static unsigned g_exponent[64] = {" ++print("const static unsigned g_exponent[64] = {") + for i in range(0, 64): +- print " %#010x," % convertExponent(i) +-print "};\n" ++ print(" %#010x," % convertExponent(i)) ++print("};\n") + +-print "const static unsigned g_offset[64] = {" ++print("const static unsigned g_offset[64] = {") + for i in range(0, 64): +- print " %#010x," % convertOffset(i) +-print "};\n" ++ print(" %#010x," % convertOffset(i)) ++print("};\n") + +-print """float float16ToFloat32(unsigned short h) ++print("""float float16ToFloat32(unsigned short h) + { + unsigned i32 = g_mantissa[g_offset[h >> 10] + (h & 0x3ff)] + g_exponent[h >> 10]; + return bitCast(i32); + } + } +-""" ++""") +--- a/src/3rdparty/chromium/third_party/angle/src/common/gen_packed_gl_enums.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/common/gen_packed_gl_enums.py 2025-01-16 02:26:08.540430409 +0800 +@@ -34,12 +34,12 @@ + enums_dict = json.loads(map_file.read(), object_pairs_hook=OrderedDict) + + enums = [] +- for (enum_name, value_list) in enums_dict.iteritems(): ++ for (enum_name, value_list) in enums_dict.items(): + + values = [] + i = 0 + +- for (value_name, value_gl_name) in value_list.iteritems(): ++ for (value_name, value_gl_name) in value_list.items(): + values.append(EnumValue(value_name, value_gl_name, i)) + i += 1 + +@@ -245,9 +245,9 @@ + ] + + if sys.argv[1] == 'inputs': +- print ','.join(inputs) ++ print(','.join(inputs)) + elif sys.argv[1] == 'outputs': +- print ','.join(outputs) ++ print(','.join(outputs)) + else: + print('Invalid script parameters') + return 1 +--- a/src/3rdparty/chromium/third_party/angle/src/common/gen_uniform_type_table.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/common/gen_uniform_type_table.py 2025-01-16 02:26:08.540430409 +0800 +@@ -124,7 +124,7 @@ + + + def get_texture_type(uniform_type): +- for sampler_type, tex_type in texture_types.items(): ++ for sampler_type, tex_type in list(texture_types.items()): + if uniform_type.endswith(sampler_type): + return "GL_TEXTURE_" + tex_type + return "GL_NONE" +@@ -271,9 +271,9 @@ + outputs = ['uniform_type_info_autogen.cpp'] + + if sys.argv[1] == 'inputs': +- print ','.join(inputs) ++ print(','.join(inputs)) + elif sys.argv[1] == 'outputs': +- print ','.join(outputs) ++ print(','.join(outputs)) + else: + print('Invalid script parameters') + return 1 +--- a/src/3rdparty/chromium/third_party/angle/src/compiler/generate_parser_tools.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/compiler/generate_parser_tools.py 2025-01-16 02:26:08.540430409 +0800 +@@ -140,20 +140,20 @@ + if current_file.endswith('.pyc'): + current_file = current_file[:-1] + inputs += [current_file] +- print(','.join(inputs)) ++ print((','.join(inputs))) + if sys.argv[1] == 'outputs': +- print(','.join(get_output_files(basename, generate_header))) ++ print((','.join(get_output_files(basename, generate_header)))) + return 0 + + # Call flex and bison to generate the lexer and parser. + flex_result = run_flex(basename) + if flex_result != 0: +- print 'Failed to run flex. Error ' + str(flex_result) ++ print('Failed to run flex. 
Error ' + str(flex_result)) + return 1 + + bison_result = run_bison(basename, generate_header) + if bison_result != 0: +- print 'Failed to run bison. Error ' + str(bison_result) ++ print('Failed to run bison. Error ' + str(bison_result)) + return 2 + + return 0 +--- a/src/3rdparty/chromium/third_party/angle/src/compiler/translator/gen_builtin_symbols.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/compiler/translator/gen_builtin_symbols.py 2025-01-16 02:26:08.540430409 +0800 +@@ -802,7 +802,7 @@ + class TType: + + def __init__(self, glsl_header_type): +- if isinstance(glsl_header_type, basestring): ++ if isinstance(glsl_header_type, str): + self.data = self.parse_type(glsl_header_type) + else: + self.data = glsl_header_type +@@ -1108,16 +1108,16 @@ + parameters = get_parameters(function_props) + mangled_names.append(get_function_mangled_name(function_name, parameters)) + if 'subgroups' in group: +- for subgroup_name, subgroup in group['subgroups'].iteritems(): ++ for subgroup_name, subgroup in group['subgroups'].items(): + get_function_names(subgroup, mangled_names, unmangled_names) + + + def get_variable_names(group, mangled_names): + if 'variables' in group: +- for variable_name, props in group['variables'].iteritems(): ++ for variable_name, props in group['variables'].items(): + mangled_names.append(variable_name) + if 'subgroups' in group: +- for subgroup_name, subgroup in group['subgroups'].iteritems(): ++ for subgroup_name, subgroup in group['subgroups'].items(): + get_variable_names(subgroup, mangled_names) + + +@@ -1289,9 +1289,9 @@ + return function_variants + + # If we have a normal gentype then we're generating variants for different sizes of vectors. +- sizes = range(1, 5) ++ sizes = list(range(1, 5)) + if 'vec' in gen_type: +- sizes = range(2, 5) ++ sizes = list(range(2, 5)) + for size in sizes: + variant_props = function_props.copy() + variant_parameters = [] +@@ -1457,7 +1457,7 @@ + unmangled_script_generated_hash_tests, mangled_builtins) + + if 'subgroups' in group: +- for subgroup_name, subgroup in group['subgroups'].iteritems(): ++ for subgroup_name, subgroup in group['subgroups'].items(): + process_function_group( + group_name + subgroup_name, subgroup, parameter_declarations, name_declarations, + unmangled_function_if_statements, defined_function_variants, +@@ -1481,7 +1481,7 @@ + parameter_variable_name_replacements = {} + used_param_variable_names = set() + for param_variable_name, param_declaration in sorted( +- parameter_declarations.iteritems(), key=lambda item: -len(item[0])): ++ iter(parameter_declarations.items()), key=lambda item: -len(item[0])): + replaced = False + for used in used_param_variable_names: + if used.startswith(param_variable_name): +@@ -1491,13 +1491,13 @@ + if not replaced: + used_param_variable_names.add(param_variable_name) + +- for i in xrange(len(function_declarations)): +- for replaced, replacement in parameter_variable_name_replacements.iteritems(): ++ for i in range(len(function_declarations)): ++ for replaced, replacement in parameter_variable_name_replacements.items(): + function_declarations[i] = function_declarations[i].replace( + 'BuiltInParameters::' + replaced + ',', 'BuiltInParameters::' + replacement + ',') + + return [ +- value for key, value in parameter_declarations.iteritems() ++ value for key, value in parameter_declarations.items() + if key in used_param_variable_names + ] + +@@ -1510,7 +1510,7 @@ + global id_counter + if 'variables' not in group: + return +- for variable_name, 
props in group['variables'].iteritems(): ++ for variable_name, props in group['variables'].items(): + essl_level = props['essl_level'] if 'essl_level' in props else None + glsl_level = props['glsl_level'] if 'glsl_level' in props else None + template_args = { +@@ -1559,7 +1559,7 @@ + template_args['fields'] = 'fields_{name_with_suffix}'.format(**template_args) + init_member_variables.append( + ' TFieldList *{fields} = new TFieldList();'.format(**template_args)) +- for field_name, field_type in props['fields'].iteritems(): ++ for field_name, field_type in props['fields'].items(): + template_args['field_name'] = field_name + template_args['field_type'] = TType(field_type).get_dynamic_type_string() + template_name_declaration = 'constexpr const ImmutableString {field_name}("{field_name}");' +@@ -1671,7 +1671,7 @@ + get_variable_definitions, script_generated_hash_tests) + + if 'subgroups' in group: +- for subgroup_name, subgroup in group['subgroups'].iteritems(): ++ for subgroup_name, subgroup in group['subgroups'].items(): + process_variable_group( + shader_type, subgroup_name, subgroup, builtin_id_declarations, + builtin_id_definitions, name_declarations, init_member_variables, +@@ -1746,15 +1746,15 @@ + # This script uses a perfect hash function to avoid dealing with collisions + mangled_names = [] + unmangled_names = [] +- for group_name, group in parsed_functions.iteritems(): ++ for group_name, group in parsed_functions.items(): + get_function_names(group, mangled_names, unmangled_names) +- for group_name, group in parsed_variables.iteritems(): ++ for group_name, group in parsed_variables.items(): + get_variable_names(group, mangled_names) + + # Hashing mangled names + mangled_names = list(dict.fromkeys(mangled_names)) + num_mangled_names = len(mangled_names) +- mangled_names_dict = dict(zip(mangled_names, range(0, len(mangled_names)))) ++ mangled_names_dict = dict(list(zip(mangled_names, list(range(0, len(mangled_names)))))) + # Generate the perfect hash function + f1, f2, mangled_G = generate_hash(mangled_names_dict, Hash2) + mangled_hashfn = HashFunction(f1, f2, mangled_G) +@@ -1766,7 +1766,7 @@ + # Hashing unmangled names + unmangled_names = list(dict.fromkeys(unmangled_names)) + num_unmangled_names = len(unmangled_names) +- unmangled_names_dict = dict(zip(unmangled_names, range(0, len(unmangled_names)))) ++ unmangled_names_dict = dict(list(zip(unmangled_names, list(range(0, len(unmangled_names)))))) + # Generate the perfect hash function + f1, f2, unmangled_G = generate_hash(unmangled_names_dict, Hash2) + unmangled_hashfn = HashFunction(f1, f2, unmangled_G) +@@ -1775,7 +1775,7 @@ + # Array for querying unmangled builtins + unmangled_function_if_statements = UnmangledGroupedList(unmangled_hashfn, num_unmangled_names) + +- for group_name, group in parsed_functions.iteritems(): ++ for group_name, group in parsed_functions.items(): + process_function_group( + group_name, group, parameter_declarations, name_declarations, + unmangled_function_if_statements, defined_function_variants, builtin_id_declarations, +@@ -1785,7 +1785,7 @@ + + parameter_declarations = prune_parameters_arrays(parameter_declarations, function_declarations) + +- for group_name, group in parsed_variables.iteritems(): ++ for group_name, group in parsed_variables.items(): + process_variable_group('NONE', group_name, group, builtin_id_declarations, + builtin_id_definitions, name_declarations, init_member_variables, + get_variable_declarations, mangled_builtins, +@@ -1846,9 +1846,9 @@ + 'num_mangled_names': + 
num_mangled_names, + 'script_generated_hash_tests': +- '\n'.join(script_generated_hash_tests.iterkeys()), ++ '\n'.join(iter(script_generated_hash_tests.keys())), + 'unmangled_script_generated_hash_tests': +- '\n'.join(unmangled_script_generated_hash_tests.iterkeys()), ++ '\n'.join(iter(unmangled_script_generated_hash_tests.keys())), + 'mangled_S1': + str(mangled_S1).replace('[', ' ').replace(']', ' '), + 'mangled_S2': +@@ -1939,9 +1939,9 @@ + ] + + if args.auto_script_command == 'inputs': +- print ','.join(inputs) ++ print(','.join(inputs)) + elif args.auto_script_command == 'outputs': +- print ','.join(outputs) ++ print(','.join(outputs)) + else: + print('Invalid script parameters') + return 1 +--- a/src/3rdparty/chromium/third_party/angle/src/compiler/translator/gen_emulated_builtin_function_tables.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/compiler/translator/gen_emulated_builtin_function_tables.py 2025-01-16 02:26:08.540430409 +0800 +@@ -123,9 +123,9 @@ + outputs = [hlsl_fname] + + if sys.argv[1] == 'inputs': +- print ','.join(inputs) ++ print(','.join(inputs)) + elif sys.argv[1] == 'outputs': +- print ','.join(outputs) ++ print(','.join(outputs)) + else: + print('Invalid script parameters') + return 1 +--- a/src/3rdparty/chromium/third_party/angle/src/libANGLE/gen_copy_conversion_table.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/libANGLE/gen_copy_conversion_table.py 2025-01-16 02:26:08.540430409 +0800 +@@ -78,9 +78,9 @@ + outputs = [out_file_name] + + if sys.argv[1] == 'inputs': +- print ','.join(inputs) ++ print(','.join(inputs)) + elif sys.argv[1] == 'outputs': +- print ','.join(outputs) ++ print(','.join(outputs)) + else: + print('Invalid script parameters') + return 1 +@@ -90,7 +90,7 @@ + + format_map = {} + +- for description, data in json_data.iteritems(): ++ for description, data in json_data.items(): + for texture_format, framebuffer_format in data: + if texture_format not in format_map: + format_map[texture_format] = [] +@@ -98,7 +98,7 @@ + + texture_format_cases = "" + +- for texture_format, framebuffer_formats in sorted(format_map.iteritems()): ++ for texture_format, framebuffer_formats in sorted(format_map.items()): + texture_format_cases += parse_texture_format_case(texture_format, framebuffer_formats) + + with open(out_file_name, 'wt') as out_file: +--- a/src/3rdparty/chromium/third_party/angle/src/libANGLE/gen_format_map.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/libANGLE/gen_format_map.py 2025-01-16 02:26:08.540430409 +0800 +@@ -118,7 +118,7 @@ + + def parse_format_case(format, type_map): + type_cases = "" +- for type, internal_format in sorted(type_map.iteritems()): ++ for type, internal_format in sorted(type_map.items()): + type_cases += parse_type_case(type, internal_format) + return template_format_case.format(format=format, type_cases=type_cases) + +@@ -133,9 +133,9 @@ + outputs = ['format_map_autogen.cpp'] + + if sys.argv[1] == 'inputs': +- print ','.join(inputs) ++ print(','.join(inputs)) + elif sys.argv[1] == 'outputs': +- print ','.join(outputs) ++ print(','.join(outputs)) + else: + print('Invalid script parameters') + return 1 +@@ -147,12 +147,12 @@ + + format_cases = "" + +- for format, type_map in sorted(format_map.iteritems()): ++ for format, type_map in sorted(format_map.items()): + format_cases += parse_format_case(format, type_map) + + combo_data_file = 'es3_format_type_combinations.json' + es3_combo_data = 
angle_format.load_json(combo_data_file) +- combo_data = [combo for sublist in es3_combo_data.values() for combo in sublist] ++ combo_data = [combo for sublist in list(es3_combo_data.values()) for combo in sublist] + + types = set() + formats = set() +@@ -180,9 +180,9 @@ + + es3_combo_cases = "" + +- for format, type_combos in combos.iteritems(): ++ for format, type_combos in combos.items(): + this_type_cases = "" +- for type, combos in type_combos.iteritems(): ++ for type, combos in type_combos.items(): + internal_format_cases = "" + for internal_format in combos: + internal_format_cases += " case " + internal_format + ":\n" +--- a/src/3rdparty/chromium/third_party/angle/src/libANGLE/gen_overlay_fonts.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/libANGLE/gen_overlay_fonts.py 2025-01-16 02:26:08.540430409 +0800 +@@ -28,7 +28,7 @@ + out_file_h = 'Overlay_font_autogen.h' + font_file = 'overlay/DejaVuSansMono-Bold.ttf' + +-template_out_file_h = u"""// GENERATED FILE - DO NOT EDIT. ++template_out_file_h = """// GENERATED FILE - DO NOT EDIT. + // Generated by {script_name} using {font_file}. + // + // Copyright {copyright_year} The ANGLE Project Authors. All rights reserved. +@@ -58,7 +58,7 @@ + + """ + +-template_out_file_cpp = u"""// GENERATED FILE - DO NOT EDIT. ++template_out_file_cpp = """// GENERATED FILE - DO NOT EDIT. + // Generated by {script_name} using images from {font_file}. + // + // Copyright {copyright_year} The ANGLE Project Authors. All rights reserved. +@@ -137,7 +137,7 @@ + }} // namespace gl + """ + +-template_get_font_layer_pixel = u"""case {layer}: ++template_get_font_layer_pixel = """case {layer}: + return GetFontLayerPixel({font_image}, x, y); + """ + +@@ -148,7 +148,7 @@ + # print(font_file) + return + if len(sys.argv) == 2 and sys.argv[1] == 'outputs': +- print(','.join([out_file_cpp, out_file_h])) ++ print((','.join([out_file_cpp, out_file_h]))) + return + + font_defs = [('large', 36), ('medium', 23), ('small', 14)] +--- a/src/3rdparty/chromium/third_party/angle/src/libANGLE/gen_overlay_widgets.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/libANGLE/gen_overlay_widgets.py 2025-01-16 02:26:08.540430409 +0800 +@@ -18,7 +18,7 @@ + + IN_JSON_FILE_NAME = 'overlay_widgets.json' + +-OUT_SOURCE_FILE_TEMPLATE = u"""// GENERATED FILE - DO NOT EDIT. ++OUT_SOURCE_FILE_TEMPLATE = """// GENERATED FILE - DO NOT EDIT. + // Generated by {script_name} using data from {input_file_name}. + // + // Copyright {copyright_year} The ANGLE Project Authors. All rights reserved. +@@ -60,7 +60,7 @@ + + """ + +-OUT_HEADER_FILE_TEMPLATE = u"""// GENERATED FILE - DO NOT EDIT. ++OUT_HEADER_FILE_TEMPLATE = """// GENERATED FILE - DO NOT EDIT. + // Generated by {script_name} using data from {input_file_name}. + // + // Copyright {copyright_year} The ANGLE Project Authors. All rights reserved. +@@ -86,7 +86,7 @@ + }} // namespace gl + """ + +-WIDGET_INIT_TEMPLATE = u"""{{ ++WIDGET_INIT_TEMPLATE = """{{ + const int32_t fontSize = GetFontSize({font_size}, kLargeFont); + const int32_t offsetX = {offset_x}; + const int32_t offsetY = {offset_y}; +@@ -161,7 +161,7 @@ + + def is_negative_coord(coords, axis, widgets_so_far): + +- if isinstance(coords[axis], unicode): ++ if isinstance(coords[axis], str): + coord_split = coords[axis].split('.') + # The coordinate is in the form other_widget.edge.mode + # We simply need to know if other_widget's coordinate is negative or not. 
+@@ -197,7 +197,7 @@ + # The case for the Y axis is similar, with the edge values being top or bottom. + + coord = widget.coords[axis] +- if not isinstance(coord, unicode): ++ if not isinstance(coord, str): + is_left = coord >= 0 + return coord, is_left + +@@ -321,7 +321,7 @@ + OUT_SOURCE_FILE_NAME, + OUT_HEADER_FILE_NAME, + ] +- print(','.join(outputs)) ++ print((','.join(outputs))) + return + + with open(IN_JSON_FILE_NAME) as fin: +--- a/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/angle_format.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/angle_format.py 2025-01-16 02:26:08.540430409 +0800 +@@ -53,7 +53,7 @@ + results = load_without_override() + overrides = load_json(override_path) + +- for k, v in overrides.iteritems(): ++ for k, v in overrides.items(): + results[k] = v + + return results +@@ -61,7 +61,7 @@ + + def get_all_angle_formats(): + map_path = get_angle_format_map_abs_path() +- return load_inverse_table(map_path).keys() ++ return list(load_inverse_table(map_path).keys()) + + + def get_component_type(format_id): +@@ -95,7 +95,7 @@ + + def get_channel_tokens(format_id): + r = re.compile(r'([' + kChannels + '][\d]+)') +- return filter(r.match, r.split(format_id)) ++ return list(filter(r.match, r.split(format_id))) + + + def get_channels(format_id): +--- a/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/gen_angle_format_table.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/gen_angle_format_table.py 2025-01-16 02:26:08.540430409 +0800 +@@ -284,7 +284,7 @@ + "fastCopyFunctions": "NoCopyFunctions", + } + +- for k, v in json.iteritems(): ++ for k, v in json.items(): + parsed[k] = v + + if "glInternalFormat" not in parsed: +@@ -400,9 +400,9 @@ + outputs = ['Format_table_autogen.cpp', 'FormatID_autogen.h'] + + if sys.argv[1] == 'inputs': +- print ','.join(inputs) ++ print(','.join(inputs)) + elif sys.argv[1] == 'outputs': +- print ','.join(outputs) ++ print(','.join(outputs)) + else: + print('Invalid script parameters') + return 1 +@@ -412,7 +412,7 @@ + angle_to_gl = angle_format.load_inverse_table('angle_format_map.json') + data_source_name = 'angle_format_data.json' + json_data = angle_format.load_json(data_source_name) +- all_angle = angle_to_gl.keys() ++ all_angle = list(angle_to_gl.keys()) + + angle_format_cases = parse_angle_format_table(all_angle, json_data, angle_to_gl) + switch_data = gen_map_switch_string(gl_to_angle) +--- a/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/gen_dxgi_format_table.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/gen_dxgi_format_table.py 2025-01-16 02:26:08.540430409 +0800 +@@ -10,6 +10,7 @@ + from datetime import date + import sys + import angle_format ++from functools import reduce + + template_cpp = """// GENERATED FILE - DO NOT EDIT. + // Generated by {script_name} using data from {data_source_name}. 
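The gen_dxgi_format_table.py hunk above adds a functools import because py3 demoted reduce() from a builtin, and the hunk below reduces over a membership list built from dict keys. A minimal standalone sketch of both idioms (illustrative data only, not part of the patch):

    from functools import reduce  # py3: reduce() is no longer a builtin

    types = {'FLOAT': 'GL_FLOAT', 'UINT': 'GL_UNSIGNED_INT'}
    for ctype, gltype in types.items():   # py2 iteritems() -> py3 items()
        pass
    # Membership tests iterate the dict view directly; list(types.keys())
    # works too but is only required when mutating the dict mid-iteration.
    found = [ctype in 'R32G32B32A32_FLOAT' for ctype in types]
    count = reduce(lambda a, b: int(a) + int(b), found)  # -> 1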
+@@ -94,9 +95,9 @@ + outputs = ['dxgi_format_map_autogen.cpp'] + + if sys.argv[1] == 'inputs': +- print ','.join(inputs) ++ print(','.join(inputs)) + elif sys.argv[1] == 'outputs': +- print ','.join(outputs) ++ print(','.join(outputs)) + else: + print('Invalid script parameters') + return 1 +@@ -120,15 +121,15 @@ + + all_angle = angle_format.get_all_angle_formats() + +- for dxgi_format, a_format in sorted(dxgi_map.iteritems()): ++ for dxgi_format, a_format in sorted(dxgi_map.items()): + +- found = [ctype in dxgi_format for ctype in types.keys()] ++ found = [ctype in dxgi_format for ctype in list(types.keys())] + count = reduce((lambda a, b: int(a) + int(b)), found) + + component_type = 'GL_NONE' + + if count == 1: +- gltype = next(gltype for ctype, gltype in types.iteritems() if ctype in dxgi_format) ++ gltype = next(gltype for ctype, gltype in types.items() if ctype in dxgi_format) + component_cases += format_case(dxgi_format, gltype) + else: + component_cases += undefined_case(dxgi_format) +--- a/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/gen_dxgi_support_tables.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/gen_dxgi_support_tables.py 2025-01-16 02:26:08.541513724 +0800 +@@ -183,7 +183,7 @@ + 'mipAutoGen': macro_prefix + 'MIPGEN' + } + +- for format_name, format_support in sorted(format_data.iteritems()): ++ for format_name, format_support in sorted(format_data.items()): + + always_supported = set() + never_supported = set() +@@ -198,7 +198,7 @@ + fl_10_0_check_10_1_supported = set() + fl_10_0_check_11_0_supported = set() + +- for json_flag, support in format_support.iteritems(): ++ for json_flag, support in format_support.items(): + + d3d_flag = [json_flag_to_d3d[json_flag]] + +@@ -235,7 +235,7 @@ + fl_9_3_check.update(d3d_flag) + fl_10_0_check_11_0_supported.update(d3d_flag) + else: +- print("Data specification error: " + support) ++ print(("Data specification error: " + support)) + sys.exit(1) + + for feature_level in ['9_3', '10_0', '10_1', '11_0', '11_1']: +@@ -306,9 +306,9 @@ + outputs = ['dxgi_support_table_autogen.cpp'] + + if sys.argv[1] == 'inputs': +- print ','.join(inputs) ++ print(','.join(inputs)) + elif sys.argv[1] == 'outputs': +- print ','.join(outputs) ++ print(','.join(outputs)) + else: + print('Invalid script parameters') + return 1 +--- a/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/gen_load_functions_table.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/gen_load_functions_table.py 2025-01-16 02:26:08.541513724 +0800 +@@ -113,7 +113,7 @@ + snippet += "{\n" + snippet += " switch (type)\n" + snippet += " {\n" +- for gl_type, load_function in sorted(type_functions.iteritems()): ++ for gl_type, load_function in sorted(type_functions.items()): + snippet += " case " + gl_type + ":\n" + requiresConversion = str('LoadToNative<' not in load_function).lower() + snippet += " return LoadImageFunctionInfo(" + load_function + ", " + requiresConversion + ");\n" +@@ -136,14 +136,14 @@ + def parse_json(json_data): + table_data = '' + load_functions_data = '' +- for internal_format, angle_to_type_map in sorted(json_data.iteritems()): ++ for internal_format, angle_to_type_map in sorted(json_data.items()): + + s = ' ' + + table_data += s + 'case ' + internal_format + ':\n' + + do_switch = len( +- angle_to_type_map) > 1 or angle_to_type_map.keys()[0] != angle_format_unknown ++ angle_to_type_map) > 1 or 
list(angle_to_type_map.keys())[0] != angle_format_unknown + + if do_switch: + table_data += s + '{\n' +@@ -152,7 +152,7 @@ + table_data += s + '{\n' + s += ' ' + +- for angle_format, type_functions in sorted(angle_to_type_map.iteritems()): ++ for angle_format, type_functions in sorted(angle_to_type_map.items()): + + if angle_format == angle_format_unknown: + continue +@@ -164,7 +164,7 @@ + table_data += s + ' return ' + func_name + ';\n' + + if angle_format_unknown in angle_to_type_map: +- for gl_type, load_function in angle_to_type_map[angle_format_unknown].iteritems(): ++ for gl_type, load_function in angle_to_type_map[angle_format_unknown].items(): + if gl_type not in type_functions: + type_functions[gl_type] = load_function + +@@ -202,9 +202,9 @@ + outputs = ['load_functions_table_autogen.cpp'] + + if sys.argv[1] == 'inputs': +- print ','.join(inputs) ++ print(','.join(inputs)) + elif sys.argv[1] == 'outputs': +- print ','.join(outputs) ++ print(','.join(outputs)) + else: + print('Invalid script parameters') + return 1 +--- a/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/d3d/d3d11/gen_blit11helper.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/d3d/d3d11/gen_blit11helper.py 2025-01-16 02:26:08.541513724 +0800 +@@ -322,9 +322,9 @@ + outputs = ['Blit11Helper_autogen.inc', 'd3d11_blit_shaders_autogen.gni'] + + if sys.argv[1] == 'inputs': +- print ','.join(inputs) ++ print(','.join(inputs)) + elif sys.argv[1] == 'outputs': +- print ','.join(outputs) ++ print(','.join(outputs)) + else: + print('Invalid script parameters') + return 1 +--- a/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/d3d/d3d11/gen_texture_format_table.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/d3d/d3d11/gen_texture_format_table.py 2025-01-16 02:26:08.541513724 +0800 +@@ -85,9 +85,9 @@ + internal_format) + + bits = angle_format['bits'] +- max_component_bits = max(bits.itervalues()) ++ max_component_bits = max(bits.values()) + channels_different = not all( +- [component_bits == bits.itervalues().next() for component_bits in bits.itervalues()]) ++ [component_bits == next(iter(bits.values())) for component_bits in bits.values()]) + + # The format itself can be used for swizzles if it can be accessed as a render target and + # sampled and the bit count for all 4 channels is the same. +@@ -197,7 +197,7 @@ + "condition": prefix, + } + +- for k, v in json.iteritems(): ++ for k, v in json.items(): + parsed[k] = v + + # Derived values. 
+@@ -218,15 +218,15 @@ + support_test = None + fallback = None + +- for k, v in angle_format.iteritems(): ++ for k, v in angle_format.items(): + if k == "FL10Plus": + assert support_test is None + support_test = "OnlyFL10Plus(deviceCaps)" +- for k2, v2 in v.iteritems(): ++ for k2, v2 in v.items(): + supported_case[k2] = v2 + elif k == "FL9_3": + split = True +- for k2, v2 in v.iteritems(): ++ for k2, v2 in v.items(): + unsupported_case[k2] = v2 + elif k == "supportTest": + assert support_test is None +@@ -251,7 +251,7 @@ + def parse_json_into_switch_angle_format_string(json_map, json_data): + table_data = '' + +- for internal_format, format_name in sorted(json_map.iteritems()): ++ for internal_format, format_name in sorted(json_map.items()): + + if format_name not in json_data: + continue +@@ -290,9 +290,9 @@ + outputs = ['texture_format_table_autogen.cpp'] + + if sys.argv[1] == 'inputs': +- print ','.join(inputs) ++ print(','.join(inputs)) + elif sys.argv[1] == 'outputs': +- print ','.join(outputs) ++ print(','.join(outputs)) + else: + print('Invalid script parameters') + return 1 +--- a/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/gl/generate_gl_dispatch_table.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/gl/generate_gl_dispatch_table.py 2025-01-16 02:26:08.541513724 +0800 +@@ -260,9 +260,9 @@ + ] + + if sys.argv[1] == 'inputs': +- print ','.join(inputs) ++ print(','.join(inputs)) + elif sys.argv[1] == 'outputs': +- print ','.join(outputs) ++ print(','.join(outputs)) + else: + print('Invalid script parameters') + return 1 +@@ -330,7 +330,7 @@ + # Used later in the NULL bindings. + all_entry_points = [] + +- for comment, entry_points in json_data.iteritems(): ++ for comment, entry_points in json_data.items(): + for entry_point_no_prefix in entry_points: + entry_point = "gl" + entry_point_no_prefix + +@@ -347,18 +347,18 @@ + if not gl_required: + gl_required = reqs + elif entry_point in core_removed_eps: +- print('Upgrade ' + entry_point + ' to ' + str(reqs) + ' instead of ' + +- str(gl_required)) ++ print(('Upgrade ' + entry_point + ' to ' + str(reqs) + ' instead of ' + ++ str(gl_required))) + gl_required = reqs + else: +- print('Keep ' + entry_point + ' at ' + str(gl_required) + +- ' instead of ' + str(reqs)) ++ print(('Keep ' + entry_point + ' at ' + str(gl_required) + ++ ' instead of ' + str(reqs))) + elif api == 'gles2': + if not gles2_required: + gles2_required = reqs + else: +- print("Duplicate for " + entry_point + ": " + str(reqs) + " and " + +- str(gles2_required)) ++ print(("Duplicate for " + entry_point + ": " + str(reqs) + " and " + ++ str(gles2_required))) + else: + raise Exception('Bad api type: ' + api) + +@@ -397,7 +397,7 @@ + raise Exception('Entry point ' + entry_point + ' not found in the xml.') + + table_data = [] +- for comment, entry_points in sorted(json_data.iteritems()): ++ for comment, entry_points in sorted(json_data.items()): + formatted = [" // " + comment] + formatted += [format_ep_decl(entry_point) for entry_point in sorted(entry_points)] + +@@ -414,25 +414,25 @@ + out.write(dispatch_table_header) + + gl_data = [] +- for gl_required, entry_points in sorted(gl_requirements.iteritems()): ++ for gl_required, entry_points in sorted(gl_requirements.items()): + gl_data.append(format_requirements_lines(gl_required, entry_points)) + + gl_extensions_data = [] +- for extension, entry_points in sorted(gl_extension_requirements.iteritems()): ++ for extension, entry_points in 
sorted(gl_extension_requirements.items()): + gl_extensions_data.append( + format_extension_requirements_lines(extension, entry_points, "gl")) + + gles2_data = [] +- for gles2_required, entry_points in sorted(gles2_requirements.iteritems()): ++ for gles2_required, entry_points in sorted(gles2_requirements.items()): + gles2_data.append(format_requirements_lines(gles2_required, entry_points)) + + gles2_extensions_data = [] +- for extension, entry_points in sorted(gles2_extension_requirements.iteritems()): ++ for extension, entry_points in sorted(gles2_extension_requirements.items()): + gles2_extensions_data.append( + format_extension_requirements_lines(extension, entry_points, "gles2")) + + both_extensions_data = [] +- for extension, entry_points in sorted(both_extension_requirements.iteritems()): ++ for extension, entry_points in sorted(both_extension_requirements.items()): + both_extensions_data.append( + format_extension_requirements_lines(extension, entry_points, "gles2|gl")) + +--- a/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/metal/gen_mtl_format_table.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/metal/gen_mtl_format_table.py 2025-01-16 02:26:08.541513724 +0800 +@@ -471,9 +471,9 @@ + outputs = ['mtl_format_table_autogen.mm'] + + if sys.argv[1] == 'inputs': +- print ','.join(inputs) ++ print(','.join(inputs)) + elif sys.argv[1] == 'outputs': +- print ','.join(outputs) ++ print(','.join(outputs)) + else: + print('Invalid script parameters') + return 1 +--- a/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/metal/shaders/gen_mtl_internal_shaders.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/metal/shaders/gen_mtl_internal_shaders.py 2025-01-16 02:26:08.541513724 +0800 +@@ -93,10 +93,10 @@ + # src_files: metal source files + def gen_precompiled_shaders(mac_version, ios_version, variable_name, additional_flags, src_files, + copyright_comments): +- print('Generating default shaders with flags=\'{0}\' ...'.format(additional_flags)) ++ print(('Generating default shaders with flags=\'{0}\' ...'.format(additional_flags))) + + # Mac version's compilation +- print('Compiling macos {0} version of default shaders ...'.format(mac_version)) ++ print(('Compiling macos {0} version of default shaders ...'.format(mac_version))) + + mac_metallib = 'compiled/{0}.mac.metallib'.format(variable_name) + +@@ -110,7 +110,7 @@ + file=mac_metallib, object_files=object_files)) + + # iOS device version's compilation +- print('Compiling ios {0} version of default shaders ...'.format(ios_version)) ++ print(('Compiling ios {0} version of default shaders ...'.format(ios_version))) + + ios_metallib = 'compiled/{0}.ios.metallib'.format(variable_name) + +@@ -124,7 +124,7 @@ + file=ios_metallib, object_files=object_files)) + + # iOS simulator version's compilation +- print('Compiling ios {0} simulator version of default shaders ...'.format(ios_version)) ++ print(('Compiling ios {0} simulator version of default shaders ...'.format(ios_version))) + + ios_sim_metallib = 'compiled/{0}.ios_sim.metallib'.format(variable_name) + +@@ -224,9 +224,9 @@ + ] + os_specific_autogen_files + + if sys.argv[1] == 'inputs': +- print ','.join(inputs) ++ print(','.join(inputs)) + elif sys.argv[1] == 'outputs': +- print ','.join(outputs) ++ print(','.join(outputs)) + else: + print('Invalid script parameters') + return 1 +@@ -239,7 +239,7 @@ + + # -------- Generate shader constants ----------- + 
angle_to_gl = angle_format.load_inverse_table('../../angle_format_map.json') +- shader_formats_autogen = gen_shader_enums_code(angle_to_gl.keys()) ++ shader_formats_autogen = gen_shader_enums_code(list(angle_to_gl.keys())) + shader_autogen_header = boilerplate_code + shader_formats_autogen + + with open('format_autogen.h', 'wt') as out_file: +--- a/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/vulkan/gen_vk_format_table.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/vulkan/gen_vk_format_table.py 2025-01-16 02:26:08.541513724 +0800 +@@ -107,9 +107,9 @@ + + no_error = True + for table in ["map", "fallbacks"]: +- for angle_format in vk_json_data[table].keys(): +- if not angle_format in angle_to_gl.keys(): +- print "Invalid format " + angle_format + " in vk_format_map.json in " + table ++ for angle_format in list(vk_json_data[table].keys()): ++ if not angle_format in list(angle_to_gl.keys()): ++ print("Invalid format " + angle_format + " in vk_format_map.json in " + table) + no_error = False + + return no_error +@@ -218,9 +218,9 @@ + outputs = [out_file_name] + + if sys.argv[1] == 'inputs': +- print ','.join(inputs) ++ print(','.join(inputs)) + elif sys.argv[1] == 'outputs': +- print ','.join(outputs) ++ print(','.join(outputs)) + else: + print('Invalid script parameters') + return 1 +@@ -233,7 +233,7 @@ + return 1 + + vk_cases = [ +- gen_format_case(angle, gl, vk_json_data) for angle, gl in sorted(angle_to_gl.iteritems()) ++ gen_format_case(angle, gl, vk_json_data) for angle, gl in sorted(angle_to_gl.items()) + ] + + output_cpp = template_table_autogen_cpp.format( +--- a/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/vulkan/gen_vk_internal_shaders.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/vulkan/gen_vk_internal_shaders.py 2025-01-16 02:26:08.541513724 +0800 +@@ -28,7 +28,7 @@ + is_linux = platform.system() == 'Linux' + + # Templates for the generated files: +-template_shader_library_cpp = u"""// GENERATED FILE - DO NOT EDIT. ++template_shader_library_cpp = """// GENERATED FILE - DO NOT EDIT. + // Generated by {script_name} using data from {input_file_name} + // + // Copyright {copyright_year} The ANGLE Project Authors. All rights reserved. +@@ -117,7 +117,7 @@ + }} // namespace rx + """ + +-template_shader_library_h = u"""// GENERATED FILE - DO NOT EDIT. ++template_shader_library_h = """// GENERATED FILE - DO NOT EDIT. + // Generated by {script_name} using data from {input_file_name} + // + // Copyright {copyright_year} The ANGLE Project Authors. All rights reserved. +@@ -160,7 +160,7 @@ + #endif // LIBANGLE_RENDERER_VULKAN_VK_INTERNAL_SHADERS_AUTOGEN_H_ + """ + +-template_shader_includes_gni = u"""# GENERATED FILE - DO NOT EDIT. ++template_shader_includes_gni = """# GENERATED FILE - DO NOT EDIT. + # Generated by {script_name} using data from {input_file_name} + # + # Copyright {copyright_year} The ANGLE Project Authors. All rights reserved. +@@ -175,7 +175,7 @@ + ] + """ + +-template_spirv_blob_inc = u"""// GENERATED FILE - DO NOT EDIT. ++template_spirv_blob_inc = """// GENERATED FILE - DO NOT EDIT. + // Generated by {script_name}. + // + // Copyright {copyright_year} The ANGLE Project Authors. All rights reserved. 
+@@ -267,7 +267,7 @@ + flags = {} + enums = [] + +- for key, value in variations.iteritems(): ++ for key, value in variations.items(): + if key == "Description": + continue + elif key == "Flags": +@@ -419,7 +419,7 @@ + if description: + print(description) + if out and out.strip(): +- print(out.strip()) ++ print((out.strip())) + if err and err.strip(): + print(err) + if returncode != 0: +@@ -468,11 +468,11 @@ + # [ name, arg1, ..., argN ]. In that case, name is option[0] and option[1:] are extra arguments + # that need to be passed to glslang_validator for this variation. + def get_variation_name(option): +- return option if isinstance(option, unicode) else option[0] ++ return option if isinstance(option, str) else option[0] + + + def get_variation_args(option): +- return [] if isinstance(option, unicode) else option[1:] ++ return [] if isinstance(option, str) else option[1:] + + + def compile_variation(glslang_path, compile_queue, shader_file, shader_basename, flags, enums, +@@ -705,7 +705,7 @@ + input_shaders_variations = [ + variations for variations in input_shaders_variations if variations is not None + ] +- print(",".join(input_shaders + input_shaders_variations + glslang_binary_hashes)) ++ print((",".join(input_shaders + input_shaders_variations + glslang_binary_hashes))) + return 0 + + # STEP 1: Call glslang to generate the internal shaders into small .inc files. +@@ -752,7 +752,7 @@ + outputs = output_shaders + [out_file_cpp, out_file_h] + + if print_outputs: +- print(','.join(outputs)) ++ print((','.join(outputs))) + return 0 + + compile_queue.finish() +--- a/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/vulkan/gen_vk_mandatory_format_support_table.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/src/libANGLE/renderer/vulkan/gen_vk_mandatory_format_support_table.py 2025-01-16 02:26:08.541513724 +0800 +@@ -101,9 +101,9 @@ + outputs = [out_file_name] + + if sys.argv[1] == 'inputs': +- print ','.join(inputs) ++ print(','.join(inputs)) + elif sys.argv[1] == 'outputs': +- print ','.join(outputs) ++ print(','.join(outputs)) + else: + print('Invalid script parameters') + return 1 +--- a/src/3rdparty/chromium/third_party/angle/third_party/vulkan-headers/src/registry/cgenerator.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/third_party/vulkan-headers/src/registry/cgenerator.py 2025-01-16 02:26:08.542597039 +0800 +@@ -300,12 +300,12 @@ + + # Everyone with an explicit mayalias="true" + self.may_alias = set(typeName +- for typeName, data in self.registry.typedict.items() ++ for typeName, data in list(self.registry.typedict.items()) + if data.elem.get('mayalias') == 'true') + + # Every type mentioned in some other type's parentstruct attribute. 
+ parent_structs = (otherType.elem.get('parentstruct') +- for otherType in self.registry.typedict.values()) ++ for otherType in list(self.registry.typedict.values())) + self.may_alias.update(set(x for x in parent_structs + if x is not None)) + return typeName in self.may_alias +--- a/src/3rdparty/chromium/third_party/angle/third_party/vulkan-headers/src/registry/generator.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/third_party/vulkan-headers/src/registry/generator.py 2025-01-16 02:26:08.542597039 +0800 +@@ -5,7 +5,7 @@ + # SPDX-License-Identifier: Apache-2.0 + """Base class for source/header/doc generators, as well as some utility functions.""" + +-from __future__ import unicode_literals ++ + + import io + import os +@@ -324,7 +324,7 @@ + declared first when emitting this enum.""" + name = elem.get('name') + numVal = None +- if 'value' in elem.keys(): ++ if 'value' in list(elem.keys()): + value = elem.get('value') + # print('About to translate value =', value, 'type =', type(value)) + if needsNum: +@@ -336,7 +336,7 @@ + # value += enuminfo.type + self.logMsg('diag', 'Enum', name, '-> value [', numVal, ',', value, ']') + return [numVal, value] +- if 'bitpos' in elem.keys(): ++ if 'bitpos' in list(elem.keys()): + value = elem.get('bitpos') + bitpos = int(value, 0) + numVal = 1 << bitpos +@@ -345,13 +345,13 @@ + value = value + 'ULL' + self.logMsg('diag', 'Enum', name, '-> bitpos [', numVal, ',', value, ']') + return [numVal, value] +- if 'offset' in elem.keys(): ++ if 'offset' in list(elem.keys()): + # Obtain values in the mapping from the attributes + enumNegative = False + offset = int(elem.get('offset'), 0) + extnumber = int(elem.get('extnumber'), 0) + extends = elem.get('extends') +- if 'dir' in elem.keys(): ++ if 'dir' in list(elem.keys()): + enumNegative = True + self.logMsg('diag', 'Enum', name, 'offset =', offset, + 'extnumber =', extnumber, 'extends =', extends, +@@ -365,7 +365,7 @@ + # More logic needed! + self.logMsg('diag', 'Enum', name, '-> offset [', numVal, ',', value, ']') + return [numVal, value] +- if 'alias' in elem.keys(): ++ if 'alias' in list(elem.keys()): + return [None, elem.get('alias')] + return [None, None] + +--- a/src/3rdparty/chromium/third_party/angle/third_party/vulkan-headers/src/registry/reg.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/third_party/vulkan-headers/src/registry/reg.py 2025-01-16 02:26:08.542597039 +0800 +@@ -112,7 +112,7 @@ + If 'required' is not True, also returns True if neither element + has an attribute value for key.""" + +- if required and key not in self.elem.keys(): ++ if required and key not in list(self.elem.keys()): + return False + return self.elem.get(key) == info.elem.get(key) + +@@ -1186,7 +1186,7 @@ + # being generated. 
Add extensions matching the pattern specified in + # regExtensions, then remove extensions matching the pattern + # specified in regRemoveExtensions +- for (extName, ei) in sorted(self.extdict.items(), key=lambda x: x[1].number if x[1].number is not None else '0'): ++ for (extName, ei) in sorted(list(self.extdict.items()), key=lambda x: x[1].number if x[1].number is not None else '0'): + extName = ei.name + include = False + +--- a/src/3rdparty/chromium/third_party/angle/third_party/vulkan-loader/src/scripts/generate_source.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/third_party/vulkan-loader/src/scripts/generate_source.py 2025-01-16 02:26:08.542597039 +0800 +@@ -61,14 +61,14 @@ + + # run each code generator + for cmd in gen_cmds: +- print(' '.join(cmd)) ++ print((' '.join(cmd))) + try: + subprocess.check_call([sys.executable] + cmd, + # ignore generator output, vk_validation_stats.py is especially noisy + stdout=subprocess.DEVNULL, + cwd=gen_dir) + except Exception as e: +- print('ERROR:', str(e)) ++ print(('ERROR:', str(e))) + return 1 + + # optional post-generation steps +@@ -79,15 +79,15 @@ + files_match = True + for filename in sorted((temp_files | repo_files) - set(verify_exclude)): + if filename not in repo_files: +- print('ERROR: Missing repo file', filename) ++ print(('ERROR: Missing repo file', filename)) + files_match = False + elif filename not in temp_files: +- print('ERROR: Missing generator for', filename) ++ print(('ERROR: Missing generator for', filename)) + files_match = False + elif not filecmp.cmp(os.path.join(temp_dir, filename), + os.path.join(repo_dir, filename), + shallow=False): +- print('ERROR: Repo files do not match generator output for', filename) ++ print(('ERROR: Repo files do not match generator output for', filename)) + files_match = False + + # return code for test scripts +@@ -103,7 +103,7 @@ + repo_filename = os.path.join(repo_dir, filename) + if not os.path.exists(repo_filename) or \ + not filecmp.cmp(temp_filename, repo_filename, shallow=False): +- print('update', repo_filename) ++ print(('update', repo_filename)) + shutil.copyfile(temp_filename, repo_filename) + + return 0 +--- a/src/3rdparty/chromium/third_party/angle/third_party/vulkan-loader/src/scripts/loader_genvk.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/third_party/vulkan-loader/src/scripts/loader_genvk.py 2025-01-16 02:26:08.542597039 +0800 +@@ -256,7 +256,7 @@ + makeGenOpts(args) + + # Select a generator matching the requested target +- if (args.target in genOpts.keys()): ++ if (args.target in list(genOpts.keys())): + createGenerator = genOpts[args.target][0] + options = genOpts[args.target][1] + +@@ -343,7 +343,7 @@ + # default scripts path to be same as registry + if not args.scripts: + args.scripts = os.path.dirname(args.registry) +- print(args.scripts) ++ print((args.scripts)) + + scripts_dir = os.path.dirname(os.path.abspath(__file__)) + registry_dir = os.path.join(scripts_dir, args.scripts) +--- a/src/3rdparty/chromium/third_party/angle/third_party/vulkan-loader/src/scripts/update_deps.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/third_party/vulkan-loader/src/scripts/update_deps.py 2025-01-16 02:26:08.542597039 +0800 +@@ -238,7 +238,7 @@ + + """ + +-from __future__ import print_function ++ + + import argparse + import json +--- a/src/3rdparty/chromium/third_party/angle/third_party/vulkan-tools/src/build-gn/generate_vulkan_layers_json.py 2023-07-18 
22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/third_party/vulkan-tools/src/build-gn/generate_vulkan_layers_json.py 2025-01-16 02:26:08.542597039 +0800 +@@ -17,7 +17,7 @@ + """Generate copies of the Vulkan layers JSON files, with no paths, forcing + Vulkan to use the default search path to look for layers.""" + +-from __future__ import print_function ++ + + import argparse + import glob +--- a/src/3rdparty/chromium/third_party/angle/third_party/vulkan-tools/src/scripts/determine_vs_version.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/third_party/vulkan-tools/src/scripts/determine_vs_version.py 2025-01-16 02:26:08.542597039 +0800 +@@ -74,7 +74,7 @@ + # fail if the program above tries to use it. + if foundExeName == None: + print('00 0000') +- print('Executable ' + exeName + ' not found in PATH!') ++ print(('Executable ' + exeName + ' not found in PATH!')) + else: + proc = subprocess.Popen([exeName, arguments], stdout=subprocess.PIPE) + sysCallOut = proc.stdout.readline().decode('iso-8859-1').rstrip() +@@ -116,4 +116,4 @@ + year = determine_year(version) + + # Output the string we need for Cmake to properly build for this version +- print(str(version) + ' ' + str(year)) ++ print((str(version) + ' ' + str(year))) +--- a/src/3rdparty/chromium/third_party/angle/third_party/vulkan-tools/src/scripts/fetch_glslangvalidator.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/third_party/vulkan-tools/src/scripts/fetch_glslangvalidator.py 2025-01-16 02:26:08.542597039 +0800 +@@ -56,7 +56,7 @@ + + if os.path.isdir(GLSLANG_DIR): + if os.path.exists(GLSLANG_VALIDATOR_FULL_PATH): +- print(" Using glslangValidator at %s" % GLSLANG_VALIDATOR_PATH) ++ print((" Using glslangValidator at %s" % GLSLANG_VALIDATOR_PATH)) + sys.exit() + else: + os.makedirs(GLSLANG_DIR) +--- a/src/3rdparty/chromium/third_party/angle/third_party/vulkan-tools/src/scripts/generate_source.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/third_party/vulkan-tools/src/scripts/generate_source.py 2025-01-16 02:26:08.542597039 +0800 +@@ -53,11 +53,11 @@ + # generate in temp directory so we can compare or copy later + temp_obj = tempfile.TemporaryDirectory(prefix='VulkanLoader_generated_source_') + temp_dir = temp_obj.name +- for path in files_to_gen.keys(): ++ for path in list(files_to_gen.keys()): + os.makedirs(os.path.join(temp_dir, path)) + + # run each code generator +- for path, filenames in files_to_gen.items(): ++ for path, filenames in list(files_to_gen.items()): + for filename in filenames: + if args.verify or args.incremental: + output_path = os.path.join(temp_dir, path) +@@ -67,7 +67,7 @@ + cmd = [common_codegen.repo_relative(os.path.join('scripts','kvt_genvk.py')), + '-registry', os.path.abspath(os.path.join(args.registry, 'vk.xml')), + '-quiet', '-directory', output_path, filename] +- print(' '.join(cmd)) ++ print((' '.join(cmd))) + try: + if args.verify or args.incremental: + subprocess.check_call([sys.executable] + cmd, cwd=temp_dir) +@@ -75,35 +75,35 @@ + subprocess.check_call([sys.executable] + cmd, cwd=repo_dir) + + except Exception as e: +- print('ERROR:', str(e)) ++ print(('ERROR:', str(e))) + return 1 + + # optional post-generation steps + if args.verify: + # compare contents of temp dir and repo + temp_files = {} +- for path in files_to_gen.keys(): ++ for path in list(files_to_gen.keys()): + temp_files[path] = set() + temp_files[path].update(set(os.listdir(os.path.join(temp_dir, 
path)))) + + repo_files = {} +- for path in files_to_gen.keys(): ++ for path in list(files_to_gen.keys()): + repo_files[path] = set() + repo_files[path].update(set(os.listdir(os.path.join(repo_dir, path))) - set(verify_exclude)) + + files_match = True +- for path in files_to_gen.keys(): ++ for path in list(files_to_gen.keys()): + for filename in sorted((temp_files[path] | repo_files[path])): + if filename not in repo_files[path]: +- print('ERROR: Missing repo file', filename) ++ print(('ERROR: Missing repo file', filename)) + files_match = False + elif filename not in temp_files[path]: +- print('ERROR: Missing generator for', filename) ++ print(('ERROR: Missing generator for', filename)) + files_match = False + elif not filecmp.cmp(os.path.join(temp_dir, path, filename), + os.path.join(repo_dir, path, filename), + shallow=False): +- print('ERROR: Repo files do not match generator output for', filename) ++ print(('ERROR: Repo files do not match generator output for', filename)) + files_match = False + + # return code for test scripts +@@ -114,13 +114,13 @@ + + elif args.incremental: + # copy missing or differing files from temp directory to repo +- for path in files_to_gen.keys(): ++ for path in list(files_to_gen.keys()): + for filename in os.listdir(os.path.join(temp_dir,path)): + temp_filename = os.path.join(temp_dir, path, filename) + repo_filename = os.path.join(repo_dir, path, filename) + if not os.path.exists(repo_filename) or \ + not filecmp.cmp(temp_filename, repo_filename, shallow=False): +- print('update', repo_filename) ++ print(('update', repo_filename)) + shutil.copyfile(temp_filename, repo_filename) + + return 0 +--- a/src/3rdparty/chromium/third_party/angle/third_party/vulkan-tools/src/scripts/kvt_genvk.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/third_party/vulkan-tools/src/scripts/kvt_genvk.py 2025-01-16 02:26:08.542597039 +0800 +@@ -248,7 +248,7 @@ + # Create generator options with specified parameters + makeGenOpts(args) + +- if (args.target in genOpts.keys()): ++ if (args.target in list(genOpts.keys())): + createGenerator = genOpts[args.target][0] + options = genOpts[args.target][1] + +--- a/src/3rdparty/chromium/third_party/angle/third_party/vulkan-tools/src/scripts/mock_icd_generator.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/third_party/vulkan-tools/src/scripts/mock_icd_generator.py 2025-01-16 02:26:08.542597039 +0800 +@@ -1259,7 +1259,7 @@ + self.newline() + #write('// endFeature looking at self.sections[command]', file=self.outFile) + if (self.sections['command']): +- write('\n'.join(self.sections['command']), end=u'', file=self.outFile) ++ write('\n'.join(self.sections['command']), end='', file=self.outFile) + self.newline() + if (self.featureExtraProtect != None): + write('#endif /*', self.featureExtraProtect, '*/', file=self.outFile) +--- a/src/3rdparty/chromium/third_party/angle/third_party/vulkan-tools/src/scripts/update_deps.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/third_party/vulkan-tools/src/scripts/update_deps.py 2025-01-16 02:26:08.542597039 +0800 +@@ -238,7 +238,7 @@ + + """ + +-from __future__ import print_function ++ + + import argparse + import json +--- a/src/3rdparty/chromium/third_party/angle/third_party/vulkan-tools/src/scripts/vulkaninfo_generator.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/third_party/vulkan-tools/src/scripts/vulkaninfo_generator.py 2025-01-16 
02:26:08.543680354 +0800 +@@ -186,7 +186,7 @@ + self.types_to_gen = set() + + self.extension_sets = OrderedDict() +- for ext_cat in EXTENSION_CATEGORIES.keys(): ++ for ext_cat in list(EXTENSION_CATEGORIES.keys()): + self.extension_sets[ext_cat] = set() + + self.enums = [] +@@ -235,7 +235,7 @@ + + types_to_gen.update( + GatherTypesToGen(self.all_structures, structures_to_gen)) +- for key in EXTENSION_CATEGORIES.keys(): ++ for key in list(EXTENSION_CATEGORIES.keys()): + types_to_gen.update( + GatherTypesToGen(self.all_structures, self.extension_sets[key])) + types_to_gen = sorted(types_to_gen) +@@ -252,12 +252,12 @@ + structs_to_comp.update( + GatherTypesToGen(self.all_structures, struct_comparisons_to_gen)) + +- for key, value in self.extension_sets.items(): ++ for key, value in list(self.extension_sets.items()): + self.extension_sets[key] = sorted(value) + + alias_versions = OrderedDict() + for version in self.vulkan_versions: +- for aliased_type, aliases in self.aliases.items(): ++ for aliased_type, aliases in list(self.aliases.items()): + for alias in aliases: + if alias in version.names: + alias_versions[alias] = version.minorVersion +@@ -292,12 +292,12 @@ + + out += "pNextChainInfos get_chain_infos() {\n" + out += " pNextChainInfos infos;\n" +- for key in EXTENSION_CATEGORIES.keys(): ++ for key in list(EXTENSION_CATEGORIES.keys()): + out += PrintChainBuilders(key, + self.extension_sets[key], self.all_structures) + out += " return infos;\n}\n" + +- for key, value in EXTENSION_CATEGORIES.items(): ++ for key, value in list(EXTENSION_CATEGORIES.items()): + out += PrintChainIterator(key, + self.extension_sets[key], self.all_structures, value.get('type'), self.extTypes, self.aliases, self.vulkan_versions) + +@@ -324,7 +324,7 @@ + gen.OutputGenerator.genGroup(self, groupinfo, groupName, alias) + + if alias is not None: +- if alias in self.aliases.keys(): ++ if alias in list(self.aliases.keys()): + self.aliases[alias].append(groupName) + else: + self.aliases[alias] = [groupName, ] +@@ -339,7 +339,7 @@ + gen.OutputGenerator.genType(self, typeinfo, name, alias) + + if alias is not None: +- if alias in self.aliases.keys(): ++ if alias in list(self.aliases.keys()): + self.aliases[alias].append(name) + else: + self.aliases[alias] = [name, ] +@@ -358,7 +358,7 @@ + if(node.get('values').find(vendor)) != -1: + return + +- for key, value in EXTENSION_CATEGORIES.items(): ++ for key, value in list(EXTENSION_CATEGORIES.items()): + if typeinfo.elem.get('structextends') == value.get('extends'): + self.extension_sets[key].add(name) + +@@ -660,8 +660,8 @@ + + extNameStr = None + extType = None +- for k, e in extTypes.items(): +- if k == s.name or (s.name in aliases.keys() and k in aliases[s.name]): ++ for k, e in list(extTypes.items()): ++ if k == s.name or (s.name in list(aliases.keys()) and k in aliases[s.name]): + if e.extNameStr is not None: + extNameStr = e.extNameStr + if e.type is not None: +@@ -672,7 +672,7 @@ + for v in versions: + if s.name in v.names: + version = v.minorVersion +- if s.name in aliases.keys(): ++ if s.name in list(aliases.keys()): + for alias in aliases[s.name]: + oldVersionName = alias + +@@ -680,7 +680,7 @@ + out += AddGuardHeader(s) + out += " if (structure->sType == " + s.sTypeName + has_version = version is not None +- has_extNameStr = extNameStr is not None or s.name in aliases.keys() ++ has_extNameStr = extNameStr is not None or s.name in list(aliases.keys()) + + if has_version or has_extNameStr: + out += " && \n (" +@@ -700,7 +700,7 @@ + "("+s.name+"*)structure;\n" + + out 
+= " Dump" + s.name + "(p, " +- if s.name in aliases.keys() and version is not None: ++ if s.name in list(aliases.keys()) and version is not None: + out += "version.minor >= " + version + " ?\"" + \ + s.name + "\":\"" + oldVersionName + "\"" + else: +@@ -787,11 +787,11 @@ + extBase = 1000000000 + extBlockSize = 1000 + childValue = extBase + (extNum - 1) * extBlockSize + extOffset +- if ('dir' in child.keys()): ++ if ('dir' in list(child.keys())): + childValue = -childValue + duplicate = False + for o in self.options: +- if o.values()['optName'] == childName: ++ if list(o.values())['optName'] == childName: + duplicate = True + if duplicate: + continue +@@ -912,7 +912,7 @@ + self.members.append(VulkanVariable( + node, constants, self.name)) + +- for k, e in extTypes.items(): ++ for k, e in list(extTypes.items()): + if k == self.name: + if e.guard is not None: + self.guard = e.guard +--- a/src/3rdparty/chromium/third_party/angle/tools/flex-bison/update_flex_bison_binaries.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/tools/flex-bison/update_flex_bison_binaries.py 2025-01-16 02:26:08.543680354 +0800 +@@ -52,9 +52,9 @@ + print('Suggested commit message (please indicate flex/bison versions):') + print('----------------------------') + print('') +- print('Update flex and bison binaries for %s.' % platform.system()) ++ print(('Update flex and bison binaries for %s.' % platform.system())) + print('') +- print('These binaries were updated using %s.' % os.path.basename(__file__)) ++ print(('These binaries were updated using %s.' % os.path.basename(__file__))) + print('Please see instructions in tools/flex-bison/README.md.') + print('') + print('flex is at version TODO.') +--- a/src/3rdparty/chromium/third_party/angle/tools/glslang/update_glslang_binary.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/angle/tools/glslang/update_glslang_binary.py 2025-01-16 02:26:08.543680354 +0800 +@@ -80,9 +80,9 @@ + print('Suggested commit message:') + print('----------------------------') + print('') +- print('Update glslang_validator binary for %s.' % platform.system()) ++ print(('Update glslang_validator binary for %s.' % platform.system())) + print('') +- print('This binary was updated using %s.' % os.path.basename(__file__)) ++ print(('This binary was updated using %s.' % os.path.basename(__file__))) + print('Please see instructions in tools/glslang/README.md.') + print('') + print('Bug: None') +--- a/src/3rdparty/chromium/third_party/blink/PRESUBMIT_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/PRESUBMIT_test.py 2025-01-16 02:26:08.543680354 +0800 +@@ -113,7 +113,7 @@ + # pylint: disable=W0212 + errors = PRESUBMIT._CheckForWrongMojomIncludes(mock_input_api, + MockOutputApi()) +- self.assertEquals( ++ self.assertEqual( + 'Public blink headers using Blink variant mojoms found. 
' + + 'You must include .mojom-forward.h or .mojom-shared.h instead:', + errors[0].message) +@@ -137,7 +137,7 @@ + # pylint: disable=W0212 + errors = PRESUBMIT._CheckForWrongMojomIncludes(mock_input_api, + MockOutputApi()) +- self.assertEquals([], errors) ++ self.assertEqual([], errors) + + + class CxxDependencyTest(unittest.TestCase): +@@ -201,8 +201,8 @@ + + for item in self.disallow_list: + errors = self.runCheck(filename, ['%s' % item]) +- self.assertEquals(1, len(errors)) +- self.assertRegexpMatches( ++ self.assertEqual(1, len(errors)) ++ self.assertRegex( + errors[0].message, + r'^[^:]+:\d+ uses disallowed identifier .+$') + +@@ -214,8 +214,8 @@ + + for item in self.disallow_list: + errors = self.runCheck(filename, ['%s' % item]) +- self.assertEquals(1, len(errors)) +- self.assertRegexpMatches( ++ self.assertEqual(1, len(errors)) ++ self.assertRegex( + errors[0].message, + r'^[^:]+:\d+ uses disallowed identifier .+$') + +@@ -227,8 +227,8 @@ + + for item in self.disallow_list: + errors = self.runCheck(filename, ['%s' % item]) +- self.assertEquals(1, len(errors)) +- self.assertRegexpMatches( ++ self.assertEqual(1, len(errors)) ++ self.assertRegex( + errors[0].message, + r'^[^:]+:\d+ uses disallowed identifier .+$') + +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/PRESUBMIT.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/PRESUBMIT.py 2025-01-16 02:26:08.543680354 +0800 +@@ -65,7 +65,7 @@ + test_cmd = input_api.Command( + name=cmd_name, cmd=cmd, kwargs={}, message=message_type) + if input_api.verbose: +- print('Running ' + cmd_name) ++ print(('Running ' + cmd_name)) + return input_api.RunTests([test_cmd]) + + +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/aggregate_generated_bindings.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/aggregate_generated_bindings.py 2025-01-16 02:26:08.543680354 +0800 +@@ -46,7 +46,7 @@ + Design doc: http://www.chromium.org/developers/design-documents/idl-build + """ + +-from __future__ import print_function ++ + + import errno + import optparse +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/blink_idl_lexer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/blink_idl_lexer.py 2025-01-16 02:26:08.543680354 +0800 +@@ -53,7 +53,7 @@ + # Disable attribute validation, as lint can't import parent class to check + # pylint: disable=E1101 + +-from __future__ import print_function ++ + + import os.path + import sys +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/blink_idl_parser.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/blink_idl_parser.py 2025-01-16 02:26:08.543680354 +0800 +@@ -54,7 +54,7 @@ + # pylint: disable=E1101 + # + +-from __future__ import print_function ++ + + import os.path + import sys +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/code_generator.py 2025-01-14 21:29:17.874895440 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/code_generator.py 2025-01-16 02:26:08.543680354 +0800 +@@ -5,7 +5,7 @@ + # pylint: disable=import-error,print-statement,relative-import + """Plumbing for a Jinja-based code generator, including CodeGeneratorBase, a base class for all generators.""" + +-from __future__ import print_function ++ + + import os + import posixpath +--- 
a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/code_generator_v8.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/code_generator_v8.py 2025-01-16 02:26:08.543680354 +0800 +@@ -95,7 +95,7 @@ + def resolve(self, definitions, definition_name): + """Traverse definitions and resolves typedefs with the actual types.""" + self.typedefs = {} +- for name, typedef in self.info_provider.typedefs.items(): ++ for name, typedef in list(self.info_provider.typedefs.items()): + self.typedefs[name] = typedef.idl_type + self.additional_header_includes = set() + definitions.accept(self) +@@ -338,7 +338,7 @@ + # idl_definitions.py. What we do instead is to resolve typedefs in + # _generate_container_code() whenever a new union file is generated. + self.typedefs = {} +- for name, typedef in self.info_provider.typedefs.items(): ++ for name, typedef in list(self.info_provider.typedefs.items()): + self.typedefs[name] = typedef.idl_type + + def _generate_container_code(self, union_type): +@@ -441,7 +441,7 @@ + if not callback_functions: + return () + outputs = set() +- for callback_function_dict in callback_functions.values(): ++ for callback_function_dict in list(callback_functions.values()): + if callback_function_dict['component_dir'] != self.target_component: + continue + callback_function = callback_function_dict['callback_function'] +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/compute_global_objects.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/compute_global_objects.py 2025-01-16 02:26:08.543680354 +0800 +@@ -48,7 +48,7 @@ + + + def dict_union(dicts): +- return dict((k, v) for d in dicts for k, v in d.items()) ++ return dict((k, v) for d in dicts for k, v in list(d.items())) + + + def idl_file_to_global_names(idl_filename): +@@ -71,7 +71,7 @@ + raise ValueError( + '[Global] must take an indentifier or an identifier list.\n' + + full_path) +- return map(str.strip, global_value.strip('()').split(',')) ++ return list(map(str.strip, global_value.strip('()').split(','))) + + + def idl_files_to_interface_name_global_names(idl_files): +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/compute_interfaces_info_individual.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/compute_interfaces_info_individual.py 2025-01-16 02:26:08.543680354 +0800 +@@ -247,7 +247,7 @@ + self.union_types.update(this_union_types) + self.typedefs.update(definitions.typedefs) + for callback_function_name, callback_function in \ +- definitions.callback_functions.items(): ++ list(definitions.callback_functions.items()): + # Set 'component_dir' to specify a directory that callback function files belong to + self.callback_functions[callback_function_name] = { + 'callback_function': callback_function, +@@ -255,14 +255,14 @@ + 'full_path': os.path.realpath(idl_filename), + } + # Check enum duplication. 
+- for enum in definitions.enumerations.values(): ++ for enum in list(definitions.enumerations.values()): + if not self.check_enum_consistency(enum): + raise Exception('Enumeration "%s" is defined more than once ' + 'with different valid values' % enum.name) + self.enumerations.update(definitions.enumerations) + + if definitions.interfaces: +- definition = next(iter(definitions.interfaces.values())) ++ definition = next(iter(list(definitions.interfaces.values()))) + interface_info = { + 'is_callback_interface': + definition.is_callback, +@@ -279,7 +279,7 @@ + get_put_forward_interfaces_from_definition(definition), + } + elif definitions.dictionaries: +- definition = next(iter(definitions.dictionaries.values())) ++ definition = next(iter(list(definitions.dictionaries.values()))) + interface_info = { + 'is_callback_interface': False, + 'is_dictionary': True, +@@ -379,7 +379,7 @@ + self.callback_functions, + 'enumerations': + dict((enum.name, enum.values) +- for enum in self.enumerations.values()), ++ for enum in list(self.enumerations.values())), + 'runtime_enabled_features': + runtime_enabled_features, + 'typedefs': +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/compute_interfaces_info_overall.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/compute_interfaces_info_overall.py 2025-01-16 02:26:08.543680354 +0800 +@@ -127,12 +127,12 @@ + + Needed for merging partial_interface_files across components. + """ +- for key, value in other.items(): ++ for key, value in list(other.items()): + if key not in existing: + existing[key] = value + continue + existing_value = existing[key] +- for inner_key, inner_value in value.items(): ++ for inner_key, inner_value in list(value.items()): + existing_value[inner_key].extend(inner_value) + + +@@ -178,7 +178,7 @@ + garbage_collected_interfaces = set() + callback_interfaces = set() + +- for interface_name, interface_info in interfaces_info.items(): ++ for interface_name, interface_info in list(interfaces_info.items()): + component_dirs[interface_name] = idl_filename_to_component( + interface_info['full_path']) + +@@ -220,10 +220,10 @@ + partial_interface_files, info['partial_interface_files']) + + # Record inheritance information individually +- for interface_name, interface_info in interfaces_info.items(): ++ for interface_name, interface_info in list(interfaces_info.items()): + extended_attributes = interface_info['extended_attributes'] + inherited_extended_attributes_by_interface[interface_name] = dict( +- (key, value) for key, value in extended_attributes.items() ++ (key, value) for key, value in list(extended_attributes.items()) + if key in INHERITED_EXTENDED_ATTRIBUTES) + parent = interface_info['parent'] + if parent: +@@ -241,7 +241,7 @@ + # 'includes'). + # Note that moving an 'includes' statement between files does not change the + # info itself (or hence cause a rebuild)! +- for mixin_name, interface_info in interfaces_info.items(): ++ for mixin_name, interface_info in list(interfaces_info.items()): + for interface_name in interface_info['included_by_interfaces']: + interfaces_info[interface_name]['including_mixins'].append( + mixin_name) +@@ -249,7 +249,7 @@ + + # An IDL file's dependencies are partial interface files that extend it, + # and files for other interfaces that this interfaces include. 
+- for interface_name, interface_info in interfaces_info.items(): ++ for interface_name, interface_info in list(interfaces_info.items()): + partial_interface_paths = partial_interface_files[interface_name] + partial_interfaces_full_paths = partial_interface_paths['full_paths'] + # Partial interface definitions each need an include, as they are +@@ -311,7 +311,7 @@ + }) + + # Clean up temporary private information +- for interface_info in interfaces_info.values(): ++ for interface_info in list(interfaces_info.values()): + del interface_info['extended_attributes'] + del interface_info['union_types'] + del interface_info['is_legacy_treat_as_partial_interface'] +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/generate_global_constructors.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/generate_global_constructors.py 2025-01-16 02:26:08.543680354 +0800 +@@ -108,8 +108,8 @@ + elif 'Exposed' in extended_attributes: + # Exposed=env or Exposed=(env1,...) case + exposed_value = extended_attributes.get('Exposed') +- exposed_global_names = map(str.strip, +- exposed_value.strip('()').split(',')) ++ exposed_global_names = list(map(str.strip, ++ exposed_value.strip('()').split(','))) + new_constructors_list = generate_global_constructors_list( + interface_name, extended_attributes) + for name in exposed_global_names: +@@ -196,7 +196,7 @@ + record_global_constructors(idl_filename) + + # Check for [Exposed] / [Global] mismatch. +- known_global_names = EXPOSED_EXECUTION_CONTEXT_METHOD.keys() ++ known_global_names = list(EXPOSED_EXECUTION_CONTEXT_METHOD.keys()) + exposed_global_names = frozenset(global_name_to_constructors) + if not exposed_global_names.issubset(known_global_names): + unknown_global_names = exposed_global_names.difference( +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/generate_init_partial_interfaces.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/generate_init_partial_interfaces.py 2025-01-16 02:26:08.543680354 +0800 +@@ -8,7 +8,7 @@ + + # pylint: disable=relative-import + +-from __future__ import print_function ++ + + from optparse import OptionParser + import os +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/generate_origin_trial_features.py 2025-01-14 21:29:17.875978753 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/generate_origin_trial_features.py 2025-01-16 02:26:08.543680354 +0800 +@@ -209,7 +209,7 @@ + interface_info.v8_class, + 'installers': + get_install_functions([interface_info], feature_names) +- } for interface_info, feature_names in features_for_type.items()] ++ } for interface_info, feature_names in list(features_for_type.items())] + context['installers_by_interface'].sort(key=lambda x: x['name']) + + # For each conditional feature, collect a list of bindings installation +@@ -221,7 +221,7 @@ + 'OriginTrialFeature::k%s' % feature_name, + 'installers': + get_install_functions(interfaces, [feature_name]) +- } for feature_name, interfaces in types_for_feature.items()] ++ } for feature_name, interfaces in list(types_for_feature.items())] + context['installers_by_feature'].sort(key=lambda x: x['name']) + + return context +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/generate_v8_context_snapshot_external_references.py 2023-07-18 22:12:18.000000000 +0800 ++++ 
b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/generate_v8_context_snapshot_external_references.py 2025-01-16 02:26:08.543680354 +0800 +@@ -191,7 +191,7 @@ + target_definitions = definitions[component] + interfaces = target_definitions.interfaces + first_name = target_definitions.first_name +- if first_name in interfaces.keys(): ++ if first_name in list(interfaces.keys()): + interface = interfaces[first_name] + self._process_interface(interface, component, interfaces) + +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/idl_compiler.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/idl_compiler.py 2025-01-16 02:26:08.543680354 +0800 +@@ -83,11 +83,10 @@ + return options, idl_filename + + +-class IdlCompiler(object): ++class IdlCompiler(object, metaclass=abc.ABCMeta): + """The IDL Compiler. + + """ +- __metaclass__ = abc.ABCMeta + + def __init__(self, + output_directory, +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/idl_definitions.py 2025-01-14 21:29:17.875978753 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/idl_definitions.py 2025-01-16 02:26:08.543680354 +0800 +@@ -77,13 +77,12 @@ + ################################################################################ + + +-class TypedObject(object): ++class TypedObject(object, metaclass=abc.ABCMeta): + """Object with a type, such as an Attribute or Operation (return value). + + The type can be an actual type, or can be a typedef, which must be resolved + by the TypedefResolver before passing data to the code generator. + """ +- __metaclass__ = abc.ABCMeta + idl_type_attributes = ('idl_type', ) + + +@@ -137,22 +136,22 @@ + + def accept(self, visitor): + visitor.visit_definitions(self) +- for interface in self.interfaces.values(): ++ for interface in list(self.interfaces.values()): + interface.accept(visitor) +- for callback_function in self.callback_functions.values(): ++ for callback_function in list(self.callback_functions.values()): + callback_function.accept(visitor) +- for dictionary in self.dictionaries.values(): ++ for dictionary in list(self.dictionaries.values()): + dictionary.accept(visitor) +- for enumeration in self.enumerations.values(): ++ for enumeration in list(self.enumerations.values()): + enumeration.accept(visitor) + for include in self.includes: + include.accept(visitor) +- for typedef in self.typedefs.values(): ++ for typedef in list(self.typedefs.values()): + typedef.accept(visitor) + + def update(self, other): + """Update with additional IdlDefinitions.""" +- for interface_name, new_interface in other.interfaces.items(): ++ for interface_name, new_interface in list(other.interfaces.items()): + if not new_interface.is_partial: + # Add as new interface + self.interfaces[interface_name] = new_interface +@@ -394,8 +393,7 @@ + else: + raise ValueError('Unrecognized node class: %s' % child_class) + +- if len(list(filter(None, +- [self.iterable, self.maplike, self.setlike]))) > 1: ++ if len(list([_f for _f in [self.iterable, self.maplike, self.setlike] if _f])) > 1: + raise ValueError( + 'Interface can only have one of iterable<>, maplike<> and setlike<>.' 
+ ) +@@ -430,8 +428,8 @@ + extended_attributes = ( + convert_constructor_operations_extended_attributes( + constructor_operations_extended_attributes)) +- if any(name in extended_attributes.keys() +- for name in self.extended_attributes.keys()): ++ if any(name in list(extended_attributes.keys()) ++ for name in list(self.extended_attributes.keys())): + raise ValueError('Detected mixed extended attributes for ' + 'both [Constructor] and constructor ' + 'operations. Do not use both in a single ' +@@ -1059,7 +1057,7 @@ + """ + + converted = {} +- for name, value in extended_attributes.items(): ++ for name, value in list(extended_attributes.items()): + if name == "CallWith": + converted["ConstructorCallWith"] = value + elif name == "RaisesException": +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/idl_types.py 2025-01-14 21:29:17.875978753 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/idl_types.py 2025-01-16 02:26:08.543680354 +0800 +@@ -642,7 +642,7 @@ + def __str__(self): + annotation = ', '.join( + (key + ('' if val is None else '=' + val)) +- for key, val in self.extended_attributes.items()) ++ for key, val in list(self.extended_attributes.items())) + return '[%s] %s' % (annotation, str(self.inner_type)) + + def __getattr__(self, name): +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/idl_validator.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/idl_validator.py 2025-01-16 02:26:08.543680354 +0800 +@@ -51,7 +51,7 @@ + + def validate_extended_attributes(self, definitions): + # FIXME: this should be done when parsing the file, rather than after. +- for interface in definitions.interfaces.values(): ++ for interface in list(definitions.interfaces.values()): + self.validate_extended_attributes_node(interface) + for attribute in interface.attributes: + self.validate_extended_attributes_node(attribute) +@@ -59,17 +59,17 @@ + self.validate_extended_attributes_node(operation) + for argument in operation.arguments: + self.validate_extended_attributes_node(argument) +- for dictionary in definitions.dictionaries.values(): ++ for dictionary in list(definitions.dictionaries.values()): + self.validate_extended_attributes_node(dictionary) + for member in dictionary.members: + self.validate_extended_attributes_node(member) +- for callback_function in definitions.callback_functions.values(): ++ for callback_function in list(definitions.callback_functions.values()): + self.validate_extended_attributes_node(callback_function) + for argument in callback_function.arguments: + self.validate_extended_attributes_node(argument) + + def validate_extended_attributes_node(self, node): +- for name, values_string in node.extended_attributes.items(): ++ for name, values_string in list(node.extended_attributes.items()): + self.validate_name_values_string(name, values_string) + + def validate_name_values_string(self, name, values_string): +@@ -103,7 +103,7 @@ + line = line.strip() + if not line or line.startswith('#'): + continue +- name, _, values_string = map(str.strip, line.partition('=')) ++ name, _, values_string = list(map(str.strip, line.partition('='))) + value_list = [ + value.strip() for value in values_string.split('|') + ] +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/interface_dependency_resolver.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/interface_dependency_resolver.py 
2025-01-16 02:26:08.543680354 +0800 +@@ -101,7 +101,7 @@ + 'this definition: %s, because this should ' + 'have a dictionary.' % definitions.idl_name) + +- target_interface = next(iter(definitions.interfaces.values())) ++ target_interface = next(iter(list(definitions.interfaces.values()))) + interface_name = target_interface.name + interface_info = self.interfaces_info[interface_name] + +@@ -163,7 +163,7 @@ + dependency_idl_filename) + + dependency_interface = next( +- iter(dependency_definitions.interfaces.values())) ++ iter(list(dependency_definitions.interfaces.values()))) + + transfer_extended_attributes(dependency_interface, + dependency_idl_filename) +@@ -314,7 +314,7 @@ + 'ImplementedAs', dependency_interface.name)) + + def update_attributes(attributes, extras): +- for key, value in extras.items(): ++ for key, value in list(extras.items()): + if key not in attributes: + attributes[key] = value + +@@ -362,8 +362,8 @@ + interface.get('cpp_includes', {}).get(component, {})) + return unforgeable_attributes, referenced_interfaces, cpp_includes + +- for component, definitions in resolved_definitions.items(): +- for interface_name, interface in definitions.interfaces.items(): ++ for component, definitions in list(resolved_definitions.items()): ++ for interface_name, interface in list(definitions.interfaces.items()): + interface_info = interfaces_info[interface_name] + inherited_unforgeable_attributes, referenced_interfaces, cpp_includes = \ + collect_unforgeable_attributes_in_ancestors( +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/overload_set_algorithm.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/overload_set_algorithm.py 2025-01-16 02:26:08.543680354 +0800 +@@ -183,7 +183,7 @@ + # Filter to only methods that are actually overloaded + method_counts = Counter(method['name'] for method in methods) + overloaded_method_names = set( +- name for name, count in method_counts.items() if count > 1) ++ name for name, count in list(method_counts.items()) if count > 1) + overloaded_methods = [ + method for method in methods + if method['name'] in overloaded_method_names +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/utilities.py 2025-01-14 21:29:17.875978753 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/utilities.py 2025-01-16 02:26:08.544763668 +0800 +@@ -13,7 +13,7 @@ + import sys + + if sys.version_info.major == 2: +- import cPickle as pickle ++ import pickle as pickle + else: + import pickle + +@@ -220,7 +220,7 @@ + |target| will be updated with |diff|. Part of |diff| may be re-used in + |target|. 
+ """ +- for key, value in diff.items(): ++ for key, value in list(diff.items()): + if key not in target: + target[key] = value + elif type(value) == dict: +@@ -443,7 +443,7 @@ + if parences < 0 or square_brackets < 0: + raise ValueError('You have more close braces than open braces.') + if parences == 0 and square_brackets == 0: +- name, _, value = map(str.strip, concatenated.partition('=')) ++ name, _, value = list(map(str.strip, concatenated.partition('='))) + extended_attributes[name] = value + concatenated = None + return extended_attributes +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/v8_attributes.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/v8_attributes.py 2025-01-16 02:26:08.544763668 +0800 +@@ -110,9 +110,7 @@ + # [ReflectOnly] + reflect_only = extended_attribute_value_as_list(attribute, 'ReflectOnly') + if reflect_only: +- reflect_only = map( +- lambda v: cpp_content_attribute_value_name(interface, v), +- reflect_only) ++ reflect_only = [cpp_content_attribute_value_name(interface, v) for v in reflect_only] + if is_custom_element_callbacks or is_reflect: + includes.add('core/html/custom/v0_custom_element_processing_stack.h') + # [PerWorldBindings] +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/v8_dictionary.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/v8_dictionary.py 2025-01-16 02:26:08.544763668 +0800 +@@ -254,7 +254,7 @@ + raise Exception('Member name conflict: %s' % cpp_name) + members_dict[cpp_name] = member + return sorted( +- members_dict.values(), key=lambda member: member['cpp_name']) ++ list(members_dict.values()), key=lambda member: member['cpp_name']) + + includes.clear() + header_forward_decls = set() +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/v8_methods.py 2025-01-14 21:29:17.877062067 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/v8_methods.py 2025-01-16 02:26:08.544763668 +0800 +@@ -48,7 +48,7 @@ + + # TODO: Remove this once Python2 is obsoleted. 
+ if sys.version_info.major != 2:
+-    basestring = str
++    pass  # 'basestring' is just 'str' on Python 3
+ 
+ 
+ def method_is_visible(method, interface_is_partial):
+@@ -589,7 +589,7 @@
+         return '/* null default value */'
+     if default_value.value == "{}":
+         member_type = idl_type.dictionary_member_type
+-    elif isinstance(default_value.value, basestring):
++    elif isinstance(default_value.value, str):
+         member_type = idl_type.string_member_type
+     elif isinstance(default_value.value, (int, float)):
+         member_type = idl_type.numeric_member_type
+--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/code_node.py	2025-01-14 21:29:17.872728812 +0800
++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/code_node.py	2025-01-16 02:26:08.544763668 +0800
+@@ -316,7 +316,7 @@
+         for node in outers:
+             if node.own_template_vars is None:
+                 continue
+-            for name, value in node.own_template_vars.items():
++            for name, value in list(node.own_template_vars.items()):
+                 assert name not in bindings, (
+                     "Duplicated template variable binding: {}".format(name))
+                 bindings[name] = value
+@@ -341,7 +341,7 @@
+ 
+     def add_template_vars(self, template_vars):
+         assert isinstance(template_vars, dict)
+-        for name, value in template_vars.items():
++        for name, value in list(template_vars.items()):
+             self.add_template_var(name, value)
+ 
+     @property
+@@ -357,7 +357,7 @@
+ 
+     def set_base_template_vars(self, template_vars):
+         assert isinstance(template_vars, dict)
+-        for name, value in template_vars.items():
++        for name, value in list(template_vars.items()):
+             assert isinstance(name, str)
+             assert not isinstance(value, CodeNode)
+         assert self._base_template_vars is None
+@@ -507,7 +507,7 @@
+             gensym = CodeNode.gensym()
+             gensym_args.append("${{{}}}".format(gensym))
+             template_vars[gensym] = arg
+-        for key, value in kwargs.items():
++        for key, value in list(kwargs.items()):
+             assert isinstance(key, (int, str))
+             assert isinstance(value, (CodeNode, int, str))
+             gensym = CodeNode.gensym()
+@@ -722,19 +722,18 @@
+         return counts
+ 
+     self_index = next(iter(scope_chains)).index(self)
+-    scope_chains = map(
+-        lambda scope_chain: scope_chain[self_index + 1:], scope_chains)
++    scope_chains = [scope_chain[self_index + 1:] for scope_chain in scope_chains]
+     scope_to_likeliness = {}
+     for scope_chain in scope_chains:
+         if not scope_chain:
+             counts[DIRECT_USES] += 1
+         else:
+             likeliness = min(
+-                map(lambda scope: scope.likeliness, scope_chain))
++                [scope.likeliness for scope in scope_chain])
+             scope = scope_chain[0]
+             scope_to_likeliness[scope] = max(
+                 likeliness, scope_to_likeliness.get(scope, likeliness))
+-    for likeliness in scope_to_likeliness.values():
++    for likeliness in list(scope_to_likeliness.values()):
+         counts[DIRECT_CHILD_SCOPES] += 1
+         counts[likeliness] += 1
+     return counts
+@@ -896,8 +895,7 @@
+ 
+     def _request_symbol_definition(self, renderer):
+         symbol_scope_chain = tuple(
+-            filter(lambda node: isinstance(node, SymbolScopeNode),
+-                   renderer.callers_from_first_to_last))
++            [node for node in renderer.callers_from_first_to_last if isinstance(node, SymbolScopeNode)])
+ 
+         for caller in renderer.callers_from_last_to_first:
+             caller.on_code_symbol_referenced(self, symbol_scope_chain)
+--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/code_node_cxx.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/code_node_cxx.py	2025-01-16 02:26:08.544763668 +0800
+@@ -319,8 +319,8 @@
+         CompositeNode.__init__(self,
+ 
template_format, + name=_to_maybe_text_node(name), +- arg_decls=ListNode(map(_to_maybe_text_node, +- arg_decls), ++ arg_decls=ListNode(list(map(_to_maybe_text_node, ++ arg_decls)), + separator=", "), + return_type=_to_maybe_text_node(return_type), + template=template, +@@ -401,7 +401,7 @@ + member_initializer_list = "" + else: + member_initializer_list = ListNode( +- map(_to_maybe_text_node, member_initializer_list), ++ list(map(_to_maybe_text_node, member_initializer_list)), + separator=", ", + head=" : ") + +@@ -413,7 +413,7 @@ + template_format, + name=_to_maybe_text_node(name), + arg_decls=ListNode( +- map(_to_maybe_text_node, arg_decls), separator=", "), ++ list(map(_to_maybe_text_node, arg_decls)), separator=", "), + return_type=_to_maybe_text_node(return_type), + class_name=class_name, + template=template, +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/codegen_accumulator.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/codegen_accumulator.py 2025-01-16 02:26:08.544763668 +0800 +@@ -26,7 +26,7 @@ + return self._include_headers + + def add_include_headers(self, headers): +- self._include_headers.update(filter(None, headers)) ++ self._include_headers.update([_f for _f in headers if _f]) + + @staticmethod + def require_include_headers(headers): +@@ -37,7 +37,7 @@ + return self._class_decls + + def add_class_decls(self, class_names): +- self._class_decls.update(filter(None, class_names)) ++ self._class_decls.update([_f for _f in class_names if _f]) + + @staticmethod + def require_class_decls(class_names): +@@ -48,7 +48,7 @@ + return self._struct_decls + + def add_struct_decls(self, struct_names): +- self._struct_decls.update(filter(None, struct_names)) ++ self._struct_decls.update([_f for _f in struct_names if _f]) + + @staticmethod + def require_struct_decls(struct_names): +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/codegen_context.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/codegen_context.py 2025-01-16 02:26:08.544763668 +0800 +@@ -114,7 +114,7 @@ + ) + + # Define public readonly properties of this class. +- for attr in cls._context_attrs.keys(): ++ for attr in list(cls._context_attrs.keys()): + + def make_get(): + _attr = cls._internal_attr(attr) +@@ -133,11 +133,11 @@ + def __init__(self, **kwargs): + assert CodeGenContext._was_initialized + +- for arg in kwargs.keys(): ++ for arg in list(kwargs.keys()): + assert arg in self._context_attrs, "Unknown argument: {}".format( + arg) + +- for attr, default_value in self._context_attrs.items(): ++ for attr, default_value in list(self._context_attrs.items()): + value = kwargs[attr] if attr in kwargs else default_value + assert (default_value is None + or type(value) is type(default_value)), ( +@@ -149,13 +149,13 @@ + Returns a copy of this context applying the updates given as the + arguments. 
+ """ +- for arg in kwargs.keys(): ++ for arg in list(kwargs.keys()): + assert arg in self._context_attrs, "Unknown argument: {}".format( + arg) + + new_object = copy.copy(self) + +- for attr, new_value in kwargs.items(): ++ for attr, new_value in list(kwargs.items()): + old_value = getattr(self, attr) + assert old_value is None or type(new_value) is type(old_value), ( + "Type mismatch at argument: {}".format(attr)) +@@ -172,7 +172,7 @@ + """ + bindings = {} + +- for attr in self._context_attrs.keys(): ++ for attr in list(self._context_attrs.keys()): + value = getattr(self, attr) + if value is None: + value = NonRenderable(attr) +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/codegen_expr.py 2025-01-14 21:29:17.872728812 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/codegen_expr.py 2025-01-16 02:26:08.544763668 +0800 +@@ -109,7 +109,7 @@ + + if any(term.is_always_false for term in terms): + return _Expr(False) +- terms = list(filter(lambda x: not x.is_always_true, terms)) ++ terms = list([x for x in terms if not x.is_always_true]) + if not terms: + return _Expr(True) + if len(terms) == 1: +@@ -124,7 +124,7 @@ + + if any(term.is_always_true for term in terms): + return _Expr(True) +- terms = list(filter(lambda x: not x.is_always_false, terms)) ++ terms = list([x for x in terms if not x.is_always_false]) + if not terms: + return _Expr(False) + if len(terms) == 1: +@@ -210,9 +210,7 @@ + feature, arg)) + + def ref_selected(features): +- feature_tokens = map( +- lambda feature: "OriginTrialFeature::k{}".format(feature), +- features) ++ feature_tokens = ["OriginTrialFeature::k{}".format(feature) for feature in features] + return _Expr("${{feature_selector}}.IsAnyOf({})".format( + ", ".join(feature_tokens))) + +@@ -269,17 +267,16 @@ + # [RuntimeEnabled] + if exposure.runtime_enabled_features: + feature_enabled_terms.extend( +- map(ref_enabled, exposure.runtime_enabled_features)) ++ list(map(ref_enabled, exposure.runtime_enabled_features))) + feature_selector_names.extend( + exposure.context_dependent_runtime_enabled_features) + + # [ContextEnabled] + if exposure.context_enabled_features: + terms = list( +- map( +- lambda feature: _Expr( ++ [_Expr( + "${{context_feature_settings}}->is{}Enabled()".format( +- feature)), exposure.context_enabled_features)) ++ feature)) for feature in exposure.context_enabled_features]) + context_enabled_terms.append( + expr_and([_Expr("${context_feature_settings}"), + expr_or(terms)])) +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/interface.py 2025-01-14 21:29:17.874895440 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/interface.py 2025-01-16 02:26:08.544763668 +0800 +@@ -561,8 +561,7 @@ + body=F("${return_value} = {};", constant(empty_default))) + + expr = " || ".join( +- map(lambda keyword: "reflect_value == {}".format(constant(keyword)), +- keywords)) ++ ["reflect_value == {}".format(constant(keyword)) for keyword in keywords]) + branches.append(cond=expr, body=T("${return_value} = reflect_value;")) + + if invalid_default is not None: +@@ -1177,7 +1176,7 @@ + + # TODO(yukishiino): Runtime-enabled features should be taken into account + # when calculating the max argument size. 
+- max_arg_size = max(map(args_size, items)) ++ max_arg_size = max(list(map(args_size, items))) + arg_count_def = F("const int arg_count = std::min(${info}.Length(), {});", + max_arg_size) + +@@ -1197,9 +1196,8 @@ + + conditional = expr_or( + list( +- map( +- lambda item: expr_from_exposure(item.function_like.exposure +- ), items))) ++ [expr_from_exposure(item.function_like.exposure ++ ) for item in items])) + if not conditional.is_always_true: + node = CxxUnlikelyIfNode(cond=conditional, body=node) + +@@ -1809,7 +1807,7 @@ + def optimize_element_cereactions_reflect(): + has_cereactions = False + has_reflect = False +- for key in ext_attrs.keys(): ++ for key in list(ext_attrs.keys()): + if key == "CEReactions": + has_cereactions = True + elif key == "Reflect": +@@ -4954,7 +4952,7 @@ + + iterate(collectionlike.attributes, process_attribute) + iterate( +- filter(should_define, collectionlike.operation_groups), ++ list(filter(should_define, collectionlike.operation_groups)), + process_operation_group) + + return callback_def_nodes +@@ -4970,8 +4968,8 @@ + + unscopables = [] + is_unscopable = lambda member: "Unscopable" in member.extended_attributes +- unscopables.extend(filter(is_unscopable, class_like.attributes)) +- unscopables.extend(filter(is_unscopable, class_like.operations)) ++ unscopables.extend(list(filter(is_unscopable, class_like.attributes))) ++ unscopables.extend(list(filter(is_unscopable, class_like.operations))) + if unscopables: + nodes.extend([ + TextNode("""\ +@@ -5178,12 +5176,10 @@ + + if class_like.identifier == "CSSStyleDeclaration": + css_properties = list( +- filter(lambda attr: "CSSProperty" in attr.extended_attributes, +- class_like.attributes)) ++ [attr for attr in class_like.attributes if "CSSProperty" in attr.extended_attributes]) + if css_properties: + prop_name_list = "".join( +- map(lambda attr: "\"{}\", ".format(attr.identifier), +- css_properties)) ++ ["\"{}\", ".format(attr.identifier) for attr in css_properties]) + body.append( + T("""\ + // CSSStyleDeclaration-specific settings +@@ -5532,7 +5528,7 @@ + TextNode(installer_call_text), + ])) + body.append(EmptyNode()) +- for conditional, entries in conditional_to_entries.items(): ++ for conditional, entries in list(conditional_to_entries.items()): + body.append( + CxxUnlikelyIfNode( + cond=conditional, +@@ -5569,8 +5565,7 @@ + "V8DOMConfiguration::InstallConstants(${isolate}, " + "${interface_template}, ${prototype_template}, " + "kConstantCallbackTable, base::size(kConstantCallbackTable));") +- constant_callback_entries = list(filter(lambda entry: entry.const_callback_name, +- constant_entries)) ++ constant_callback_entries = list([entry for entry in constant_entries if entry.const_callback_name]) + install_properties(table_name, constant_callback_entries, + _make_constant_callback_registration_table, + installer_call_text) +@@ -5586,8 +5581,7 @@ + "V8DOMConfiguration::InstallConstants(${isolate}, " + "${interface_template}, ${prototype_template}, " + "kConstantValueTable, base::size(kConstantValueTable));") +- constant_value_entries = list(filter( +- lambda entry: not entry.const_callback_name, constant_entries)) ++ constant_value_entries = list([entry for entry in constant_entries if not entry.const_callback_name]) + install_properties(table_name, constant_value_entries, + _make_constant_value_registration_table, + installer_call_text) +@@ -5619,12 +5613,10 @@ + "${instance_template}, ${prototype_template}, " + "${interface_template}, ${signature}, " + "kOperationTable, base::size(kOperationTable));") +- 
entries = filter(lambda entry: not entry.no_alloc_direct_callback_name, +- operation_entries) ++ entries = [entry for entry in operation_entries if not entry.no_alloc_direct_callback_name] + install_properties(table_name, entries, _make_operation_registration_table, + installer_call_text) +- entries = filter(lambda entry: entry.no_alloc_direct_callback_name, +- operation_entries) ++ entries = [entry for entry in operation_entries if entry.no_alloc_direct_callback_name] + install_properties(table_name, entries, _make_operation_registration_table, + installer_call_text) + +@@ -5663,7 +5655,7 @@ + + def most_derived_interface(*interfaces): + key = lambda interface: len(interface.inclusive_inherited_interfaces) +- return sorted(filter(None, interfaces), key=key)[-1] ++ return sorted([_f for _f in interfaces if _f], key=key)[-1] + + cg_context = cg_context.make_copy( + v8_callback_type=CodeGenContext.V8_OTHER_CALLBACK) +@@ -5701,7 +5693,7 @@ + flags.append("v8::PropertyHandlerFlags::kHasNoSideEffect") + property_handler_flags = ( + "static_cast({})".format(" | ".join( +- map(lambda flag: "int32_t({})".format(flag), flags)))) ++ ["int32_t({})".format(flag) for flag in flags]))) + pattern = """\ + // Named interceptors + {{ +@@ -6339,7 +6331,7 @@ + + derived_interfaces = cg_context.interface.deriveds + derived_names = list( +- map(lambda interface: interface.identifier, derived_interfaces)) ++ [interface.identifier for interface in derived_interfaces]) + derived_names.append(cg_context.interface.identifier) + if not ("Window" in derived_names or "HTMLDocument" in derived_names): + return None, None +@@ -6414,10 +6406,8 @@ + collect_callbacks(cross_origin_property_callback_defs) + + entry_nodes = list( +- map( +- lambda name: TextNode("reinterpret_cast({}),".format(name +- )), +- filter(None, callback_names))) ++ [TextNode("reinterpret_cast({}),".format(name ++ )) for name in [_f for _f in callback_names if _f]]) + table_node = ListNode([ + TextNode("using namespace ${class_name}Callbacks;"), + TextNode("static const intptr_t kReferenceTable[] = {"), +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/mako_renderer.py 2025-01-14 21:29:17.874895440 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/mako_renderer.py 2025-01-16 02:26:08.544763668 +0800 +@@ -166,7 +166,7 @@ + """Returns the best-guessed name of |caller|.""" + try: + # Outer CodeNode may have a binding to the caller. 
+- for name, value in caller.outer.template_vars.items(): ++ for name, value in list(caller.outer.template_vars.items()): + if value is caller: + return name + try: +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/name_style.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/name_style.py 2025-01-16 02:26:08.544763668 +0800 +@@ -139,8 +139,8 @@ + assert callable(style_func) + assert isinstance(format_string, str) + +- args = map(style_func, map(_tokenize, args)) +- for key, value in kwargs.items(): ++ args = list(map(style_func, list(map(_tokenize, args)))) ++ for key, value in list(kwargs.items()): + kwargs[key] = style_func(_tokenize(value)) + return format_string.format(*args, **kwargs) + +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/path_manager.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/path_manager.py 2025-01-16 02:26:08.544763668 +0800 +@@ -58,7 +58,7 @@ + cls._root_gen_dir = os.path.abspath(root_gen_dir) + cls._component_reldirs = { + component: posixpath.normpath(rel_dir) +- for component, rel_dir in component_reldirs.items() ++ for component, rel_dir in list(component_reldirs.items()) + } + cls._is_initialized = True + +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/callback_interface.py 2025-01-14 21:29:17.877062067 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/callback_interface.py 2025-01-16 02:26:08.544763668 +0800 +@@ -93,9 +93,8 @@ + self._operation_groups = tuple([ + OperationGroup(operation_group_ir, + list( +- filter( +- lambda x: x.identifier == operation_group_ir +- .identifier, self._operations)), ++ [x for x in self._operations if x.identifier == operation_group_ir ++ .identifier]), + owner=self) + for operation_group_ir in ir.operation_groups + ]) +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/database.py 2025-01-14 21:29:17.877062067 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/database.py 2025-01-16 02:26:08.544763668 +0800 +@@ -53,12 +53,12 @@ + + def __init__(self): + self._defs = {} +- for kind in DatabaseBody.Kind.values(): ++ for kind in list(DatabaseBody.Kind.values()): + self._defs[kind] = {} + + def register(self, kind, user_defined_type): + assert isinstance(user_defined_type, (Typedef, Union, UserDefinedType)) +- assert kind in DatabaseBody.Kind.values() ++ assert kind in list(DatabaseBody.Kind.values()) + try: + self.find_by_identifier(user_defined_type.identifier) + assert False, user_defined_type.identifier +@@ -67,7 +67,7 @@ + self._defs[kind][user_defined_type.identifier] = user_defined_type + + def find_by_identifier(self, identifier): +- for defs_per_kind in self._defs.values(): ++ for defs_per_kind in list(self._defs.values()): + if identifier in defs_per_kind: + return defs_per_kind[identifier] + raise KeyError(identifier) +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/extended_attribute.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/extended_attribute.py 2025-01-16 02:26:08.544763668 +0800 +@@ -190,7 +190,7 @@ + def _on_ext_attrs_updated(self): + self._keys = tuple(sorted(self._ext_attrs.keys())) + self._length = 0 +- for ext_attrs in self._ext_attrs.values(): ++ for ext_attrs in 
list(self._ext_attrs.values()):
+             self._length += len(ext_attrs)
+ 
+     @classmethod
+@@ -206,7 +206,7 @@
+         if not all(isinstance(x, cls) for x in (lhs, rhs)):
+             return False
+ 
+-        if lhs.keys() != rhs.keys():
++        if list(lhs.keys()) != list(rhs.keys()):
+             return False
+         if len(lhs) != len(rhs):
+             return False
+--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/file_io.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/file_io.py	2025-01-16 02:26:08.544763668 +0800
+@@ -6,7 +6,7 @@
+ import sys
+ 
+ if sys.version_info.major == 2:
+-    import cPickle as pickle  # 'cPickle' is faster than 'pickle' on Py2
++    import pickle  # Python 3's 'pickle' already uses the C implementation
+ else:
+     import pickle
+ 
+--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/function_like.py	2025-01-14 21:29:17.878145380 +0800
++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/function_like.py	2025-01-16 02:26:08.546930298 +0800
+@@ -72,8 +72,7 @@
+         """Returns the number of required arguments."""
+         return len(
+             list(
+-                filter(lambda arg: not (arg.is_optional or arg.is_variadic),
+-                       self.arguments)))
++                [arg for arg in self.arguments if not (arg.is_optional or arg.is_variadic)]))
+ 
+ 
+ class OverloadGroup(WithIdentifier):
+@@ -165,7 +164,7 @@
+         Returns the minimum number of required arguments of overloaded
+         functions.
+         """
+-        return min(map(lambda func: func.num_of_required_arguments, self))
++        return min([func.num_of_required_arguments for func in self])
+ 
+     def effective_overload_set(self, argument_count=None):
+         """
+@@ -178,7 +177,7 @@
+         S = []
+         F = self
+ 
+-        maxarg = max(map(lambda X: len(X.arguments), F))
++        maxarg = max([len(X.arguments) for X in F])
+         if N is None:
+             arg_sizes = [len(X.arguments) for X in F if not X.is_variadic]
+             N = 1 + (max(arg_sizes) if arg_sizes else 0)
+@@ -188,20 +187,20 @@
+ 
+             S.append(
+                 OverloadGroup.EffectiveOverloadItem(
+-                    X, list(map(lambda arg: arg.idl_type, X.arguments)),
+-                    list(map(lambda arg: arg.optionality, X.arguments))))
++                    X, list([arg.idl_type for arg in X.arguments]),
++                    list([arg.optionality for arg in X.arguments])))
+ 
+             if X.is_variadic:
+                 for i in range(n, max(maxarg, N)):
+-                    t = list(map(lambda arg: arg.idl_type, X.arguments))
+-                    o = list(map(lambda arg: arg.optionality, X.arguments))
++                    t = list([arg.idl_type for arg in X.arguments])
++                    o = list([arg.optionality for arg in X.arguments])
+                     for _ in range(n, i + 1):
+                         t.append(X.arguments[-1].idl_type)
+                         o.append(X.arguments[-1].optionality)
+                     S.append(OverloadGroup.EffectiveOverloadItem(X, t, o))
+ 
+-            t = list(map(lambda arg: arg.idl_type, X.arguments))
+-            o = list(map(lambda arg: arg.optionality, X.arguments))
++            t = list([arg.idl_type for arg in X.arguments])
++            o = list([arg.optionality for arg in X.arguments])
+             for i in range(n - 1, -1, -1):
+                 if X.arguments[i].optionality == IdlType.Optionality.REQUIRED:
+                     break
+--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/idl_compiler.py	2025-01-14 21:29:17.878145380 +0800
++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/idl_compiler.py	2025-01-16 02:26:08.546930298 +0800
+@@ -310,7 +310,7 @@
+ 
+         self._ir_map.move_to_new_phase()
+ 
+-        for identifier, old_dictionary in old_dictionaries.items():
++        for identifier, old_dictionary in list(old_dictionaries.items()):
+             new_dictionary = make_copy(old_dictionary)
+             self._ir_map.add(new_dictionary)
+             for 
partial_dictionary in old_partial_dictionaries.get( +@@ -342,7 +342,7 @@ + ir_sets_to_merge = [(interface, [ + mixins[include.mixin_identifier] + for include in includes.get(identifier, []) +- ]) for identifier, interface in interfaces.items()] ++ ]) for identifier, interface in list(interfaces.items())] + + self._ir_map.move_to_new_phase() + +@@ -393,7 +393,7 @@ + + identifier_to_derived_set = {} + +- for old_interface in old_interfaces.values(): ++ for old_interface in list(old_interfaces.values()): + new_interface = make_copy(old_interface) + self._ir_map.add(new_interface) + inheritance_chain = create_inheritance_chain( +@@ -423,8 +423,7 @@ + derived_set = identifier_to_derived_set.get( + new_interface.identifier, set()) + new_interface.deriveds = list( +- map(lambda id_: self._ref_to_idl_def_factory.create(id_), +- sorted(derived_set))) ++ [self._ref_to_idl_def_factory.create(id_) for id_ in sorted(derived_set)]) + + def _supplement_missing_html_constructor_operation(self): + # Temporary mitigation of misuse of [HTMLConstructor] +@@ -553,8 +552,7 @@ + self._ir_map.add(new_ir) + + for group in new_ir.iter_all_overload_groups(): +- exposures = list(map(lambda overload: overload.exposure, +- group)) ++ exposures = list([overload.exposure for overload in group]) + + # [Exposed] + if any(not exposure.global_names_and_features +@@ -772,13 +770,13 @@ + + grouped_typedefs = {} # {unique key: list of typedefs to the union} + all_typedefs = self._db.find_by_kind(DatabaseBody.Kind.TYPEDEF) +- for typedef in all_typedefs.values(): ++ for typedef in list(all_typedefs.values()): + if not typedef.idl_type.is_union: + continue + key = unique_key(typedef.idl_type) + grouped_typedefs.setdefault(key, []).append(typedef) + +- for key, union_types in grouped_unions.items(): ++ for key, union_types in list(grouped_unions.items()): + self._db.register( + DatabaseBody.Kind.UNION, + Union( +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/idl_type.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/idl_type.py 2025-01-16 02:26:08.546930298 +0800 +@@ -265,14 +265,14 @@ + } + + value_counts = {None: 0, False: 0, True: 0} +- for value in switches.values(): ++ for value in list(switches.values()): + assert value is None or isinstance(value, bool) + value_counts[value] += 1 + assert value_counts[False] == 0 or value_counts[True] == 0, ( + "Specify only True or False arguments. 
Unspecified arguments are " + "automatically set to the opposite value.") + default = value_counts[True] == 0 +- for arg, value in switches.items(): ++ for arg, value in list(switches.items()): + if value is None: + switches[arg] = default + +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/idl_type_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/idl_type_test.py 2025-01-16 02:26:08.546930298 +0800 +@@ -83,7 +83,7 @@ + 'void': 'Void', + 'symbol': 'Symbol', + } +- for name, expect in type_names.items(): ++ for name, expect in list(type_names.items()): + self.assertEqual(expect, factory.simple_type(name).type_name) + + short_type = factory.simple_type('short') +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/interface.py 2025-01-14 21:29:17.878145380 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/interface.py 2025-01-16 02:26:08.546930298 +0800 +@@ -181,8 +181,7 @@ + ConstructorGroup( + group_ir, + list( +- filter(lambda x: x.identifier == group_ir.identifier, +- self._constructors)), ++ [x for x in self._constructors if x.identifier == group_ir.identifier]), + owner=self) for group_ir in ir.constructor_groups + ]) + assert len(self._constructor_groups) <= 1 +@@ -194,8 +193,7 @@ + ConstructorGroup( + group_ir, + list( +- filter(lambda x: x.identifier == group_ir.identifier, +- self._named_constructors)), ++ [x for x in self._named_constructors if x.identifier == group_ir.identifier]), + owner=self) for group_ir in ir.named_constructor_groups + ]) + self._operations = tuple([ +@@ -206,22 +204,20 @@ + OperationGroup( + group_ir, + list( +- filter(lambda x: x.identifier == group_ir.identifier, +- self._operations)), ++ [x for x in self._operations if x.identifier == group_ir.identifier]), + owner=self) for group_ir in ir.operation_groups + ]) + self._exposed_constructs = tuple(ir.exposed_constructs) + self._legacy_window_aliases = tuple(ir.legacy_window_aliases) + self._indexed_and_named_properties = None + indexed_and_named_property_operations = list( +- filter(lambda x: x.is_indexed_or_named_property_operation, +- self._operations)) ++ [x for x in self._operations if x.is_indexed_or_named_property_operation]) + if indexed_and_named_property_operations: + self._indexed_and_named_properties = IndexedAndNamedProperties( + indexed_and_named_property_operations, owner=self) + self._stringifier = None + stringifier_operation_irs = list( +- filter(lambda x: x.is_stringifier, ir.operations)) ++ [x for x in ir.operations if x.is_stringifier]) + if stringifier_operation_irs: + assert len(stringifier_operation_irs) == 1 + op_ir = make_copy(stringifier_operation_irs[0]) +@@ -235,8 +231,7 @@ + if operation.stringifier_attribute: + attr_id = operation.stringifier_attribute + attributes = list( +- filter(lambda x: x.identifier == attr_id, +- self._attributes)) ++ [x for x in self._attributes if x.identifier == attr_id]) + assert len(attributes) == 1 + attribute = attributes[0] + self._stringifier = Stringifier(operation, attribute, owner=self) +@@ -258,7 +253,7 @@ + @property + def deriveds(self): + """Returns the list of the derived interfaces.""" +- return tuple(map(lambda ref: ref.target_object, self._deriveds)) ++ return tuple([ref.target_object for ref in self._deriveds]) + + @property + def inclusive_inherited_interfaces(self): +@@ -350,7 +345,7 @@ + Returns a list of the constructs that are exposed on this global object. 
+ """ + return tuple( +- map(lambda ref: ref.target_object, self._exposed_constructs)) ++ [ref.target_object for ref in self._exposed_constructs]) + + @property + def legacy_window_aliases(self): +@@ -583,8 +578,7 @@ + OperationGroup( + group_ir, + list( +- filter(lambda x: x.identifier == group_ir.identifier, +- self._operations)), ++ [x for x in self._operations if x.identifier == group_ir.identifier]), + owner=owner) for group_ir in ir.operation_groups + ]) + +@@ -672,8 +666,7 @@ + OperationGroup( + group_ir, + list( +- filter(lambda x: x.identifier == group_ir.identifier, +- self._operations)), ++ [x for x in self._operations if x.identifier == group_ir.identifier]), + owner=owner) for group_ir in ir.operation_groups + ]) + +@@ -762,8 +755,7 @@ + OperationGroup( + group_ir, + list( +- filter(lambda x: x.identifier == group_ir.identifier, +- self._operations)), ++ [x for x in self._operations if x.identifier == group_ir.identifier]), + owner=owner) for group_ir in ir.operation_groups + ]) + +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/ir_builder.py 2025-01-14 21:29:17.878145380 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/ir_builder.py 2025-01-16 02:26:08.546930298 +0800 +@@ -127,7 +127,7 @@ + for child in child_nodes + ] + if stringifier_members: +- members.extend(filter(None, stringifier_members)) ++ members.extend([_f for _f in stringifier_members if _f]) + attributes = [] + constants = [] + constructors = [] +@@ -494,8 +494,8 @@ + assert node.GetClass() == 'ExtAttributes' + return ExtendedAttributes( + list( +- filter(None, map(build_extended_attribute, +- node.GetChildren())))) ++ [_f for _f in map(build_extended_attribute, ++ node.GetChildren()) if _f])) + + def _build_inheritance(self, node): + assert node.GetClass() == 'Inherit' +@@ -557,7 +557,7 @@ + elif type_token == 'integer': + idl_type = factory.simple_type(name='long', debug_info=debug_info) + assert isinstance(value_token, str) +- value = long(value_token, base=0) ++ value = int(value_token, base=0) + literal = value_token + elif type_token == 'float': + idl_type = factory.simple_type( +@@ -1002,7 +1002,7 @@ + + return ExtendedAttributes([ + ExtendedAttribute(key=key, values=values) +- for key, values in key_values.items() ++ for key, values in list(key_values.items()) + ]) + + def _create_iterator_operations(self, node): +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/ir_map.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/ir_map.py 2025-01-16 02:26:08.546930298 +0800 +@@ -168,7 +168,7 @@ + """ + assert isinstance(identifier, Identifier) + for irs_per_phase in self._single_value_irs[self._current_phase::-1]: +- for irs_per_kind in irs_per_phase.values(): ++ for irs_per_kind in list(irs_per_phase.values()): + if identifier in irs_per_kind: + return irs_per_kind[identifier] + raise KeyError(identifier) +@@ -190,7 +190,7 @@ + """Returns a flattened list of IRs of the given kind.""" + if IRMap.IR.Kind.does_support_multiple_defs(kind): + accumulated = [] +- for irs in self.find_by_kind(kind).values(): ++ for irs in list(self.find_by_kind(kind).values()): + accumulated.extend(irs) + return accumulated + else: +--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/make_copy.py 2025-01-14 21:29:17.879228694 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/make_copy.py 2025-01-16 
02:26:08.546930298 +0800
+@@ -8,7 +8,7 @@
+ # TODO: Remove this once Python2 is obsoleted.
+ if sys.version_info.major != 2:
+     long = int
+-    basestring = str
++    # 'basestring' is unified into 'str' on Python 3
+ 
+ def make_copy(obj, memo=None):
+     """
+@@ -23,7 +23,7 @@
+         memo = dict()
+ 
+     if (obj is None
+-            or isinstance(obj, (bool, int, long, float, complex, basestring))):
++            or isinstance(obj, (bool, int, float, complex, str))):
+         # Do not make a copy if the object is of an immutable primitive type
+         # (or its subclass).
+         #
+@@ -50,16 +50,16 @@
+     cls = type(obj)
+ 
+     if isinstance(obj, (list, tuple, set, frozenset)):
+-        return memoize(cls(map(lambda x: make_copy(x, memo), obj)))
++        return memoize(cls([make_copy(x, memo) for x in obj]))
+ 
+     if isinstance(obj, dict):
+         return memoize(
+             cls([(make_copy(key, memo), make_copy(value, memo))
+-                 for key, value in obj.items()]))
++                 for key, value in list(obj.items())]))
+ 
+     if hasattr(obj, '__dict__'):
+         copy = memoize(cls.__new__(cls))
+-        for name, value in obj.__dict__.items():
++        for name, value in list(obj.__dict__.items()):
+             setattr(copy, name, make_copy(value, memo))
+         return copy
+ 
+--- a/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/namespace.py	2025-01-14 21:29:17.879228694 +0800
++++ b/src/3rdparty/chromium/third_party/blink/renderer/bindings/scripts/web_idl/namespace.py	2025-01-16 02:26:08.546930298 +0800
+@@ -109,9 +109,8 @@
+         self._operation_groups = tuple([
+             OperationGroup(operation_group_ir,
+                            list(
+-                               filter(
+-                                   lambda x: x.identifier == operation_group_ir
+-                                   .identifier, self._operations)),
++                               [x for x in self._operations if x.identifier == operation_group_ir
++                               .identifier]),
+                            owner=self)
+             for operation_group_ir in ir.operation_groups
+         ])
+--- a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/aria_properties.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/aria_properties.py	2025-01-16 02:26:08.546930298 +0800
+@@ -21,4 +21,4 @@
+         self._data = json5.loads(json5_file.read())
+ 
+     def attributes_list(self):
+-        return {'data': [item[u'name'] for item in self._data['attributes']]}
++        return {'data': [item['name'] for item in self._data['attributes']]}
+--- a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/hasher.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/hasher.py	2025-01-16 02:26:08.546930298 +0800
+@@ -28,16 +28,16 @@
+ 
+-class uint32_t(long):
++class uint32_t(int):
+     def __rshift__(self, other):
+-        return uint32_t(long.__rshift__(self, other) & ((1 << 32) - 1))
++        return uint32_t(int.__rshift__(self, other) & ((1 << 32) - 1))
+ 
+     def __lshift__(self, other):
+-        return uint32_t(long.__lshift__(self, other) & ((1 << 32) - 1))
++        return uint32_t(int.__lshift__(self, other) & ((1 << 32) - 1))
+ 
+     def __add__(self, other):
+-        return uint32_t(long.__add__(self, other) & ((1 << 32) - 1))
++        return uint32_t(int.__add__(self, other) & ((1 << 32) - 1))
+ 
+     def __xor__(self, other):
+-        return uint32_t(long.__xor__(self, other) & ((1 << 32) - 1))
++        return uint32_t(int.__xor__(self, other) & ((1 << 32) - 1))
+ 
+ 
+ def hash(string):
+--- a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/in_file.py	2025-01-14 21:29:17.880312008 +0800
++++ b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/in_file.py	2025-01-16 02:26:08.546930298 +0800
+@@ -26,7 +26,7 @@
+ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 
DAMAGE.
+ 
+-from __future__ import print_function
++
+ 
+ import copy
+ import os
+@@ -143,7 +143,7 @@
+         if not name in self.parameters:
+             self._fatal(
+                 "Unknown parameter: '%s' in line:\n%s\nKnown parameters: %s" %
+-                (name, line, self.parameters.keys()))
++                (name, line, list(self.parameters.keys())))
+         self.parameters[name] = value
+ 
+     def _parse_line(self, line):
+@@ -163,7 +163,7 @@
+             if arg_name not in self._defaults:
+                 self._fatal(
+                     "Unknown argument: '%s' in line:\n%s\nKnown arguments: %s"
+                     % (arg_name, line, list(self._defaults.keys())))
+             valid_values = self._valid_values.get(arg_name)
+             if valid_values and arg_value not in valid_values:
+                 self._fatal(
+--- a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/in_file_unittest.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/in_file_unittest.py	2025-01-16 02:26:08.546930298 +0800
+@@ -56,7 +56,7 @@
+                 'arg2': []
+             },
+         ]
+-        self.assertEquals(in_file.name_dictionaries, expected_values)
++        self.assertEqual(in_file.name_dictionaries, expected_values)
+ 
+     def test_with_parameters(self):
+         contents = """namespace=TestNamespace
+@@ -82,7 +82,7 @@
+             'namespace': 'TestNamespace',
+             'fruit': True,
+         }
+-        self.assertEquals(in_file.parameters, expected_parameters)
++        self.assertEqual(in_file.parameters, expected_parameters)
+ 
+     def test_assertion_for_non_in_files(self):
+         in_files = ['some_sample_file.json']
+--- a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/in_generator.py	2025-01-14 21:29:17.880312008 +0800
++++ b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/in_generator.py	2025-01-16 02:26:08.546930298 +0800
+@@ -26,7 +26,7 @@
+ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ 
+-from __future__ import print_function
++
+ 
+ import os
+ import os.path
+@@ -39,7 +39,7 @@
+ 
+ # TODO: Remove this once Python2 is obsoleted.
+ if sys.version_info.major != 2:
+-    basestring = str
++    pass  # 'basestring' is just 'str' on Python 3
+ 
+ 
+ #########################################################
+@@ -66,7 +66,7 @@
+             output_file.write(contents)
+ 
+     def write_files(self, output_dir):
+-        for file_name, generator in self._outputs.items():
++        for file_name, generator in list(self._outputs.items()):
+             self._write_file_if_changed(output_dir, generator(), file_name)
+ 
+     def set_gperf_path(self, gperf_path):
+@@ -83,7 +83,7 @@
+     def __init__(self, in_files):
+         super(Writer, self).__init__(in_files)
+ 
+-        if isinstance(in_files, basestring):
++        if isinstance(in_files, str):
+             in_files = [in_files]
+         if in_files:
+             self.in_file = InFile.load_from_files(in_files, self.defaults,
+--- a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/json5_generator.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/json5_generator.py	2025-01-16 02:26:08.546930298 +0800
+@@ -103,7 +103,7 @@
+         assert valid_keys, "'valid_keys' must be declared when using a dict value"
+         return all([(key in valid_keys or key == "default")
+                     and (val in valid_values or val == "")
+-                    for key, val in value.items()])
++                    for key, val in list(value.items())])
+     else:
+         return value in valid_values
+ 
+@@ -142,7 +142,7 @@
+ 
+     def _process(self, doc):
+         # Process optional metadata map entries. 
+- for key, value in doc.get("metadata", {}).items(): ++ for key, value in list(doc.get("metadata", {}).items()): + self._process_metadata(key, value) + # Get optional parameters map, and get the default value map from it. + self.parameters.update(doc.get("parameters", {})) +@@ -155,7 +155,7 @@ + entry = self._get_entry(item) + self.name_dictionaries.append(entry) + else: +- for key, value in items.items(): ++ for key, value in list(items.items()): + value["name"] = key + entry = self._get_entry(value) + self.name_dictionaries.append(entry) +@@ -164,11 +164,11 @@ + def _process_metadata(self, key, value): + if key not in self.metadata: + raise Exception("Unknown metadata: '%s'\nKnown metadata: %s" % +- (key, self.metadata.keys())) ++ (key, list(self.metadata.keys()))) + self.metadata[key] = value + + def _get_defaults(self): +- for key, value in self.parameters.items(): ++ for key, value in list(self.parameters.items()): + if value and "default" in value: + self._defaults[key] = value["default"] + else: +@@ -188,10 +188,10 @@ + "The parameter 'name' is reserved, use a different name." + entry["name"] = NameStyleConverter(item.pop("name")) + # Validate parameters if it's specified. +- for key, value in item.items(): ++ for key, value in list(item.items()): + if key not in self.parameters: + raise Exception("Unknown parameter: '%s'\nKnown params: %s" % +- (key, self.parameters.keys())) ++ (key, list(self.parameters.keys()))) + assert self.parameters[key] is not None, \ + "Specification for parameter 'key' cannot be None. Use {} instead." + self._validate_parameter(self.parameters[key], value) +@@ -300,7 +300,7 @@ + output_file.write(contents) + + def write_files(self, output_dir): +- for file_name, generator in self._outputs.items(): ++ for file_name, generator in list(self._outputs.items()): + self._write_file_if_changed(output_dir, generator(), file_name) + + def cleanup_files(self, output_dir): +--- a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/keyword_utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/keyword_utils.py 2025-01-16 02:26:08.546930298 +0800 +@@ -23,7 +23,7 @@ + default_parameters=json5_file_parameters).name_dictionaries + css_values_dictionary = [x['name'].original for x in css_values_dictionary] + name_to_position_dictionary = dict( +- zip(css_values_dictionary, range(len(css_values_dictionary)))) ++ list(zip(css_values_dictionary, list(range(len(css_values_dictionary)))))) + for css_property in css_properties: + if css_property['field_template'] == 'keyword' and len( + css_property['include_paths']) == 0: +--- a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/make_event_factory.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/make_event_factory.py 2025-01-16 02:26:08.546930298 +0800 +@@ -27,7 +27,7 @@ + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+-from __future__ import print_function
++
+ 
+ import os.path
+ import sys
+@@ -122,7 +122,7 @@
+             'third_party/blink/renderer/platform/instrumentation/use_counter.h',
+             'third_party/blink/renderer/platform/runtime_enabled_features.h',
+         }
+-        includes.update(map(self._headers_header_include_path, entries))
++        includes.update(list(map(self._headers_header_include_path, entries)))
+         return sorted([x for x in includes if x])
+ 
+     @template_expander.use_jinja(
+--- a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/make_instrumenting_probes.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/make_instrumenting_probes.py	2025-01-16 02:26:08.546930298 +0800
+@@ -151,8 +151,8 @@
+             # Splitting parameters by a comma, assuming that attribute
+             # lists contain no more than one attribute.
+             self.params = list(
+-                map(Parameter, map(str.strip,
+-                                   match.group(3).split(","))))
++                map(Parameter, list(map(str.strip,
++                                   match.group(3).split(",")))))
+ 
+ 
+ class Parameter(object):
+--- a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/make_origin_trials.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/make_origin_trials.py	2025-01-16 02:26:08.546930298 +0800
+@@ -48,7 +48,7 @@
+         self._implied_mappings = self._make_implied_mappings()
+         self._trial_to_features_map = self._make_trial_to_features_map()
+         self._max_features_per_trial = max(
+-            len(features) for features in self._trial_to_features_map.values())
++            len(features) for features in list(self._trial_to_features_map.values()))
+         self._set_trial_types()
+ 
+     @property
+--- a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/make_runtime_features.py	2025-01-14 21:29:17.880312008 +0800
++++ b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/make_runtime_features.py	2025-01-16 02:26:08.546930298 +0800
+@@ -32,7 +32,7 @@
+ import sys
+ 
+ if sys.version_info.major == 2:
+-    import cPickle as pickle
++    import pickle
+ else:
+     import pickle
+ 
+--- a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/make_runtime_features_utilities_unittest.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/make_runtime_features_utilities_unittest.py	2025-01-16 02:26:08.546930298 +0800
+@@ -23,7 +23,7 @@
+ class MakeRuntimeFeaturesUtilitiesTest(unittest.TestCase):
+     def test_cycle(self):
+         # Cycle: 'c' => 'd' => 'e' => 'c'
+-        with self.assertRaisesRegexp(
++        with self.assertRaisesRegex(
+                 AssertionError, 'Cycle found in depends_on/implied_by graph'):
+             util.origin_trials([
+                 _feature('a', depends_on=['b']),
+@@ -34,15 +34,15 @@
+         ])
+ 
+     def test_bad_dependency(self):
+-        with self.assertRaisesRegexp(AssertionError,
++        with self.assertRaisesRegex(AssertionError,
+                                      'a: Depends on non-existent-feature: x'):
+             util.origin_trials([_feature('a', depends_on=['x'])])
+ 
+     def test_bad_implication(self):
+-        with self.assertRaisesRegexp(AssertionError,
++        with self.assertRaisesRegex(AssertionError,
+                                      'a: Implied by non-existent-feature: x'):
+             util.origin_trials([_feature('a', implied_by=['x'])])
+-        with self.assertRaisesRegexp(
++        with self.assertRaisesRegex(
+                 AssertionError,
+                 'a: A feature must be in origin trial if implied by an origin trial feature: b'
+         ):
+             util.origin_trials([
+                 _feature('a', implied_by=['b']),
+             ])
+ 
+     def test_both_dependency_and_implication(self):
+-        with self.assertRaisesRegexp(
++        with self.assertRaisesRegex(
+                 AssertionError,
+                 'c: Only one of 
implied_by and depends_on is allowed'): + util.origin_trials([ +--- a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/rule_bison.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/rule_bison.py 2025-01-16 02:26:08.546930298 +0800 +@@ -47,8 +47,8 @@ + + + def modify_file(path, prefix_lines, suffix_lines, replace_list=[]): +- prefix_lines = map(lambda s: s + '\n', prefix_lines) +- suffix_lines = map(lambda s: s + '\n', suffix_lines) ++ prefix_lines = [s + '\n' for s in prefix_lines] ++ suffix_lines = [s + '\n' for s in suffix_lines] + with open(path, 'r') as f: + old_lines = f.readlines() + for i in range(len(old_lines)): +--- a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/trie_builder.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/trie_builder.py 2025-01-16 02:26:08.546930298 +0800 +@@ -30,7 +30,7 @@ + dicts_by_indexed_letter[string[index]].append((string, value)) + + output = {} +- for char, d in dicts_by_indexed_letter.items(): ++ for char, d in list(dicts_by_indexed_letter.items()): + if len(d) == 1: + string = d[0][0] + value = d[0][1] +@@ -47,11 +47,11 @@ + All strings should be all lower case. + """ + dicts_by_length = defaultdict(list) +- for string, value in str_to_return_value_dict.items(): ++ for string, value in list(str_to_return_value_dict.items()): + dicts_by_length[len(string)].append((string, value)) + + output = [] +- for length, pairs in dicts_by_length.items(): ++ for length, pairs in list(dicts_by_length.items()): + output.append((length, _single_trie(sorted(pairs), 0))) + + return output +--- a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/update_css_ranking.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/update_css_ranking.py 2025-01-16 02:26:08.548013613 +0800 +@@ -7,7 +7,7 @@ + # Run `python update_css_ranking.py ` + # to update the ranking from API to + +-import urllib2 ++import urllib.request, urllib.error, urllib.parse + import json + import sys + import cluster +@@ -36,7 +36,7 @@ + css_ranking_api: url to CSS ranking api + + """ +- css_ranking = json.loads(urllib2.urlopen(css_ranking_api).read()) ++ css_ranking = json.loads(urllib.request.urlopen(css_ranking_api).read()) + css_ranking_content = {"properties": {}, "data": []} + css_ranking_content["data"] = [ + property_["property_name"] for property_ in sorted( +@@ -90,16 +90,16 @@ + + """ + css_ranking = sorted( +- json.loads(urllib2.urlopen(css_ranking_api).read()), ++ json.loads(urllib.request.urlopen(css_ranking_api).read()), + key=lambda x: -x["day_percentage"]) + total_css_properties = len(css_ranking) + css_ranking_dictionary = dict( + [(x["property_name"], x["day_percentage"] * 100) for x in css_ranking]) + css_ranking_cdf = dict( +- zip([x["property_name"] for x in css_ranking], [ ++ list(zip([x["property_name"] for x in css_ranking], [ + float(i) / total_css_properties + for i in range(total_css_properties) +- ])) ++ ]))) + css_properties = json5_generator.Json5File.load_from_files( + [CSS_PROPERTIES]).name_dictionaries + +--- a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/blinkbuild/PRESUBMIT.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/blinkbuild/PRESUBMIT.py 2025-01-16 02:26:08.548013613 +0800 +@@ -2,7 +2,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found 
in the LICENSE file. + +-from __future__ import print_function ++ + + + def _RunBindingsTests(input_api, output_api): +--- a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/blinkbuild/name_style_converter_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/blinkbuild/name_style_converter_test.py 2025-01-16 02:26:08.548013613 +0800 +@@ -7,8 +7,8 @@ + + import unittest + +-from name_style_converter import NameStyleConverter +-from name_style_converter import tokenize_name ++from .name_style_converter import NameStyleConverter ++from .name_style_converter import tokenize_name + + + class SmartTokenizerTest(unittest.TestCase): +--- a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/core/css/make_css_property_names.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/core/css/make_css_property_names.py 2025-01-16 02:26:08.548013613 +0800 +@@ -52,7 +52,7 @@ + 'property_id_bit_length': + self._css_properties.property_id_bit_length, + 'max_name_length': +- max(map(len, self._css_properties.properties_by_id)), ++ max(list(map(len, self._css_properties.properties_by_id))), + } + + @gperf.use_jinja_gperf_template( +--- a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/core/css/make_css_value_id_mappings.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/core/css/make_css_value_id_mappings.py 2025-01-16 02:26:08.548013613 +0800 +@@ -58,7 +58,7 @@ + longest_segment: the start and end indices of the longest segment + + """ +- segment_list = zip(segments[:-1], segments[1:]) ++ segment_list = list(zip(segments[:-1], segments[1:])) + return max(segment_list, key=lambda x: x[1] - x[0]) + + +@@ -85,11 +85,11 @@ + Build the switch case statements of other enums not in the + segment. Enums in the segment will be computed in default clause. + """ +- property_enum_order = range(len(property_['keywords'])) ++ property_enum_order = list(range(len(property_['keywords']))) + css_enum_order = [ + name_to_position_dictionary[x] for x in property_['keywords'] + ] +- enum_pair_list = zip(css_enum_order, property_enum_order) ++ enum_pair_list = list(zip(css_enum_order, property_enum_order)) + enum_segment, enum_pair_list = _find_continuous_segment(enum_pair_list) + longest_segment = _find_largest_segment(enum_segment) + +@@ -128,8 +128,8 @@ + [self.css_values_dictionary_file], + default_parameters=self.default_parameters).name_dictionaries + name_to_position_dictionary = dict( +- zip([x['name'].original for x in css_values_dictionary], +- range(len(css_values_dictionary)))) ++ list(zip([x['name'].original for x in css_values_dictionary], ++ list(range(len(css_values_dictionary)))))) + + for property_ in self.css_properties.properties_including_aliases: + include_paths.update(property_['include_paths']) +--- a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/core/css/make_cssom_types.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/core/css/make_cssom_types.py 2025-01-16 02:26:08.548013613 +0800 +@@ -31,8 +31,8 @@ + property_['typedom_types'] = types + + # Generate CSSValueID values from keywords. 
+- property_['keywordIDs'] = map(enum_key_for_css_keyword, +- property_['keywords']) ++ property_['keywordIDs'] = list(map(enum_key_for_css_keyword, ++ property_['keywords'])) + + self._outputs = { + 'cssom_types.cc': self.generate_types, +--- a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/core/css/make_style_shorthands.py 2025-01-14 21:29:17.879228694 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/core/css/make_style_shorthands.py 2025-01-16 02:26:08.548013613 +0800 +@@ -88,8 +88,7 @@ + def create_expansions(longhands): + flags = collect_runtime_flags(longhands) + expansions = list( +- map(lambda mask: Expansion(longhands, flags, mask), +- range(1 << len(flags)))) ++ [Expansion(longhands, flags, mask) for mask in range(1 << len(flags))]) + assert len(expansions) > 0 + # We generate 2^N expansions for N flags, so enforce some limit. + assert len(flags) <= 4, 'Too many runtime flags for a single shorthand' +@@ -121,13 +120,12 @@ + map(id_for_css_property, property_['longhands'])) + + longhands = list( +- map(lambda name: json5_properties.properties_by_name[name], +- property_['longhands'])) ++ [json5_properties.properties_by_name[name] for name in property_['longhands']]) + property_['expansions'] = create_expansions(longhands) + for longhand_enum_key in property_['longhand_enum_keys']: + self._longhand_dictionary[longhand_enum_key].append(property_) + +- for longhands in self._longhand_dictionary.values(): ++ for longhands in list(self._longhand_dictionary.values()): + # Sort first by number of longhands in decreasing order, then + # alphabetically + longhands.sort( +--- a/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/core/style/make_computed_style_base.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/build/scripts/core/style/make_computed_style_base.py 2025-01-16 02:26:08.548013613 +0800 +@@ -135,7 +135,7 @@ + fields_in_current_group = group_dict.pop(None) + subgroups = [ + _dict_to_group(subgroup_name, subgroup_dict) +- for subgroup_name, subgroup_dict in group_dict.items() ++ for subgroup_name, subgroup_dict in list(group_dict.items()) + ] + return Group(name, subgroups, _reorder_fields(fields_in_current_group)) + +@@ -240,7 +240,7 @@ + enums[enum.type_name] = enum + + # Return the enums sorted by type name +- return list(sorted(enums.values(), key=lambda e: e.type_name)) ++ return list(sorted(list(enums.values()), key=lambda e: e.type_name)) + + + def _create_property_field(property_): +@@ -412,11 +412,11 @@ + popularity in the ranking. 
+ """ + return dict( +- zip(properties_ranking, [ ++ list(zip(properties_ranking, [ + bisect.bisect_left(partition_rule, + float(i) / len(properties_ranking)) + 1 + for i in range(len(properties_ranking)) +- ])) ++ ]))) + + + def _best_rank(prop, ranking_map): +--- a/src/3rdparty/chromium/third_party/blink/renderer/modules/bluetooth/testing/clusterfuzz/PRESUBMIT.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/modules/bluetooth/testing/clusterfuzz/PRESUBMIT.py 2025-01-16 02:26:08.548013613 +0800 +@@ -11,7 +11,7 @@ + test_cmd = input_api.Command( + name=cmd_name, cmd=cmd, kwargs={}, message=output_api.PresubmitError) + if input_api.verbose: +- print 'Running ' + cmd_name ++ print('Running ' + cmd_name) + return input_api.RunTests([test_cmd]) + + +--- a/src/3rdparty/chromium/third_party/blink/renderer/modules/bluetooth/testing/clusterfuzz/constraints.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/modules/bluetooth/testing/clusterfuzz/constraints.py 2025-01-16 02:26:08.548013613 +0800 +@@ -64,7 +64,7 @@ + + + def _ToJsStr(s): +- return u'\'{}\''.format(s) ++ return '\'{}\''.format(s) + + + def _get_random_number(): +@@ -84,7 +84,7 @@ + try: + fuzzed_string = fuzzed_string.decode('utf8') + except UnicodeDecodeError: +- print 'Can\'t decode fuzzed string. Trying again.' ++ print('Can\'t decode fuzzed string. Trying again.') + else: + # Escape 'escape' characters. + fuzzed_string = fuzzed_string.replace('\\', r'\\') +@@ -101,7 +101,7 @@ + exp_max_value = math.log(max_value, 2) + return '[{}]'.format(', '.join( + str(utils.UniformExpoInteger(0, exp_max_value)) +- for _ in xrange(length))) ++ for _ in range(length))) + + + def _get_typed_array(): +@@ -359,8 +359,8 @@ + + def get_get_primary_services_call(): + call = random.choice([ +- u'getPrimaryService({service_uuid})', +- u'getPrimaryServices({optional_service_uuid})' ++ 'getPrimaryService({service_uuid})', ++ 'getPrimaryServices({optional_service_uuid})' + ]) + + return call.format( +@@ -370,8 +370,8 @@ + + def get_characteristics_call(): + call = random.choice([ +- u'getCharacteristic({characteristic_uuid})', +- u'getCharacteristics({optional_characteristic_uuid})' ++ 'getCharacteristic({characteristic_uuid})', ++ 'getCharacteristics({optional_characteristic_uuid})' + ]) + + return call.format( +@@ -389,7 +389,7 @@ + ' service = Array.isArray(services)'\ + ' ? services[{} % services.length]'\ + ' : services' +- return string.format(random.randint(0, sys.maxint)) ++ return string.format(random.randint(0, sys.maxsize)) + + + def get_pick_a_characteristic(): +@@ -401,7 +401,7 @@ + ' characteristic = Array.isArray(characteristics)'\ + ' ? 
characteristics[{} % characteristics.length]'\ + ' : characteristics' +- return string.format(random.randint(0, sys.maxint)) ++ return string.format(random.randint(0, sys.maxsize)) + + + def get_reload_id(): +--- a/src/3rdparty/chromium/third_party/blink/renderer/modules/bluetooth/testing/clusterfuzz/fuzz_integration_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/modules/bluetooth/testing/clusterfuzz/fuzz_integration_test.py 2025-01-16 02:26:08.548013613 +0800 +@@ -33,7 +33,7 @@ + + written_files = glob.glob(os.path.join(self._output_dir, '*.html')) + +- self.assertEquals(100, len(written_files), 'Should have written 100 ' ++ self.assertEqual(100, len(written_files), 'Should have written 100 ' + 'test files.') + + for test_case in written_files: +--- a/src/3rdparty/chromium/third_party/blink/renderer/modules/bluetooth/testing/clusterfuzz/fuzz_main_run.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/modules/bluetooth/testing/clusterfuzz/fuzz_main_run.py 2025-01-16 02:26:08.548013613 +0800 +@@ -20,12 +20,12 @@ + import parameter_fuzzer + import test_case_fuzzer + +-JS_FILES_AND_PARAMETERS = ((u'testharness.js', u'INCLUDE_TESTHARNESS'), +- (u'testharnessreport.js', +- u'INCLUDE_REPORT'), (u'bluetooth-test.js', +- u'INCLUDE_BLUETOOTH_TEST'), +- (u'bluetooth-fake-devices.js', +- u'INCLUDE_BLUETOOTH_FAKE_DEVICES')) ++JS_FILES_AND_PARAMETERS = (('testharness.js', 'INCLUDE_TESTHARNESS'), ++ ('testharnessreport.js', ++ 'INCLUDE_REPORT'), ('bluetooth-test.js', ++ 'INCLUDE_BLUETOOTH_TEST'), ++ ('bluetooth-fake-devices.js', ++ 'INCLUDE_BLUETOOTH_FAKE_DEVICES')) + + SCRIPT_PREFIX = '\n' +@@ -83,7 +83,7 @@ + Returns: + A string containing the test case. + """ +- print 'Generating test file based on {}'.format(template_path) ++ print('Generating test file based on {}'.format(template_path)) + + # Read the template. + template_file_handle = open(template_path) +@@ -128,8 +128,8 @@ + prefix=test_file_prefix, suffix='.html', dir=output_dir) + + with os.fdopen(file_descriptor, 'wb') as output: +- print 'Writing {} bytes to \'{}\''.format( +- len(test_file_data), file_path) ++ print('Writing {} bytes to \'{}\''.format( ++ len(test_file_data), file_path)) + output.write(test_file_data) + + return file_path +@@ -138,10 +138,10 @@ + def main(): + args = _GetArguments() + +- print 'Generating {} test file(s).'.format(args.no_of_files) +- print 'Writing test files to: \'{}\''.format(args.output_dir) ++ print('Generating {} test file(s).'.format(args.no_of_files)) ++ print('Writing test files to: \'{}\''.format(args.output_dir)) + if args.input_dir: +- print 'Reading data bundle from: \'{}\''.format(args.input_dir) ++ print('Reading data bundle from: \'{}\''.format(args.input_dir)) + + # Get Templates + current_path = os.path.dirname(os.path.realpath(__file__)) +@@ -165,8 +165,8 @@ + args.output_dir) + + if args.content_shell_dir: +- print '{} --run-web-tests {}'.format(args.content_shell_dir, +- test_file_path) ++ print('{} --run-web-tests {}'.format(args.content_shell_dir, ++ test_file_path)) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/third_party/blink/renderer/modules/bluetooth/testing/clusterfuzz/setup.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/renderer/modules/bluetooth/testing/clusterfuzz/setup.py 2025-01-16 02:26:08.548013613 +0800 +@@ -59,7 +59,7 @@ + + # Copy necessary files. 
+     for r in RESOURCES:
+-        print('Copying: ' + os.path.abspath(os.path.join(current_path, r)))
++        print('Copying: ' + os.path.abspath(os.path.join(current_path, r)))
+         shutil.copy(os.path.join(current_path, r), resources_path)
+ 
+     return resources_path
+@@ -112,7 +112,7 @@
+         format='bztar',
+         root_dir=os.path.join(current_path, os.pardir),
+         base_dir='clusterfuzz')
+-    print('File written to: ' + compressed_file_path + '.tar.bz2')
++    print('File written to: ' + compressed_file_path + '.tar.bz2')
+ 
+ 
+ if __name__ == '__main__':
+--- a/src/3rdparty/chromium/third_party/blink/renderer/modules/bluetooth/testing/clusterfuzz/test_case_fuzzer.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/renderer/modules/bluetooth/testing/clusterfuzz/test_case_fuzzer.py	2025-01-16 02:26:08.548013613 +0800
+@@ -164,7 +164,7 @@
+     """
+     result = random.choice(BASE_TOKENS)
+ 
+-    for _ in xrange(random.randint(1, MAX_NUM_OF_TOKENS)):
++    for _ in range(random.randint(1, MAX_NUM_OF_TOKENS)):
+         # Get random token.
+         token = random.choice(TOKENS)
+ 
+--- a/src/3rdparty/chromium/third_party/blink/tools/extract_expectation_names.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/extract_expectation_names.py	2025-01-16 02:26:08.548013613 +0800
+@@ -26,4 +26,4 @@
+     parser = TaggedTestListParser(f.read())
+     for test_expectation in parser.expectations:
+         if test_expectation.test:
+-            print test_expectation.test
++            print(test_expectation.test)
+--- a/src/3rdparty/chromium/third_party/blink/tools/plan_blink_move.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/plan_blink_move.py	2025-01-16 02:26:08.548013613 +0800
+@@ -85,10 +85,10 @@
+ def main():
+     fs = FileSystem()
+     file_pairs = plan_blink_move(fs, sys.argv[1:])
+-    print 'Show renaming plan. It contains files not in the repository.'
+-    print ' => '
++    print('Show renaming plan. It contains files not in the repository.')
++    print(' => ')
+     for pair in file_pairs:
+-        print '%s\t=>\t%s' % pair
++        print('%s\t=>\t%s' % pair)
+ 
+ 
+ if __name__ == '__main__':
+--- a/src/3rdparty/chromium/third_party/blink/tools/print_stale_test_expectations_entries.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/print_stale_test_expectations_entries.py	2025-01-16 02:26:08.548013613 +0800
+@@ -33,9 +33,9 @@
+ import datetime
+ import json
+ import optparse
+-import StringIO
++import io
+ import sys
+-import urllib2
++import urllib.request, urllib.error, urllib.parse
+ 
+ from blinkpy.common.host import Host
+ from blinkpy.web_tests.models.test_expectations import TestExpectationParser
+@@ -69,7 +69,7 @@
+         port = self.host.port_factory.get()
+         expectations = port.expectations_dict()
+         parser = TestExpectationParser(port, all_tests=(), is_lint_mode=False)
+-        expectations_file, expectations_contents = expectations.items()[0]
++        expectations_file, expectations_contents = list(expectations.items())[0]
+         expectation_lines = parser.parse(expectations_file,
+                                          expectations_contents)
+         csv_rows = []
+@@ -81,7 +81,7 @@
+         self.write_csv(csv_rows)
+ 
+     def write_csv(self, rows):
+-        out = StringIO.StringIO()
++        out = io.StringIO()
+         writer = csv.writer(out)
+         writer.writerow(CSV_ROW_HEADERS)
+         for row in rows:
+@@ -105,22 +105,22 @@
+             self.populate_bug_info(bug_link, test_name)
+             # Return the stale bug's information. 
+ if all(self.is_stale(bug_link) for bug_link in bug_links): +- print line.original_string.strip() ++ print(line.original_string.strip()) + return [ + bug_links[0], self.bug_info[bug_links[0]].filename, + self.bug_info[bug_links[0]].days_since_last_update, + self.bug_info[bug_links[0]].owner, + self.bug_info[bug_links[0]].status + ] +- except urllib2.HTTPError as error: ++ except urllib.error.HTTPError as error: + if error.code == 404: + message = 'got 404, bug does not exist.' + elif error.code == 403: + message = 'got 403, not accessible. Not able to tell if it\'s stale.' + else: + message = str(error) +- print >> sys.stderr, 'Error when checking %s: %s' % ( +- ','.join(bug_links), message) ++ print('Error when checking %s: %s' % ( ++ ','.join(bug_links), message), file=sys.stderr) + return None + + def populate_bug_info(self, bug_link, test_name): +@@ -129,13 +129,13 @@ + # In case there's an error in the request, don't make the same request again. + bug_number = bug_link.strip(CRBUG_PREFIX) + url = GOOGLE_CODE_URL % bug_number +- response = urllib2.urlopen(url) ++ response = urllib.request.urlopen(url) + parsed = json.loads(response.read()) + parsed_time = datetime.datetime.strptime( + parsed['updated'].split(".")[0] + "UTC", "%Y-%m-%dT%H:%M:%S%Z") + time_delta = datetime.datetime.now() - parsed_time + owner = 'none' +- if 'owner' in parsed.keys(): ++ if 'owner' in list(parsed.keys()): + owner = parsed['owner']['name'] + self.bug_info[bug_link] = BugInfo(bug_link, test_name, time_delta.days, + owner, parsed['state']) +--- a/src/3rdparty/chromium/third_party/blink/tools/print_web_test_json_results.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/print_web_test_json_results.py 2025-01-16 02:26:08.548013613 +0800 +@@ -35,7 +35,7 @@ + with open(args[0], 'r') as fp: + txt = fp.read() + else: +- print >> sys.stderr, "file not found: %s" % args[0] ++ print("file not found: %s" % args[0], file=sys.stderr) + sys.exit(1) + else: + txt = host.filesystem.read_text_file( +@@ -51,12 +51,12 @@ + + tests_to_print = [] + if options.passes: +- tests_to_print += passes.keys() ++ tests_to_print += list(passes.keys()) + if options.failures: +- tests_to_print += failures.keys() ++ tests_to_print += list(failures.keys()) + if options.flakes: +- tests_to_print += flakes.keys() +- print "\n".join(sorted(tests_to_print)) ++ tests_to_print += list(flakes.keys()) ++ print("\n".join(sorted(tests_to_print))) + + if options.ignored_failures_path: + with open(options.ignored_failures_path, 'r') as fp: +@@ -67,12 +67,12 @@ + _, ignored_failures, _ = decode_results(results, options.expected) + new_failures = set(failures.keys()) - set(ignored_failures.keys()) + if new_failures: +- print "New failures:" +- print "\n".join(sorted(new_failures)) +- print ++ print("New failures:") ++ print("\n".join(sorted(new_failures))) ++ print() + if ignored_failures: +- print "Ignored failures:" +- print "\n".join(sorted(ignored_failures.keys())) ++ print("Ignored failures:") ++ print("\n".join(sorted(ignored_failures.keys()))) + if new_failures: + return 1 + return 0 +@@ -83,7 +83,7 @@ + failures = {} + flakes = {} + passes = {} +- for (test, result) in tests.iteritems(): ++ for (test, result) in tests.items(): + if include_expected or result.get('is_unexpected'): + actual_results = result['actual'].split() + expected_results = result['expected'].split() +@@ -106,7 +106,7 @@ + # Cloned from blinkpy.web_tests.layout_package.json_results_generator + # so that this code can stand alone. 
+ result = {} +- for name, data in trie.iteritems(): ++ for name, data in trie.items(): + if prefix: + name = prefix + "/" + name + +--- a/src/3rdparty/chromium/third_party/blink/tools/print_web_test_ordering.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/print_web_test_ordering.py 2025-01-16 02:26:08.548013613 +0800 +@@ -52,7 +52,7 @@ + + stats = convert_trie_to_flat_paths(stats_trie) + stats_by_worker = {} +- for test_name, data in stats.items(): ++ for test_name, data in list(stats.items()): + worker = "worker/" + str(data["results"][0]) + if worker not in stats_by_worker: + stats_by_worker[worker] = [] +@@ -63,18 +63,18 @@ + }) + + for worker in sorted(stats_by_worker.keys()): +- print worker + ':' ++ print(worker + ':') + for test in sorted( + stats_by_worker[worker], key=lambda test: test["number"]): +- print test["name"] +- print ++ print(test["name"]) ++ print() + + + def convert_trie_to_flat_paths(trie, prefix=None): + # Cloned from blinkpy.web_tests.layout_package.json_results_generator + # so that this code can stand alone. + result = {} +- for name, data in trie.iteritems(): ++ for name, data in trie.items(): + if prefix: + name = prefix + "/" + name + if "results" in data: +--- a/src/3rdparty/chromium/third_party/blink/tools/read_checksum_from_png.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/read_checksum_from_png.py 2025-01-16 02:26:08.548013613 +0800 +@@ -27,7 +27,7 @@ + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-from __future__ import with_statement ++ + import sys + + from blinkpy.common import read_checksum_from_png +@@ -35,5 +35,5 @@ + if '__main__' == __name__: + for filename in sys.argv[1:]: + with open(filename, 'r') as filehandle: +- print "%s: %s" % (read_checksum_from_png.read_checksum(filehandle), +- filename) ++ print("%s: %s" % (read_checksum_from_png.read_checksum(filehandle), ++ filename)) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/bindings/bindings_tests.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/bindings/bindings_tests.py 2025-01-16 02:26:08.548013613 +0800 +@@ -136,7 +136,7 @@ + # So the files will be generated under + # output_dir/core/bindings/tests/idls/core. + # To avoid this issue, we need to clear relative_dir here. +- for value in info['interfaces_info'].itervalues(): ++ for value in info['interfaces_info'].values(): + value['relative_dir'] = '' + component_info = info_collector.get_component_info_as_dict( + runtime_enabled_features) +@@ -169,14 +169,14 @@ + non_test_idl_paths) + test_interfaces_info = {} + test_component_info = {} +- for component, paths in test_idl_paths.iteritems(): ++ for component, paths in test_idl_paths.items(): + test_interfaces_info[component], test_component_info[component] = \ + collect_interfaces_info(paths) + # In order to allow test IDL files to override the production IDL files if + # they have the same interface name, process the test IDL files after the + # non-test IDL files. + info_individuals = [non_test_interfaces_info] + \ +- test_interfaces_info.values() ++ list(test_interfaces_info.values()) + compute_interfaces_info_overall(info_individuals) + # Add typedefs which are specified in the actual IDL files to the testing + # component info. 
+@@ -239,22 +239,22 @@
+     reference_basename = os.path.basename(reference_filename)
+ 
+     if not os.path.isfile(reference_filename):
+-        print 'Missing reference file!'
+-        print '(if adding new test, update reference files)'
+-        print reference_basename
+-        print
++        print('Missing reference file!')
++        print('(if adding new test, update reference files)')
++        print(reference_basename)
++        print()
+         return False
+ 
+     if not filecmp.cmp(reference_filename, output_filename):
+         # cmp is much faster than diff, and usual case is "no difference",
+         # so only run diff if cmp detects a difference
+-        print 'FAIL: %s' % reference_basename
++        print('FAIL: %s' % reference_basename)
+         if not suppress_diff:
+-            print diff(reference_filename, output_filename)
++            print(diff(reference_filename, output_filename))
+         return False
+ 
+     if verbose:
+-        print 'PASS: %s' % reference_basename
++        print('PASS: %s' % reference_basename)
+     return True
+ 
+ def identical_output_files(output_files):
+@@ -281,9 +281,9 @@
+         if relpath not in generated_files:
+             excess_files.append(relpath)
+     if excess_files:
+-        print('Excess reference files! '
++        print('Excess reference files! '
+               '(probably cruft from renaming or deleting):\n' +
+-              '\n'.join(excess_files))
++              '\n'.join(excess_files))
+         return False
+     return True
+ 
+@@ -379,11 +379,11 @@
+ 
+     if passed:
+         if verbose:
+-            print
+-            print PASS_MESSAGE
++            print()
++            print(PASS_MESSAGE)
+         return 0
+-    print
+-    print FAIL_MESSAGE
++    print()
++    print(FAIL_MESSAGE)
+     return 1
+ 
+ 
+@@ -391,7 +391,7 @@
+     # Generate output into the reference directory if resetting results, or
+     # a temp directory if not.
+     if reset_results:
+-        print 'Resetting results'
++        print('Resetting results')
+         return bindings_tests(REFERENCE_DIRECTORY, verbose, suppress_diff)
+     with TemporaryDirectory() as temp_dir:
+         # TODO(peria): Remove this hack.
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/bindings/collect_idls_into_json.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/bindings/collect_idls_into_json.py	2025-01-16 02:26:08.548013613 +0800
+@@ -411,7 +411,7 @@
+     Returns:
+         A merged dictionary of |interface_dict| with |partial_dict|.
+     """
+-    for interface_name, partial in partials_dict.iteritems():
++    for interface_name, partial in partials_dict.items():
+         interface = interfaces_dict.get(interface_name)
+         if not interface:
+             raise Exception(
+@@ -435,8 +435,8 @@
+     for implement in implement_node_list:
+         reference = implement.GetProperty(_PROP_REFERENCE)
+         implement = implement.GetName()
+-        if (reference not in interfaces_dict.keys()
+-                or implement not in interfaces_dict.keys()):
++        if (reference not in list(interfaces_dict.keys())
++                or implement not in list(interfaces_dict.keys())):
+             raise Exception(
+                 'There is not corresponding implement or reference interface.')
+         for member in _MEMBERS:
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/bindings/collect_idls_into_json_test.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/bindings/collect_idls_into_json_test.py	2025-01-16 02:26:08.548013613 +0800
+@@ -77,8 +77,8 @@
+                         collect_idls_into_json.get_const_value(const), '1')
+                     self.assertTrue(
+                         const_member.issuperset(
+-                            collect_idls_into_json.const_node_to_dict(const).
+-                            keys()))
++                            list(collect_idls_into_json.const_node_to_dict(const). 
++ keys()))) + else: + self.assertEqual(const, None) + +@@ -95,8 +95,8 @@ + 'Node') + self.assertTrue( + attribute_member.issuperset( +- collect_idls_into_json.attribute_node_to_dict( +- attribute).keys())) ++ list(collect_idls_into_json.attribute_node_to_dict( ++ attribute).keys()))) + else: + self.assertEqual(attribute, None) + +@@ -114,8 +114,8 @@ + 'Node') + self.assertTrue( + operate_member.issuperset( +- collect_idls_into_json.operation_node_to_dict( +- operation).keys())) ++ list(collect_idls_into_json.operation_node_to_dict( ++ operation).keys()))) + for argument in collect_idls_into_json.get_argument_node_list( + operation): + if argument: +@@ -126,8 +126,8 @@ + 'Node') + self.assertTrue( + argument_member.issuperset( +- collect_idls_into_json.argument_node_to_dict( +- argument).keys())) ++ list(collect_idls_into_json.argument_node_to_dict( ++ argument).keys()))) + else: + self.assertEqual(argument, None) + else: +@@ -140,19 +140,19 @@ + self.assertEqual(extattr.GetClass(), 'ExtAttribute') + self.assertEqual(extattr.GetName(), 'CustomToV8') + self.assertEqual( +- collect_idls_into_json.extattr_node_to_dict(extattr). +- keys(), ['Name']) ++ list(collect_idls_into_json.extattr_node_to_dict(extattr). ++ keys()), ['Name']) + self.assertEqual( +- collect_idls_into_json.extattr_node_to_dict(extattr). +- values(), ['CustomToV8']) ++ list(collect_idls_into_json.extattr_node_to_dict(extattr). ++ values()), ['CustomToV8']) + else: + self.assertEqual(extattr, None) + + def test_inherit_node_to_dict(self): + inherit = collect_idls_into_json.inherit_node_to_dict(self.definition) + if inherit: +- self.assertEqual(inherit.keys(), ['Parent']) +- self.assertEqual(inherit.values(), ['EventTarget']) ++ self.assertEqual(list(inherit.keys()), ['Parent']) ++ self.assertEqual(list(inherit.values()), ['EventTarget']) + else: + self.assertEqual(inherit, []) + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/bindings/generate_idl_diff.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/bindings/generate_idl_diff.py 2025-01-16 02:26:08.548013613 +0800 +@@ -137,7 +137,7 @@ + |new_interfaces| + """ + annotated = {} +- for interface_name, interface in new_interfaces.items(): ++ for interface_name, interface in list(new_interfaces.items()): + if interface_name in old_interfaces: + annotated_interface, is_changed = members_diff( + old_interfaces[interface_name], interface) +@@ -148,7 +148,7 @@ + interface = annotate_all_members(interface, DIFF_TAG_ADDED) + interface[DIFF_TAG] = DIFF_TAG_ADDED + annotated[interface_name] = interface +- for interface_name, interface in old_interfaces.items(): ++ for interface_name, interface in list(old_interfaces.items()): + interface = annotate_all_members(interface, DIFF_TAG_DELETED) + interface[DIFF_TAG] = DIFF_TAG_DELETED + annotated.update(old_interfaces) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/bindings/print_idl_diff.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/bindings/print_idl_diff.py 2025-01-16 02:26:08.548013613 +0800 +@@ -183,14 +183,14 @@ + Returns: + A list of sorted interface names + """ +- interface_list = interfaces.values() ++ interface_list = list(interfaces.values()) + removed, added, unspecified = group_by_tag(interface_list) + # pylint: disable=W0110 +- removed = map(lambda interface: interface['Name'], removed) ++ removed = [interface['Name'] for interface in removed] + # pylint: disable=W0110 +- added = map(lambda 
interface: interface['Name'], added) ++ added = [interface['Name'] for interface in added] + # pylint: disable=W0110 +- unspecified = map(lambda interface: interface['Name'], unspecified) ++ unspecified = [interface['Name'] for interface in unspecified] + sorted_interface_names = removed + added + unspecified + return sorted_interface_names + +@@ -382,14 +382,14 @@ + Args: + A sorted diff + """ +- for interface_name, interface in diff.iteritems(): ++ for interface_name, interface in diff.items(): + print_member_with_color(interface, out) + out.change_color(Colorize.YELLOW) + out.write('[[') + out.write(interface_name) + out.writeln(']]') + out.reset_color() +- for member_name, member in interface.iteritems(): ++ for member_name, member in interface.items(): + if member_name == 'ExtAttributes': + out.writeln('ExtAttributes') + print_extattributes(member, out) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/message_pool.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/message_pool.py 2025-01-16 02:26:08.549096928 +0800 +@@ -39,10 +39,10 @@ + instead. + """ + +-import cPickle ++import pickle + import logging + import multiprocessing +-import Queue ++import queue + import sys + import traceback + +@@ -68,8 +68,8 @@ + self._name = 'manager' + self._running_inline = (self._num_workers == 1) + if self._running_inline: +- self._messages_to_worker = Queue.Queue() +- self._messages_to_manager = Queue.Queue() ++ self._messages_to_worker = queue.Queue() ++ self._messages_to_manager = queue.Queue() + else: + self._messages_to_worker = multiprocessing.Queue() + self._messages_to_manager = multiprocessing.Queue() +@@ -92,7 +92,7 @@ + from_user=True, + logs=())) + +- for _ in xrange(self._num_workers): ++ for _ in range(self._num_workers): + self._messages_to_worker.put( + _Message( + self._name, +@@ -110,7 +110,7 @@ + if self._running_inline or self._can_pickle(self._host): + host = self._host + +- for worker_number in xrange(self._num_workers): ++ for worker_number in range(self._num_workers): + worker = _Worker(host, self._messages_to_manager, + self._messages_to_worker, self._worker_factory, + worker_number, self._running_inline, +@@ -171,7 +171,7 @@ + + def _can_pickle(self, host): + try: +- cPickle.dumps(host) ++ pickle.dumps(host) + return True + except TypeError: + return False +@@ -190,7 +190,7 @@ + method = getattr(self, '_handle_' + message.name) + assert method, 'bad message %s' % repr(message) + method(message.src, *message.args) +- except Queue.Empty: ++ except queue.Empty: + pass + + +@@ -273,7 +273,7 @@ + break + + _log.debug('%s exiting', self.name) +- except Queue.Empty: ++ except queue.Empty: + assert False, '%s: ran out of messages in worker queue.' 
% self.name
+         except KeyboardInterrupt:
+             self._raise(sys.exc_info())
+@@ -307,7 +307,7 @@
+     def _raise(self, exc_info):
+         exception_type, exception_value, exception_traceback = exc_info
+         if self._running_inline:
+-            raise exception_type, exception_value, exception_traceback
++            raise exception_value.with_traceback(exception_traceback)
+ 
+         if exception_type == KeyboardInterrupt:
+             _log.debug('%s: interrupted, exiting', self.name)
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/pretty_diff_unittest.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/pretty_diff_unittest.py	2025-01-16 02:26:08.549096928 +0800
+@@ -21,7 +21,7 @@
+         lines = []
+         diff, remaining_lines = DiffFile.parse(lines)
+         self.assertIsNone(diff)
+-        self.assertEquals(remaining_lines, [])
++        self.assertEqual(remaining_lines, [])
+ 
+     def test_100percent_similarity(self):
+         # crrev.com/c576df77d72abe47154ff2489bb035aa20892f7f
+@@ -35,7 +35,7 @@
+         ]
+         diff, remaining_lines = DiffFile.parse(lines)
+         self.assertIsNotNone(diff)
+-        self.assertEquals(remaining_lines[0], lines[4])
++        self.assertEqual(remaining_lines[0], lines[4])
+ 
+     def test_emptify_text(self):
+         lines = [
+@@ -47,7 +47,7 @@
+         ]
+         diff, remaining_lines = DiffFile.parse(lines)
+         self.assertIsNotNone(diff)
+-        self.assertEquals(remaining_lines, [])
++        self.assertEqual(remaining_lines, [])
+         self._assert_file_status(diff, 'M')
+ 
+     def test_remove_text(self):
+@@ -59,7 +59,7 @@
+         ]
+         diff, remaining_lines = DiffFile.parse(lines)
+         self.assertIsNotNone(diff)
+-        self.assertEquals(remaining_lines, [])
++        self.assertEqual(remaining_lines, [])
+         self._assert_file_status(diff, 'D')
+ 
+     def test_remove_zero_byte_text(self):
+@@ -69,7 +69,7 @@
+         ]
+         diff, remaining_lines = DiffFile.parse(lines)
+         self.assertIsNotNone(diff)
+-        self.assertEquals(remaining_lines, [])
++        self.assertEqual(remaining_lines, [])
+         self._assert_file_status(diff, 'D')
+ 
+     def test_add_empty_text(self):
+@@ -79,7 +79,7 @@
+         ]
+         diff, remaining_lines = DiffFile.parse(lines)
+         self.assertIsNotNone(diff)
+-        self.assertEquals(remaining_lines, [])
++        self.assertEqual(remaining_lines, [])
+         self._assert_file_status(diff, 'A')
+ 
+     def test_emptify_binary(self):
+@@ -91,7 +91,7 @@
+         ]
+         diff, remaining_lines = DiffFile.parse(lines)
+         self.assertIsNotNone(diff)
+-        self.assertEquals(remaining_lines, [])
++        self.assertEqual(remaining_lines, [])
+         self._assert_file_status(diff, 'M')
+ 
+     def test_remove_binary(self):
+@@ -104,7 +104,7 @@
+         ]
+         diff, remaining_lines = DiffFile.parse(lines)
+         self.assertIsNotNone(diff)
+-        self.assertEquals(remaining_lines, [])
++        self.assertEqual(remaining_lines, [])
+         self._assert_file_status(diff, 'D')
+ 
+     def test_add_binary(self):
+@@ -117,44 +117,44 @@
+         ]
+         diff, remaining_lines = DiffFile.parse(lines)
+         self.assertIsNotNone(diff)
+-        self.assertEquals(remaining_lines, [])
++        self.assertEqual(remaining_lines, [])
+         self._assert_file_status(diff, 'A')
+ 
+ 
+ class TestDiffHunk(unittest.TestCase):
+     def test_find_operations(self):
+-        self.assertEquals(DiffHunk._find_operations([]), [])
+-        self.assertEquals(DiffHunk._find_operations([' ']), [])
++        self.assertEqual(DiffHunk._find_operations([]), [])
++        self.assertEqual(DiffHunk._find_operations([' ']), [])
+ 
+-        self.assertEquals(DiffHunk._find_operations(['-']), [([0], [])])
+-        self.assertEquals(
++        self.assertEqual(DiffHunk._find_operations(['-']), [([0], [])])
++        self.assertEqual(
+             DiffHunk._find_operations(['-', '-']), [([0, 1], [])])
+-        
self.assertEquals( ++ self.assertEqual( + DiffHunk._find_operations([' ', '-', '-']), [([1, 2], [])]) +- self.assertEquals( ++ self.assertEqual( + DiffHunk._find_operations(['-', '-', ' ']), [([0, 1], [])]) + +- self.assertEquals(DiffHunk._find_operations(['+']), [([], [0])]) +- self.assertEquals( ++ self.assertEqual(DiffHunk._find_operations(['+']), [([], [0])]) ++ self.assertEqual( + DiffHunk._find_operations(['+', '+']), [([], [0, 1])]) +- self.assertEquals( ++ self.assertEqual( + DiffHunk._find_operations([' ', '+', '+']), [([], [1, 2])]) +- self.assertEquals( ++ self.assertEqual( + DiffHunk._find_operations(['+', '+', ' ']), [([], [0, 1])]) + +- self.assertEquals(DiffHunk._find_operations(['-', '+']), [([0], [1])]) +- self.assertEquals( ++ self.assertEqual(DiffHunk._find_operations(['-', '+']), [([0], [1])]) ++ self.assertEqual( + DiffHunk._find_operations(['-', '-', '+', '+']), + [([0, 1], [2, 3])]) +- self.assertEquals( ++ self.assertEqual( + DiffHunk._find_operations([' ', '-', '-', '+']), [([1, 2], [3])]) +- self.assertEquals( ++ self.assertEqual( + DiffHunk._find_operations(['-', '-', '+', '+', ' ']), + [([0, 1], [2, 3])]) +- self.assertEquals( ++ self.assertEqual( + DiffHunk._find_operations(['-', '-', '+', '+', '-']), + [([0, 1], [2, 3]), ([4], [])]) +- self.assertEquals( ++ self.assertEqual( + DiffHunk._find_operations(['-', '+', '-', '+']), [([0], [1]), + ([2], [3])]) + +@@ -164,15 +164,15 @@ + return annotations + + def test_annotate(self): +- self.assertEquals(self._annotate(['-abcdef'], 0, 2, 4), [[(2, 4)]]) +- self.assertEquals( ++ self.assertEqual(self._annotate(['-abcdef'], 0, 2, 4), [[(2, 4)]]) ++ self.assertEqual( + self._annotate(['-abcdef', '-ghi'], 0, 2, 6), [[(2, 6)], None]) +- self.assertEquals( ++ self.assertEqual( + self._annotate(['-abcdef', '-ghi'], 0, 2, 7), [[(2, 6)], [(0, 1)]]) +- self.assertEquals( ++ self.assertEqual( + self._annotate(['-abcdef', '-ghi', '-jkl'], 0, 2, 11), + [[(2, 6)], [(0, 3)], [(0, 2)]]) +- self.assertEquals( ++ self.assertEqual( + self._annotate(['+', '+abc', ' de'], 0, 0, 2), + [[(0, 0)], [(0, 2)], None]) + +@@ -187,7 +187,7 @@ + lines = ['literal 6', 'NcmZSh&&2%iKL7{~0|Ed5', '', 'literal 0...'] + binary, remaining_lines = BinaryHunk.parse(lines) + self.assertIsNotNone(binary) +- self.assertEquals(remaining_lines[0], lines[3]) ++ self.assertEqual(remaining_lines[0], lines[3]) + self.assertTrue( + 'data:image/png;base64,' in binary.prettify('image/png', 'add')) + +@@ -195,6 +195,6 @@ + lines = ['literal 6', 'NcmZSh&&2%iKL7{~0|Ed5', ''] + binary, remaining_lines = BinaryHunk.parse(lines) + self.assertIsNotNone(binary) +- self.assertEquals(remaining_lines, []) ++ self.assertEqual(remaining_lines, []) + self.assertTrue( + '\xe6\xf9\xd9\xcf\x00\x17\x93''' + ) + checksum = read_checksum_from_png.read_checksum(filehandle) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/unified_diff.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/unified_diff.py 2025-01-16 02:26:08.549096928 +0800 +@@ -25,7 +25,7 @@ + + + def _to_raw_bytes(string_value): +- if isinstance(string_value, unicode): ++ if isinstance(string_value, str): + return string_value.encode('utf-8') + return string_value + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/unified_diff_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/unified_diff_unittest.py 2025-01-16 02:26:08.550180243 +0800 +@@ -25,17 +25,17 @@ + # filenames 
are unicode, with regular or malformed input (expected or + # actual input is always raw bytes, not unicode). + unified_diff('exp', 'act', 'exp.txt', 'act.txt') +- unified_diff('exp', 'act', u'exp.txt', 'act.txt') +- unified_diff('exp', 'act', u'a\xac\u1234\u20ac\U00008000', 'act.txt') ++ unified_diff('exp', 'act', 'exp.txt', 'act.txt') ++ unified_diff('exp', 'act', 'a\xac\u1234\u20ac\U00008000', 'act.txt') + + def test_unified_diff_handles_non_ascii_chars(self): + unified_diff('exp' + chr(255), 'act', 'exp.txt', 'act.txt') +- unified_diff('exp' + chr(255), 'act', u'exp.txt', 'act.txt') ++ unified_diff('exp' + chr(255), 'act', 'exp.txt', 'act.txt') + + def test_unified_diff_handles_unicode_inputs(self): + # Though expected and actual files should always be read in with no + # encoding (and be stored as str objects), test unicode inputs just to + # be safe. +- unified_diff(u'exp', 'act', 'exp.txt', 'act.txt') +- unified_diff(u'a\xac\u1234\u20ac\U00008000', 'act', 'exp.txt', ++ unified_diff('exp', 'act', 'exp.txt', 'act.txt') ++ unified_diff('a\xac\u1234\u20ac\U00008000', 'act', 'exp.txt', + 'act.txt') +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/checkout/baseline_optimizer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/checkout/baseline_optimizer.py 2025-01-16 02:26:08.550180243 +0800 +@@ -106,7 +106,7 @@ + """ + results_by_directory = {} + directories = set() +- for port in self._ports.values(): ++ for port in list(self._ports.values()): + directories.update(set(self._relative_baseline_search_path(port))) + + for directory in directories: +@@ -160,14 +160,14 @@ + def _move_baselines(self, baseline_name, results_by_directory, + new_results_by_directory): + data_for_result = {} +- for directory, result in results_by_directory.items(): ++ for directory, result in list(results_by_directory.items()): + if result not in data_for_result: + source = self._join_directory(directory, baseline_name) + data_for_result[result] = self._filesystem.read_binary_file( + source) + + fs_files = [] +- for directory, result in results_by_directory.items(): ++ for directory, result in list(results_by_directory.items()): + if new_results_by_directory.get(directory) != result: + file_name = self._join_directory(directory, baseline_name) + if self._filesystem.exists(file_name): +@@ -184,7 +184,7 @@ + _log.debug(' (Nothing to delete)') + + file_names = [] +- for directory, result in new_results_by_directory.items(): ++ for directory, result in list(new_results_by_directory.items()): + if results_by_directory.get(directory) != result: + destination = self._join_directory(directory, baseline_name) + self._filesystem.maybe_make_directory( +@@ -221,7 +221,7 @@ + def _port_from_baseline_dir(self, baseline_dir): + """Returns a Port object from the given baseline directory.""" + baseline_dir = self._filesystem.basename(baseline_dir) +- for port in self._ports.values(): ++ for port in list(self._ports.values()): + if self._filesystem.basename( + port.baseline_version_dir()) == baseline_dir: + return port +@@ -305,7 +305,7 @@ + test_name, self._virtual_base(baseline_name)) + results_by_port_name = self._results_by_port_name(results_by_directory) + +- for port_name in self._ports.keys(): ++ for port_name in list(self._ports.keys()): + assert port_name in results_by_port_name + if results_by_port_name[port_name] != virtual_root_digest: + return +@@ -383,7 +383,7 @@ + A dictionary mapping port names to their baselines. 
+ """ + results_by_port_name = {} +- for port_name, port in self._ports.items(): ++ for port_name, port in list(self._ports.items()): + for directory in self._relative_baseline_search_path(port): + if directory in results_by_directory: + results_by_port_name[port_name] = results_by_directory[ +@@ -399,7 +399,7 @@ + """Returns a list of directories immediately preceding the root on + search paths.""" + directories = set() +- for port in self._ports.values(): ++ for port in list(self._ports.values()): + directory = self._filesystem.relpath( + self._baseline_search_path(port)[-1], self._parent_of_tests) + directories.add(directory) +@@ -462,7 +462,7 @@ + # baseline is found (or the root is reached), i.e., keep the most + # generic one among duplicate baselines. + new_results_by_directory = copy.copy(results_by_directory) +- for port_name, port in self._ports.items(): ++ for port_name, port in list(self._ports.items()): + current_result = results_by_port_name.get(port_name) + + # This happens if we're missing baselines for a port. +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/checkout/baseline_optimizer_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/checkout/baseline_optimizer_unittest.py 2025-01-16 02:26:08.550180243 +0800 +@@ -109,7 +109,7 @@ + self.fs.join(web_tests_dir, 'VirtualTestSuites'), + '[{"prefix": "gpu", "bases": ["fast/canvas"], "args": ["--foo"]}]') + +- for dirname, contents in results_by_directory.items(): ++ for dirname, contents in list(results_by_directory.items()): + self.fs.write_binary_file( + self.fs.join(web_tests_dir, dirname, baseline_name), contents) + +@@ -120,7 +120,7 @@ + baseline_optimizer.optimize( + self.fs.join(baseline_dirname, test_name), suffix)) + +- for dirname, contents in directory_to_new_results.items(): ++ for dirname, contents in list(directory_to_new_results.items()): + path = self.fs.join(web_tests_dir, dirname, baseline_name) + if contents is None: + # Check files that are explicitly marked as absent. +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/checkout/git_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/checkout/git_unittest.py 2025-01-16 02:26:08.550180243 +0800 +@@ -168,7 +168,7 @@ + # Even if diff.noprefix is enabled, create_patch() produces diffs with prefixes. + self._run(['git', 'config', 'diff.noprefix', 'true']) + patch = git.create_patch() +- self.assertRegexpMatches( ++ self.assertRegex( + patch, r'^diff --git a/test_file_commit1 b/test_file_commit1') + + def test_rename_files(self): +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/net/file_uploader.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/net/file_uploader.py 2025-01-16 02:26:08.550180243 +0800 +@@ -27,7 +27,7 @@ + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + import mimetypes +-import urllib2 ++import urllib.request, urllib.error, urllib.parse + + from blinkpy.common.net.network_transaction import NetworkTransaction + +@@ -58,7 +58,7 @@ + lines.append('--' + BOUNDARY) + lines.append('Content-Disposition: form-data; name="%s"' % key) + lines.append('') +- if isinstance(value, unicode): ++ if isinstance(value, str): + value = value.encode('utf-8') + lines.append(value) + +@@ -68,7 +68,7 @@ + % (key, filename)) + lines.append('Content-Type: %s' % get_mime_type(filename)) + lines.append('') +- if isinstance(value, unicode): ++ if isinstance(value, str): + value = value.encode('utf-8') + lines.append(value) + +@@ -103,9 +103,9 @@ + # FIXME: Setting a timeout, either globally using socket.setdefaulttimeout() + # or in urlopen(), doesn't appear to work on Mac 10.5 with Python 2.7. + # For now we will ignore the timeout value and hope for the best. +- request = urllib2.Request(self._url, data, ++ request = urllib.request.Request(self._url, data, + {'Content-Type': content_type}) +- return urllib2.urlopen(request) ++ return urllib.request.urlopen(request) + + return NetworkTransaction( + timeout_seconds=self._timeout_seconds).run(callback) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/net/git_cl.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/net/git_cl.py 2025-01-16 02:26:08.550180243 +0800 +@@ -258,8 +258,8 @@ + """Returns the latest entries from from a Build to TryJobStatus dict.""" + if try_results is None: + return None +- latest_builds = filter_latest_builds(try_results.keys()) +- return {b: s for b, s in try_results.items() if b in latest_builds} ++ latest_builds = filter_latest_builds(list(try_results.keys())) ++ return {b: s for b, s in list(try_results.items()) if b in latest_builds} + + def try_job_results(self, + issue_number=None, +@@ -399,13 +399,13 @@ + + @staticmethod + def all_finished(try_results): +- return all(s.status == 'COMPLETED' for s in try_results.values()) ++ return all(s.status == 'COMPLETED' for s in list(try_results.values())) + + @staticmethod + def all_success(try_results): + return all(s.status == 'COMPLETED' and s.result == 'SUCCESS' +- for s in try_results.values()) ++ for s in list(try_results.values())) + + @staticmethod + def some_failed(try_results): +- return any(s.result == 'FAILURE' for s in try_results.values()) ++ return any(s.result == 'FAILURE' for s in list(try_results.values())) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/net/git_cl_mock.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/net/git_cl_mock.py 2025-01-16 02:26:08.550180243 +0800 +@@ -37,7 +37,7 @@ + def run(self, args): + self.calls.append(['git', 'cl'] + args) + arg_key = "".join(args) +- if self._git_error_output and arg_key in self._git_error_output.keys(): ++ if self._git_error_output and arg_key in list(self._git_error_output.keys()): + raise ScriptError(output=self._git_error_output[arg_key]) + return 'mock output' + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/net/network_transaction.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/net/network_transaction.py 2025-01-16 02:26:08.550180243 +0800 +@@ -28,7 +28,7 @@ + + import logging + import time +-import urllib2 ++import urllib.request, urllib.error, urllib.parse + + _log = logging.getLogger(__name__) + +@@ -57,7 +57,7 @@ + while True: + try: + 
return request() +- except urllib2.HTTPError as error: ++ except urllib.error.HTTPError as error: + if self._return_none_on_404 and error.code == 404: + return None + self._check_for_timeout() +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/net/network_transaction_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/net/network_transaction_unittest.py 2025-01-16 02:26:08.550180243 +0800 +@@ -26,7 +26,7 @@ + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-from urllib2 import HTTPError ++from urllib.error import HTTPError + from blinkpy.common.net.network_transaction import NetworkTransaction, NetworkTimeout + from blinkpy.common.system.log_testing import LoggingTestCase + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/net/results_fetcher.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/net/results_fetcher.py 2025-01-16 02:26:08.550180243 +0800 +@@ -30,7 +30,7 @@ + import logging + import json + import re +-import urllib ++import urllib.request, urllib.parse, urllib.error + + from blinkpy.common.memoized import memoized + from blinkpy.common.net.web import Web +@@ -81,7 +81,7 @@ + Build(builder_name, build_number)) + if step_name: + return '%s/%s/%s/layout-test-results' % ( +- url_base, build_number, urllib.quote(step_name)) ++ url_base, build_number, urllib.parse.quote(step_name)) + return '%s/%s/layout-test-results' % (url_base, build_number) + return self.accumulated_results_url_base(builder_name) + +@@ -140,7 +140,7 @@ + + url = '%s/testfile?%s' % ( + TEST_RESULTS_SERVER, +- urllib.urlencode({ ++ urllib.parse.urlencode({ + 'builder': build.builder_name, + 'buildnumber': build.build_number, + 'name': 'full_results.json', +@@ -195,7 +195,7 @@ + + url = '%s/testfile?%s' % ( + TEST_RESULTS_SERVER, +- urllib.urlencode({ ++ urllib.parse.urlencode({ + 'builder': build.builder_name, + 'buildnumber': build.build_number, + 'name': 'full_results.json', +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/net/results_fetcher_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/net/results_fetcher_test.py 2025-01-16 02:26:08.550180243 +0800 +@@ -59,7 +59,7 @@ + '/10/blink_web_tests%20%28with%20patch%29/layout-test-results') + + def test_results_url_with_non_numeric_build_number(self): +- with self.assertRaisesRegexp(AssertionError, ++ with self.assertRaisesRegex(AssertionError, + 'expected numeric build number'): + TestResultsFetcher().results_url('Test Builder', 'ba5eba11') + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/net/web.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/net/web.py 2025-01-16 02:26:08.550180243 +0800 +@@ -26,13 +26,13 @@ + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-import urllib2 ++import urllib.request, urllib.error, urllib.parse + + from blinkpy.common.net.network_transaction import NetworkTransaction + + + class Web(object): +- class _HTTPRedirectHandler2(urllib2.HTTPRedirectHandler): # pylint:disable=no-init ++ class _HTTPRedirectHandler2(urllib.request.HTTPRedirectHandler): # pylint:disable=no-init + """A subclass of HTTPRedirectHandler to support 308 Permanent Redirect.""" + + def http_error_308(self, req, fp, code, msg, headers): # pylint:disable=unused-argument +@@ -45,13 +45,13 @@ + lambda: self.request('GET', url).read()) + + def request(self, method, url, data=None, headers=None): +- opener = urllib2.build_opener(Web._HTTPRedirectHandler2) +- request = urllib2.Request(url=url, data=data) ++ opener = urllib.request.build_opener(Web._HTTPRedirectHandler2) ++ request = urllib.request.Request(url=url, data=data) + + request.get_method = lambda: method + + if headers: +- for key, value in headers.items(): ++ for key, value in list(headers.items()): + request.add_header(key, value) + + return opener.open(request) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/net/web_mock.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/net/web_mock.py 2025-01-16 02:26:08.550180243 +0800 +@@ -26,7 +26,7 @@ + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-import urllib2 ++import urllib.request, urllib.error, urllib.parse + + + class MockWeb(object): +@@ -55,7 +55,7 @@ + self._info = MockInfo(values.get('headers', {})) + + if int(self.status_code) >= 400: +- raise urllib2.HTTPError( ++ raise urllib.error.HTTPError( + url=self.url, + code=self.status_code, + msg='Received error status code: {}'.format(self.status_code), +@@ -77,7 +77,7 @@ + # The name of the headers (keys) are case-insensitive, and values are stripped. + self._headers = { + key.lower(): value.strip() +- for key, value in headers.iteritems() ++ for key, value in headers.items() + } + + def getheader(self, header): +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/net/web_test_results.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/net/web_test_results.py 2025-01-16 02:26:08.550180243 +0800 +@@ -39,7 +39,7 @@ + + def suffixes_for_test_result(self): + suffixes = set() +- artifact_names = self._result_dict.get('artifacts', {}).keys() ++ artifact_names = list(self._result_dict.get('artifacts', {}).keys()) + # Add extensions for mismatches. + if 'actual_text' in artifact_names: + suffixes.add('txt') +@@ -96,7 +96,7 @@ + baseline, including an implicit all-PASS testharness baseline (i.e. 
a + previously all-PASS testharness test starts to fail).""" + actual_results = self.actual_results().split(' ') +- artifact_names = self._result_dict.get('artifacts', {}).keys() ++ artifact_names = list(self._result_dict.get('artifacts', {}).keys()) + return ('FAIL' in actual_results and any( + artifact_name.startswith('actual') + for artifact_name in artifact_names) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/executive.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/executive.py 2025-01-16 02:26:08.550180243 +0800 +@@ -72,7 +72,7 @@ + self.cwd = cwd + + def message_with_output(self): +- return unicode(self) ++ return str(self) + + def command_name(self): + command_path = self.script_args +@@ -286,7 +286,7 @@ + # See https://bugs.webkit.org/show_bug.cgi?id=37528 + # for an example of a regression caused by passing a unicode string directly. + # FIXME: We may need to encode differently on different platforms. +- if isinstance(user_input, unicode): ++ if isinstance(user_input, str): + user_input = user_input.encode(self._child_process_encoding()) + return (self.PIPE, user_input) + +@@ -297,7 +297,7 @@ + args = self._stringify_args(args) + escaped_args = [] + for arg in args: +- if isinstance(arg, unicode): ++ if isinstance(arg, str): + # Escape any non-ascii characters for easy copy/paste + arg = arg.encode('unicode_escape') + # FIXME: Do we need to fix quotes here? +@@ -403,9 +403,9 @@ + + def _stringify_args(self, args): + # Popen will throw an exception if args are non-strings (like int()) +- string_args = map(unicode, args) ++ string_args = list(map(str, args)) + # The Windows implementation of Popen cannot handle unicode strings. :( +- return map(self._encode_argument_if_needed, string_args) ++ return list(map(self._encode_argument_if_needed, string_args)) + + def popen(self, args, **kwargs): + assert not kwargs.get('shell') +@@ -427,7 +427,7 @@ + + def map(self, thunk, arglist, processes=None): + if sys.platform == 'win32' or len(arglist) == 1: +- return map(thunk, arglist) ++ return list(map(thunk, arglist)) + pool = multiprocessing.Pool( + processes=(processes or multiprocessing.cpu_count())) + try: +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/executive_mock.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/executive_mock.py 2025-01-16 02:26:08.550180243 +0800 +@@ -29,7 +29,7 @@ + import collections + import logging + import os +-import StringIO ++import io + + from blinkpy.common.system.executive import ScriptError + +@@ -39,9 +39,9 @@ + class MockProcess(object): + def __init__(self, stdout='MOCK STDOUT\n', stderr='', returncode=0): + self.pid = 42 +- self.stdout = StringIO.StringIO(stdout) +- self.stderr = StringIO.StringIO(stderr) +- self.stdin = StringIO.StringIO() ++ self.stdout = io.StringIO(stdout) ++ self.stderr = io.StringIO(stderr) ++ self.stdin = io.StringIO() + self.returncode = returncode + + def wait(self): +@@ -101,11 +101,11 @@ + self.full_calls.append(MockCall(args=args, kwargs=kwargs)) + + def check_running_pid(self, pid): +- return pid in self._running_pids.values() ++ return pid in list(self._running_pids.values()) + + def running_pids(self, process_name_filter): + running_pids = [] +- for process_name, process_pid in self._running_pids.iteritems(): ++ for process_name, process_pid in self._running_pids.items(): + if process_name_filter(process_name): + 
running_pids.append(process_pid) + +@@ -113,7 +113,7 @@ + return running_pids + + def command_for_printing(self, args): +- string_args = map(unicode, args) ++ string_args = list(map(str, args)) + return ' '.join(string_args) + + # The argument list should match Executive.run_command, even if +@@ -166,7 +166,7 @@ + output = self._output + if return_stderr: + output += self._stderr +- if decode_output and not isinstance(output, unicode): ++ if decode_output and not isinstance(output, str): + output = output.decode('utf-8') + + return output +@@ -181,7 +181,7 @@ + pass + + def popen(self, args, cwd=None, env=None, **_): +- assert all(isinstance(arg, basestring) for arg in args) ++ assert all(isinstance(arg, str) for arg in args) + self._append_call(args, cwd=cwd, env=env) + if self._should_log: + cwd_string = '' +@@ -199,7 +199,7 @@ + return self._proc + + def call(self, args, **_): +- assert all(isinstance(arg, basestring) for arg in args) ++ assert all(isinstance(arg, str) for arg in args) + self._append_call(args) + _log.info('Mock call: %s', args) + +@@ -209,7 +209,7 @@ + num_previous_calls = len(self.full_calls) + command_outputs = [] + for cmd_line, cwd in commands: +- assert all(isinstance(arg, basestring) for arg in cmd_line) ++ assert all(isinstance(arg, str) for arg in cmd_line) + command_outputs.append( + [0, self.run_command(cmd_line, cwd=cwd), '']) + +@@ -219,7 +219,7 @@ + return command_outputs + + def map(self, thunk, arglist, processes=None): +- return map(thunk, arglist) ++ return list(map(thunk, arglist)) + + @property + def calls(self): +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/executive_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/executive_unittest.py 2025-01-16 02:26:08.550180243 +0800 +@@ -100,7 +100,7 @@ + with self.assertRaises(AssertionError): + executive.run_command('echo') + with self.assertRaises(AssertionError): +- executive.run_command(u'echo') ++ executive.run_command('echo') + executive.run_command(command_line('echo', 'foo')) + executive.run_command(tuple(command_line('echo', 'foo'))) + +@@ -121,7 +121,7 @@ + to Executive.run* methods, and they will return unicode() + objects by default unless decode_output=False + """ +- unicode_tor_input = u"WebKit \u2661 Tor Arne Vestb\u00F8!" ++ unicode_tor_input = "WebKit \u2661 Tor Arne Vestb\u00F8!" 
+ if sys.platform == 'win32': + encoding = 'mbcs' + else: +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/filesystem.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/filesystem.py 2025-01-16 02:26:08.550180243 +0800 +@@ -75,7 +75,7 @@ + """ + if sys.platform == 'win32' and len(path) >= self.WINDOWS_MAX_PATH: + assert not path.startswith(r'\\'), "must not already be UNC" +- return ur'\\?\%s' % (self.abspath(path), ) ++ return r'\\?\%s' % (self.abspath(path), ) + return path + + def abspath(self, path): +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/filesystem_mock.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/filesystem_mock.py 2025-01-16 02:26:08.550180243 +0800 +@@ -30,7 +30,7 @@ + import hashlib + import os + import re +-import StringIO ++import io + import unittest + + from blinkpy.common.system.filesystem import _remove_contents, _sanitize_filename +@@ -178,11 +178,11 @@ + + # We could use fnmatch.fnmatch, but that might not do the right thing on Windows. + existing_files = [ +- path for path, contents in self.files.items() ++ path for path, contents in list(self.files.items()) + if contents is not None + ] +- return filter(path_filter, existing_files) + filter( +- path_filter, self.dirs) ++ return list(filter(path_filter, existing_files)) + list(filter( ++ path_filter, self.dirs)) + + def isabs(self, path): + return path.startswith(self.sep) +@@ -524,7 +524,7 @@ + class ReadableTextFileObject(ReadableBinaryFileObject): + def __init__(self, fs, path, data): + super(ReadableTextFileObject, self).__init__( +- fs, path, StringIO.StringIO(data.decode('utf-8'))) ++ fs, path, io.StringIO(data.decode('utf-8'))) + + def close(self): + self.data.close() +@@ -542,8 +542,8 @@ + def __iter__(self): + return self.data.__iter__() + +- def next(self): +- return self.data.next() ++ def __next__(self): ++ return next(self.data) + + def seek(self, offset, whence=os.SEEK_SET): + self.data.seek(offset, whence) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/filesystem_mock_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/filesystem_mock_unittest.py 2025-01-16 02:26:08.551263558 +0800 +@@ -111,7 +111,7 @@ + mock_files = {'foo/bar/baz': '', 'foo/a': '', 'foo/b': '', 'foo/c': ''} + host = MockHost() + host.filesystem = MockFileSystem(files=mock_files) +- self.assertEquals( ++ self.assertEqual( + host.filesystem.walk(mock_dir), [('foo', ['bar'], ['a', 'b', 'c']), + ('foo/bar', [], ['baz'])]) + +@@ -128,7 +128,7 @@ + } + host = MockHost() + host.filesystem = MockFileSystem(files=mock_files) +- self.assertEquals( ++ self.assertEqual( + host.filesystem.walk(mock_dir), [('foo', ['a', 'bar'], ['c', 'b']), + ('foo/a', ['z'], ['x', 'y']), + ('foo/a/z', [], ['lyrics']), +@@ -145,4 +145,4 @@ + mock_files = {'foo': '', 'bar': '', 'a': ''} + filesystem = MockFileSystem(files=mock_files) + filesystem.make_executable('foo') +- self.assertEquals(filesystem.executable_files, set(['foo'])) ++ self.assertEqual(filesystem.executable_files, set(['foo'])) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/filesystem_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/filesystem_unittest.py 2025-01-16 02:26:08.551263558 +0800 +@@ -223,7 +223,7 @@ + with 
fs.mkdtemp(prefix='filesystem_unittest_') as d: + self.assertEqual(fs.listdir(d), []) + new_file = os.path.join(d, 'foo') +- fs.write_text_file(new_file, u'foo') ++ fs.write_text_file(new_file, 'foo') + self.assertEqual(fs.listdir(d), ['foo']) + os.remove(new_file) + +@@ -232,7 +232,7 @@ + with fs.mkdtemp(prefix='filesystem_unittest_') as d: + self.assertEqual(list(fs.walk(d)), [(d, [], [])]) + new_file = os.path.join(d, 'foo') +- fs.write_text_file(new_file, u'foo') ++ fs.write_text_file(new_file, 'foo') + self.assertEqual(list(fs.walk(d)), [(d, [], ['foo'])]) + os.remove(new_file) + +@@ -284,7 +284,7 @@ + fs = FileSystem() + text_path = None + +- unicode_text_string = u'\u016An\u012Dc\u014Dde\u033D' ++ unicode_text_string = '\u016An\u012Dc\u014Dde\u033D' + try: + text_path = tempfile.mktemp(prefix='tree_unittest_') + file = fs.open_text_file_for_writing(text_path) +@@ -305,7 +305,7 @@ + text_path = None + binary_path = None + +- unicode_text_string = u'\u016An\u012Dc\u014Dde\u033D' ++ unicode_text_string = '\u016An\u012Dc\u014Dde\u033D' + hex_equivalent = '\xC5\xAA\x6E\xC4\xAD\x63\xC5\x8D\x64\x65\xCC\xBD' + try: + text_path = tempfile.mktemp(prefix='tree_unittest_') +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/output_capture.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/output_capture.py 2025-01-16 02:26:08.551263558 +0800 +@@ -31,7 +31,7 @@ + import logging + import sys + +-from StringIO import StringIO ++from io import StringIO + + + class OutputCapture(object): +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/path.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/path.py 2025-01-16 02:26:08.551263558 +0800 +@@ -27,7 +27,7 @@ + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + """Generic routines to convert platform-specific paths to URIs.""" + +-import urllib ++import urllib.request, urllib.parse, urllib.error + + + def abspath_to_uri(platform, path): +@@ -41,7 +41,7 @@ + # when converting filenames to files. Instead of using urllib's default + # rules, we allow a small list of other characters through un-escaped. + # It's unclear if this is the best possible solution. +- return urllib.quote(path, safe='/+:') ++ return urllib.parse.quote(path, safe='/+:') + + + def _convert_path(platform, path): +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/platform_info.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/platform_info.py 2025-01-16 02:26:08.551263558 +0800 +@@ -95,7 +95,7 @@ + + def total_bytes_memory(self): + if self.is_mac(): +- return long( ++ return int( + self._executive.run_command(['sysctl', '-n', 'hw.memsize'])) + return None + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/platform_info_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/platform_info_unittest.py 2025-01-16 02:26:08.551263558 +0800 +@@ -84,9 +84,9 @@ + def test_real_code(self): + # This test makes sure the real (unmocked) code actually works. 
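# Illustrative sketch (not part of the patch) of the two renames the
# path.py and platform_info.py hunks above depend on: py2's urllib.quote
# now lives at urllib.parse.quote with the same escaping rules, and py2's
# long is simply int on py3.
from urllib.parse import quote
assert quote('/foo bar+baz:', safe='/+:') == '/foo%20bar+baz:'
assert int('17179869184') == 17179869184  # a value that was a py2 long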
+ info = PlatformInfo(sys, platform, FileSystem(), Executive()) +- self.assertNotEquals(info.os_name, '') +- self.assertNotEquals(info.os_version, '') +- self.assertNotEquals(info.display_name(), '') ++ self.assertNotEqual(info.os_name, '') ++ self.assertNotEqual(info.os_version, '') ++ self.assertNotEqual(info.display_name(), '') + self.assertTrue(info.is_mac() or info.is_win() or info.is_linux() + or info.is_freebsd()) + self.assertIsNotNone(info.terminal_width()) +@@ -239,16 +239,16 @@ + + def test_display_name(self): + info = self.make_info(fake_sys('darwin')) +- self.assertNotEquals(info.display_name(), '') ++ self.assertNotEqual(info.display_name(), '') + + info = self.make_info(fake_sys('win32', tuple([6, 1, 7600]))) +- self.assertNotEquals(info.display_name(), '') ++ self.assertNotEqual(info.display_name(), '') + + info = self.make_info(fake_sys('linux2')) +- self.assertNotEquals(info.display_name(), '') ++ self.assertNotEqual(info.display_name(), '') + + info = self.make_info(fake_sys('freebsd9')) +- self.assertNotEquals(info.display_name(), '') ++ self.assertNotEqual(info.display_name(), '') + + def test_total_bytes_memory(self): + info = self.make_info( +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/profiler.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/profiler.py 2025-01-16 02:26:08.551263558 +0800 +@@ -44,7 +44,7 @@ + profiler_name = profiler_name or cls.default_profiler_name( + host.platform) + profiler_class = next( +- itertools.ifilter(lambda profiler: profiler.name == profiler_name, ++ filter(lambda profiler: profiler.name == profiler_name, + profilers), None) + if not profiler_class: + return None +@@ -143,7 +143,7 @@ + def profile_after_exit(self): + # google-pprof doesn't check its arguments, so we have to. + if not self._host.filesystem.exists(self._output_path): +- print 'Failed to gather profile, %s does not exist.' % self._output_path ++ print('Failed to gather profile, %s does not exist.' % self._output_path) + return + + pprof_args = [ +@@ -151,13 +151,13 @@ + self._output_path + ] + profile_text = self._host.executive.run_command(pprof_args) +- print 'First 10 lines of pprof --text:' +- print self._first_ten_lines_of_profile(profile_text) +- print 'http://google-perftools.googlecode.com/svn/trunk/doc/cpuprofile.html documents output.' +- print +- print 'To interact with the the full profile, including produce graphs:' +- print ' '.join( +- [self._pprof_path(), self._executable_path, self._output_path]) ++ print('First 10 lines of pprof --text:') ++ print(self._first_ten_lines_of_profile(profile_text)) ++ print('http://google-perftools.googlecode.com/svn/trunk/doc/cpuprofile.html documents output.') ++ print() ++ print('To interact with the the full profile, including produce graphs:') ++ print(' '.join( ++ [self._pprof_path(), self._executable_path, self._output_path])) + + + class Perf(SingleFileOutputProfiler): +@@ -199,22 +199,22 @@ + perf_exitcode = self._perf_process.wait() + # The exit code should always be -2, as we're always interrupting perf. 
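# Illustrative sketch of the itertools.ifilter -> filter swap in
# profiler.py above: py3's filter() is already lazy, so next() with a
# default pulls the first match without materialising a list.
profilers = ['pprof', 'perf', 'sample']
assert next(filter(lambda name: name == 'perf', profilers), None) == 'perf'
assert next(filter(lambda name: name == 'dtrace', profilers), None) is None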
+ if perf_exitcode not in (0, -2): +- print "'perf record' failed (exit code: %i), can't process results:" % perf_exitcode ++ print("'perf record' failed (exit code: %i), can't process results:" % perf_exitcode) + return + + perf_args = [ + self._perf_path(), 'report', '--call-graph', 'none', '--input', + self._output_path + ] +- print "First 10 lines of 'perf report --call-graph=none':" ++ print("First 10 lines of 'perf report --call-graph=none':") + +- print ' '.join(perf_args) ++ print(' '.join(perf_args)) + perf_output = self._host.executive.run_command(perf_args) +- print self._first_ten_lines_of_profile(perf_output) ++ print(self._first_ten_lines_of_profile(perf_output)) + +- print 'To view the full profile, run:' +- print ' '.join([self._perf_path(), 'report', '-i', self._output_path]) +- print # An extra line between tests looks nicer. ++ print('To view the full profile, run:') ++ print(' '.join([self._perf_path(), 'report', '-i', self._output_path])) ++ print() # An extra line between tests looks nicer. + + + class Sample(SingleFileOutputProfiler): +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/stack_utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/stack_utils.py 2025-01-16 02:26:08.551263558 +0800 +@@ -45,7 +45,7 @@ + """Returns a stack object that can be used to dump a stack trace for + the given thread id (or None if the id is not found). + """ +- for tid, stack in sys._current_frames().items(): ++ for tid, stack in list(sys._current_frames().items()): + if tid == thread_id: + return stack + return None +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/stack_utils_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/stack_utils_unittest.py 2025-01-16 02:26:08.551263558 +0800 +@@ -33,7 +33,7 @@ + + + def current_thread_id(): +- thread_id, _ = sys._current_frames().items()[0] ++ thread_id, _ = list(sys._current_frames().items())[0] + return thread_id + + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/system_host_mock.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/system_host_mock.py 2025-01-16 02:26:08.551263558 +0800 +@@ -26,7 +26,7 @@ + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-from StringIO import StringIO ++from io import StringIO + + from blinkpy.common.system.executive_mock import MockExecutive + from blinkpy.common.system.filesystem_mock import MockFileSystem +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/user.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/common/system/user.py 2025-01-16 02:26:08.551263558 +0800 +@@ -75,7 +75,7 @@ + for value in re.split(r"\s*,\s*", response): + parts = value.split('-') + if len(parts) == 2: +- indices += range(int(parts[0]) - 1, int(parts[1])) ++ indices += list(range(int(parts[0]) - 1, int(parts[1]))) + else: + indices.append(int(value) - 1) + except ValueError: +@@ -97,11 +97,11 @@ + list_items, + can_choose_multiple=False, + input_func=raw_input): +- print list_title ++ print(list_title) + i = 0 + for item in list_items: + i += 1 +- print '%2d. %s' % (i, item) ++ print('%2d. 
%s' % (i, item)) + return cls._wait_on_list_response(list_items, can_choose_multiple, + input_func) + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/presubmit/audit_non_blink_usage.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/presubmit/audit_non_blink_usage.py 2025-01-16 02:26:08.551263558 +0800 +@@ -1366,11 +1366,11 @@ + path, + [(i + 1, l) for i, l in enumerate(contents.splitlines())]) + if disallowed_identifiers: +- print '%s uses disallowed identifiers:' % path ++ print('%s uses disallowed identifiers:' % path) + for i in disallowed_identifiers: +- print(i.line, i.identifier, i.advice) ++ print((i.line, i.identifier, i.advice)) + except IOError as e: +- print 'could not open %s: %s' % (path, e) ++ print('could not open %s: %s' % (path, e)) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/style/checker_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/style/checker_unittest.py 2025-01-16 02:26:08.551263558 +0800 +@@ -215,7 +215,7 @@ + def test_max_reports_per_category(self): + """Check that _MAX_REPORTS_PER_CATEGORY is valid.""" + all_categories = self._all_categories() +- for category in _MAX_REPORTS_PER_CATEGORY.iterkeys(): ++ for category in _MAX_REPORTS_PER_CATEGORY.keys(): + self.assertIn(category, all_categories, + 'Key "%s" is not a category' % category) + +@@ -286,7 +286,7 @@ + } + + dispatcher = CheckerDispatcher() +- for file_path, expected_result in files.items(): ++ for file_path, expected_result in list(files.items()): + self.assertEqual( + dispatcher.should_check_and_strip_carriage_returns(file_path), + expected_result, 'Checking: %s' % file_path) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/style/filter.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/style/filter.py 2025-01-16 02:26:08.551263558 +0800 +@@ -192,7 +192,7 @@ + if self._path_specific_lower is None: + self._path_specific_lower = [] + for (sub_paths, path_rules) in self._path_specific: +- sub_paths = map(str.lower, sub_paths) ++ sub_paths = list(map(str.lower, sub_paths)) + self._path_specific_lower.append((sub_paths, path_rules)) + return self._path_specific_lower + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/style/patchreader.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/style/patchreader.py 2025-01-16 02:26:08.551263558 +0800 +@@ -50,7 +50,7 @@ + """Checks style in the given patch.""" + patch_files = DiffParser(patch_string.splitlines()).files + +- for path, diff_file in patch_files.iteritems(): ++ for path, diff_file in patch_files.items(): + line_numbers = diff_file.added_or_modified_line_numbers() + _log.debug('Found %s new or modified lines in: %s', + len(line_numbers), path) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/style/checkers/cpp.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/style/checkers/cpp.py 2025-01-16 02:26:08.552346873 +0800 +@@ -862,7 +862,7 @@ + + # We'll say it should occur by line 10. Don't forget there's a + # dummy line at the front. +- for line in xrange(1, min(len(lines), 11)): ++ for line in range(1, min(len(lines), 11)): + if re.search(r'Copyright', lines[line], re.I): + break + else: # means no copyright line was found +@@ -946,7 +946,7 @@ + error: The function to call with any errors found. 
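# Illustrative sketch of why the filter.py hunk above wraps map() in
# list(): py3's map() is a one-shot iterator, so results that are indexed
# or iterated more than once must be materialised first.
sub_paths = map(str.lower, ['WebKit/Source', 'Tools'])
materialised = list(sub_paths)
assert materialised == ['webkit/source', 'tools']
assert list(sub_paths) == []  # the bare iterator is already exhausted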
+ """ + for line_number, line in enumerate(lines): +- if u'\ufffd' in line: ++ if '\ufffd' in line: + error( + line_number, 'readability/utf8', 5, + 'Line contains invalid UTF-8 (or Unicode replacement character).' +@@ -1318,7 +1318,7 @@ + return + + joined_line = '' +- for start_line_number in xrange(line_number, clean_lines.num_lines()): ++ for start_line_number in range(line_number, clean_lines.num_lines()): + start_line = clean_lines.elided[start_line_number] + joined_line += ' ' + start_line.lstrip() + body_match = search(r'{|;', start_line) +@@ -1578,7 +1578,7 @@ + The width of the line in column positions, accounting for Unicode + combining characters and wide characters. + """ +- if isinstance(line, unicode): ++ if isinstance(line, str): + width = 0 + for c in unicodedata.normalize('NFC', line): + if unicodedata.east_asian_width(c) in ('W', 'F'): +@@ -1751,7 +1751,7 @@ + # that this is rare. + end_position = Position(-1, -1) + start_col = len(virtual.group(2)) +- for start_line in xrange(linenum, min(linenum + 3, ++ for start_line in range(linenum, min(linenum + 3, + clean_lines.num_lines())): + line = clean_lines.elided[start_line][start_col:] + parameter_list = match(r'^([^(]*)\(', line) +@@ -1768,7 +1768,7 @@ + + # Look for "override" or "final" after the parameter list + # (possibly on the next few lines). +- for i in xrange(end_position.row, ++ for i in range(end_position.row, + min(end_position.row + 3, clean_lines.num_lines())): + line = clean_lines.elided[i][end_position.column:] + override_or_final = search(r'\b(override|final)\b', line) +@@ -2194,7 +2194,7 @@ + def grep(lines, pattern, error): + matches = [] + function_state = None +- for line_number in xrange(lines.num_lines()): ++ for line_number in range(lines.num_lines()): + line = (lines.elided[line_number]).rstrip() + try: + if pattern in line: +@@ -2566,7 +2566,7 @@ + required = {} + # Example of required: { '': (1219, 'less<>') } + +- for line_number in xrange(clean_lines.num_lines()): ++ for line_number in range(clean_lines.num_lines()): + line = clean_lines.elided[line_number] + if not line or line[0] == '#': + continue +@@ -2609,7 +2609,7 @@ + + # include_state is modified during iteration, so we iterate over a copy of + # the keys. +- for header in include_state.keys(): # NOLINT ++ for header in list(include_state.keys()): # NOLINT + (same_module, common_path) = files_belong_to_same_module( + abs_filename, header) + fullpath = common_path + header +@@ -2703,7 +2703,7 @@ + check_for_header_guard(filename, clean_lines, error) + + file_state = _FileState(clean_lines, file_extension) +- for line in xrange(clean_lines.num_lines()): ++ for line in range(clean_lines.num_lines()): + process_line(filename, file_extension, clean_lines, line, + include_state, function_state, class_state, file_state, + error) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/style/checkers/cpp_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/style/checkers/cpp_unittest.py 2025-01-16 02:26:08.552346873 +0800 +@@ -469,8 +469,8 @@ + # Test get line width. 
+ def test_get_line_width(self): + self.assertEqual(0, cpp_style.get_line_width('')) +- self.assertEqual(10, cpp_style.get_line_width(u'x' * 10)) +- self.assertEqual(16, cpp_style.get_line_width(u'都|道|府|県|支庁')) ++ self.assertEqual(10, cpp_style.get_line_width('x' * 10)) ++ self.assertEqual(16, cpp_style.get_line_width('都|道|府|県|支庁')) + + def test_find_next_multi_line_comment_start(self): + self.assertEqual(1, +@@ -1362,7 +1362,7 @@ + error_collector = ErrorCollector(self.assertTrue) + self.process_file_data( + 'foo.cpp', 'cpp', +- unicode(raw_bytes, 'utf8', 'replace').split('\n'), ++ str(raw_bytes, 'utf8', 'replace').split('\n'), + error_collector) + # The warning appears only once. + self.assertEqual( +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/style/checkers/python_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/style/checkers/python_unittest.py 2025-01-16 02:26:08.552346873 +0800 +@@ -63,6 +63,6 @@ + (2, 'pylint/C0303(trailing-whitespace)', 5, + '[] Trailing whitespace'), + (2, 'pylint/E0602(undefined-variable)', 5, +- u"[] Undefined variable 'error'"), ++ "[] Undefined variable 'error'"), + (3, 'pylint/W0611(unused-import)', 5, '[] Unused import math'), + ], errors) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/style/checkers/python_unittest_input.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/style/checkers/python_unittest_input.py 2025-01-16 02:26:08.552346873 +0800 +@@ -1,3 +1,3 @@ + # This file is sample input for python_unittest.py and includes problems. +-print error() ++print(error()) + import math#unused +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/style/checkers/test_expectations_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/style/checkers/test_expectations_unittest.py 2025-01-16 02:26:08.552346873 +0800 +@@ -85,7 +85,7 @@ + self.assertEqual(expected_output, + self._error_collector.get_errors()) + else: +- self.assertNotEquals('', self._error_collector.get_errors()) ++ self.assertNotEqual('', self._error_collector.get_errors()) + + # Note that a patch might change a line that introduces errors elsewhere, but we + # don't want to lint the whole file (it can unfairly punish patches for pre-existing errors). +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/style/checkers/xml.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/style/checkers/xml.py 2025-01-16 02:26:08.552346873 +0800 +@@ -21,7 +21,7 @@ + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
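# Illustrative sketch of the unicode() -> str() rewrite in cpp_unittest.py
# above: py3's str(bytes, encoding, errors) decodes, and 'replace' turns
# undecodable bytes into U+FFFD -- the same replacement character the
# readability/utf8 check looks for.
raw_bytes = b'ok \xc3\x28'
assert str(raw_bytes, 'utf8', 'replace') == 'ok \ufffd('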
+ """Checks WebKit style for XML files.""" + +-from __future__ import absolute_import ++ + + from xml.parsers import expat + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/pep8.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/pep8.py 2025-01-16 02:26:08.552346873 +0800 +@@ -44,7 +44,7 @@ + 700 statements + 900 syntax error + """ +-from __future__ import with_statement ++ + + __version__ = '1.5.7' + +@@ -61,7 +61,7 @@ + from configparser import RawConfigParser + from io import TextIOWrapper + except ImportError: +- from ConfigParser import RawConfigParser ++ from configparser import RawConfigParser + + DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__' + DEFAULT_IGNORE = 'E123,E226,E24' +@@ -425,7 +425,7 @@ + # for each depth, memorize the visual indent column + indent = [last_indent[1]] + if verbose >= 3: +- print(">>> " + tokens[0][4].rstrip()) ++ print((">>> " + tokens[0][4].rstrip())) + + for token_type, text, start, end, line in tokens: + +@@ -438,7 +438,7 @@ + # this is the beginning of a continuation line. + last_indent = start + if verbose >= 3: +- print("... " + line.rstrip()) ++ print(("... " + line.rstrip())) + + # record the initial indent. + rel_indent[row] = expand_indent(line) - indent_level +@@ -506,7 +506,7 @@ + indent[depth] = start[1] + indent_chances[start[1]] = True + if verbose >= 4: +- print("bracket depth %s indent to %s" % (depth, start[1])) ++ print(("bracket depth %s indent to %s" % (depth, start[1]))) + # deal with implicit string concatenation + elif (token_type in (tokenize.STRING, tokenize.COMMENT) or + text in ('u', 'ur', 'b', 'br')): +@@ -528,8 +528,8 @@ + open_rows[depth].append(row) + parens[row] += 1 + if verbose >= 4: +- print("bracket depth %s seen, col %s, visual min = %s" % +- (depth, start[1], indent[depth])) ++ print(("bracket depth %s seen, col %s, visual min = %s" % ++ (depth, start[1], indent[depth]))) + elif text in ')]}' and depth > 0: + # parent indents should not be more than this one + prev_indent = indent.pop() or last_indent[1] +@@ -1122,14 +1122,14 @@ + if line[:3] == '@@ ': + hunk_match = HUNK_REGEX.match(line) + (row, nrows) = [int(g or '1') for g in hunk_match.groups()] +- rv[path].update(range(row, row + nrows)) ++ rv[path].update(list(range(row, row + nrows))) + elif line[:3] == '+++': + path = line[4:].split('\t', 1)[0] + if path[:2] == 'b/': + path = path[2:] + rv[path] = set() + return dict([(os.path.join(parent, path), rows) +- for (path, rows) in rv.items() ++ for (path, rows) in list(rv.items()) + if rows and filename_match(path, patterns)]) + + +@@ -1331,10 +1331,10 @@ + if self.blank_before < self.blank_lines: + self.blank_before = self.blank_lines + if self.verbose >= 2: +- print(self.logical_line[:80].rstrip()) ++ print((self.logical_line[:80].rstrip())) + for name, check, argument_names in self._logical_checks: + if self.verbose >= 4: +- print(' ' + name) ++ print((' ' + name)) + for offset, text in self.run_check(check, argument_names) or (): + if not isinstance(offset, tuple): + for token_offset, pos in mapping: +@@ -1425,8 +1425,8 @@ + pos = '[%s:%s]' % (token[2][1] or '', token[3][1]) + else: + pos = 'l.%s' % token[3][0] +- print('l.%s\t%s\t%s\t%r' % +- (token[2][0], pos, tokenize.tok_name[token[0]], text)) ++ print(('l.%s\t%s\t%s\t%r' % ++ (token[2][0], pos, tokenize.tok_name[token[0]], text))) + if token_type == tokenize.OP: + if text in '([{': + parens += 1 +@@ -1507,7 +1507,7 @@ + if code in self.expected: + return + if 
self.print_filename and not self.file_errors: +- print(self.filename) ++ print((self.filename)) + self.file_errors += 1 + self.total_errors += 1 + return code +@@ -1539,12 +1539,12 @@ + + def print_benchmark(self): + """Print benchmark numbers.""" +- print('%-7.2f %s' % (self.elapsed, 'seconds elapsed')) ++ print(('%-7.2f %s' % (self.elapsed, 'seconds elapsed'))) + if self.elapsed: + for key in self._benchmark_keys: +- print('%-7d %s per second (%d total)' % ++ print(('%-7d %s per second (%d total)' % + (self.counters[key] / self.elapsed, key, +- self.counters[key])) ++ self.counters[key]))) + + + class FileReport(BaseReport): +@@ -1582,20 +1582,20 @@ + """Print the result and return the overall count for this file.""" + self._deferred_print.sort() + for line_number, offset, code, text, doc in self._deferred_print: +- print(self._fmt % { ++ print((self._fmt % { + 'path': self.filename, + 'row': self.line_offset + line_number, 'col': offset + 1, + 'code': code, 'text': text, +- }) ++ })) + if self._show_source: + if line_number > len(self.lines): + line = '' + else: + line = self.lines[line_number - 1] +- print(line.rstrip()) +- print(re.sub(r'\S', ' ', line[:offset]) + '^') ++ print((line.rstrip())) ++ print((re.sub(r'\S', ' ', line[:offset]) + '^')) + if self._show_pep8 and doc: +- print(' ' + doc.strip()) ++ print((' ' + doc.strip())) + return self.file_errors + + +@@ -1678,7 +1678,7 @@ + def input_file(self, filename, lines=None, expected=None, line_offset=0): + """Run all checks on a Python source file.""" + if self.options.verbose: +- print('checking %s' % filename) ++ print(('checking %s' % filename)) + fchecker = self.checker_class( + filename, lines=lines, options=self.options) + return fchecker.check_all(expected=expected, line_offset=line_offset) +@@ -1694,7 +1694,7 @@ + runner = self.runner + for root, dirs, files in os.walk(dirname): + if verbose: +- print('directory ' + root) ++ print(('directory ' + root)) + counters['directories'] += 1 + for subdir in sorted(dirs): + if self.excluded(subdir, root): +@@ -1740,7 +1740,7 @@ + starts with argument_name and which contain selected tests. 
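# Illustrative sketch of the doubled parentheses 2to3 leaves in pep8.py
# above: for single-argument calls they are redundant but harmless, since
# print((x)) is just print(x); the extra wrapping only changes behaviour
# where a py2 statement really printed a tuple.
line = 'E501 line too long'
print((line))          # identical to print(line)
print(('l.1', 'E501')) # prints the tuple, matching py2's `print (a, b)`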
+ """ + checks = [] +- for check, attrs in _checks[argument_name].items(): ++ for check, attrs in list(_checks[argument_name].items()): + (codes, args) = attrs + if any(not (code and self.ignore_code(code)) for code in codes): + checks.append((check.__name__, check, args)) +@@ -1814,7 +1814,7 @@ + user_conf = options.config + if user_conf and os.path.isfile(user_conf): + if options.verbose: +- print('user configuration: %s' % user_conf) ++ print(('user configuration: %s' % user_conf)) + config.read(user_conf) + + local_dir = os.curdir +@@ -1823,7 +1823,7 @@ + if config.read([os.path.join(parent, fn) for fn in PROJECT_CONFIG]): + local_dir = parent + if options.verbose: +- print('local configuration: in %s' % parent) ++ print(('local configuration: in %s' % parent)) + break + (parent, tail) = os.path.split(parent) + +@@ -1838,10 +1838,10 @@ + # Second, parse the configuration + for opt in config.options(pep8_section): + if opt.replace('_', '-') not in parser.config_options: +- print(" unknown option '%s' ignored" % opt) ++ print((" unknown option '%s' ignored" % opt)) + continue + if options.verbose > 1: +- print(" %s = %s" % (opt, config.get(pep8_section, opt))) ++ print((" %s = %s" % (opt, config.get(pep8_section, opt)))) + normalized_opt = opt.replace('-', '_') + opt_type = option_list[normalized_opt] + if opt_type in ('int', 'count'): +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/PRESUBMIT.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/PRESUBMIT.py 2025-01-16 02:26:08.552346873 +0800 +@@ -23,7 +23,7 @@ + message=output_api.PresubmitError + ) + if input_api.verbose: +- print('Running ' + abspath_to_test) ++ print(('Running ' + abspath_to_test)) + return input_api.RunTests([command]) + + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/update_certs.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/update_certs.py 2025-01-16 02:26:08.552346873 +0800 +@@ -18,14 +18,13 @@ + def main(): + cert_dir = os.path.join(_THIS_DIR, 'certs') + +- print '===> Removing old files...' +- old_files = filter(lambda filename: '.sxg.' not in filename, +- os.listdir(cert_dir)) ++ print('===> Removing old files...') ++ old_files = [filename for filename in os.listdir(cert_dir) if '.sxg.' not in filename] + old_files = [os.path.join(cert_dir, fn) for fn in old_files] + if subprocess.call(['git', 'rm'] + old_files) != 0: + sys.exit(1) + +- print '\n===> Regenerating keys and certificates...' ++ print('\n===> Regenerating keys and certificates...') + env = OpenSSLEnvironment(logging.getLogger(__name__), + base_path=cert_dir, + force_regenerate=True, +@@ -42,7 +41,7 @@ + if subprocess.call('git add -v ' + os.path.join(cert_dir, '*'), shell=True) != 0: + sys.exit(1) + +- print '\n===> Updating wpt.config.json and base.py...' 
++ print('\n===> Updating wpt.config.json and base.py...') + key_basename = os.path.basename(key_path) + pem_basename = os.path.basename(pem_path) + config_path = os.path.join(_THIS_DIR, 'wpt.config.json') +@@ -65,7 +64,7 @@ + if subprocess.call(['git', 'add', '-v', config_path, base_py_path]) != 0: + sys.exit(1) + +- print '\n===> Certificate validity:' ++ print('\n===> Certificate validity:') + subprocess.call(['grep', 'Not After', pem_path]) + + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/lint/fnmatch.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/lint/fnmatch.py 2025-01-16 02:26:08.552346873 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import ++ + + import fnmatch as _stdlib_fnmatch + import os +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/lint/lint.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/lint/lint.py 2025-01-16 02:26:08.553430188 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import print_function, unicode_literals ++ + + import abc + import argparse +@@ -294,23 +294,23 @@ + + for path in paths: + if os.name == "nt": +- path = path.replace(u"\\", u"/") ++ path = path.replace("\\", "/") + +- if not path.startswith(u"css/"): ++ if not path.startswith("css/"): + continue + +- source_file = SourceFile(repo_root, path, u"/") ++ source_file = SourceFile(repo_root, path, "/") + if source_file.name_is_non_test: + # If we're name_is_non_test for a reason apart from support, ignore it. + # We care about support because of the requirement all support files in css/ to be in + # a support directory; see the start of check_parsed. 
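# Illustrative sketch of why dropping the u'' prefixes in lint.py above is
# purely cosmetic: every bare string literal is already text on py3, and
# the u'' spelling (re-allowed since Python 3.3) denotes the same object.
assert u"css/" == "css/" and type(u"css/") is type("css/") is str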
+- offset = path.find(u"/support/") ++ offset = path.find("/support/") + if offset == -1: + continue + + parts = source_file.dir_path.split(os.path.sep) + if (parts[0] in source_file.root_dir_non_test or +- any(item in source_file.dir_non_test - {u"support"} for item in parts) or ++ any(item in source_file.dir_non_test - {"support"} for item in parts) or + any(parts[:len(non_test_path)] == list(non_test_path) for non_test_path in source_file.dir_path_non_test)): + continue + +@@ -320,7 +320,7 @@ + ref_files[source_file.name].add(path) + else: + test_name = source_file.name # type: Text +- test_name = test_name.replace(u'-manual', u'') ++ test_name = test_name.replace('-manual', '') + test_files[test_name].add(path) + + errors = [] +@@ -331,7 +331,7 @@ + # Only compute by_spec if there are prima-facie collisions because of cost + by_spec = defaultdict(set) # type: Dict[Text, Set[Text]] + for path in colliding: +- source_file = SourceFile(repo_root, path, u"/") ++ source_file = SourceFile(repo_root, path, "/") + for link in source_file.spec_links: + for r in (drafts_csswg_re, w3c_tr_re, w3c_dev_re): + m = r.match(link) +@@ -388,7 +388,7 @@ + continue + file_name, file_extension = os.path.splitext(path) + file_dict[file_name].append(file_extension) +- for k, v in file_dict.items(): ++ for k, v in list(file_dict.items()): + if len(v) == 1: + continue + context = (', '.join(v),) +@@ -586,9 +586,9 @@ + if variant != "" and variant[0] not in ("?", "#"): + errors.append(rules.MalformedVariant.error(path, (path,))) + +- required_elements.extend(key for key, value in {"testharness": True, ++ required_elements.extend(key for key, value in list({"testharness": True, + "testharnessreport": len(testharnessreport_nodes) > 0, +- "timeout": len(source_file.timeout_nodes) > 0}.items() ++ "timeout": len(source_file.timeout_nodes) > 0}.items()) + if value) + + testdriver_vendor_nodes = [] # type: List[ElementTree.Element] +@@ -895,7 +895,7 @@ + return + + assert logger is not None +- by_type = " ".join("%s: %d" % item for item in error_count.items()) ++ by_type = " ".join("%s: %d" % item for item in list(error_count.items())) + count = sum(error_count.values()) + logger.info("") + if count == 1: +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/lint/rules.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/lint/rules.py 2025-01-16 02:26:08.553430188 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import unicode_literals ++ + + import abc + import inspect +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/download.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/download.py 2025-01-16 02:26:08.553430188 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import ++ + + import argparse + import bz2 +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/item.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/item.py 2025-01-16 02:26:08.553430188 +0800 +@@ -129,7 +129,7 @@ + @property + def url(self): + # type: () -> Text +- rel_url = self._url or self.path.replace(os.path.sep, u"/") ++ rel_url = self._url or self.path.replace(os.path.sep, "/") + # we can outperform urljoin, because we know we just have path relative URLs + if self.url_base == "/": + 
return "/" + rel_url +@@ -149,7 +149,7 @@ + + def to_json(self): + # type: () -> Tuple[Optional[Text], Dict[Any, Any]] +- rel_url = None if self._url == self.path.replace(os.path.sep, u"/") else self._url ++ rel_url = None if self._url == self.path.replace(os.path.sep, "/") else self._url + rv = (rel_url, {}) # type: Tuple[Optional[Text], Dict[Any, Any]] + return rv + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/manifest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/manifest.py 2025-01-16 02:26:08.553430188 +0800 +@@ -66,15 +66,15 @@ + pass + + +-item_classes = {u"testharness": TestharnessTest, +- u"reftest": RefTest, +- u"print-reftest": PrintRefTest, +- u"crashtest": CrashTest, +- u"manual": ManualTest, +- u"wdspec": WebDriverSpecTest, +- u"conformancechecker": ConformanceCheckerTest, +- u"visual": VisualTest, +- u"support": SupportFile} # type: Dict[Text, Type[ManifestItem]] ++item_classes = {"testharness": TestharnessTest, ++ "reftest": RefTest, ++ "print-reftest": PrintRefTest, ++ "crashtest": CrashTest, ++ "manual": ManualTest, ++ "wdspec": WebDriverSpecTest, ++ "conformancechecker": ConformanceCheckerTest, ++ "visual": VisualTest, ++ "support": SupportFile} # type: Dict[Text, Type[ManifestItem]] + + + def compute_manifest_items(source_file): +@@ -151,7 +151,7 @@ + # type: (Text) -> Iterable[ManifestItem] + tpath = tuple(path.split(os.path.sep)) + +- for type_tests in self._data.values(): ++ for type_tests in list(self._data.values()): + i = type_tests.get(tpath, set()) + assert i is not None + for test in i: +@@ -162,7 +162,7 @@ + tpath = tuple(dir_name.split(os.path.sep)) + tpath_len = len(tpath) + +- for type_tests in self._data.values(): ++ for type_tests in list(self._data.values()): + for path, tests in iteritems(type_tests): + if path[:tpath_len] == tpath: + for test in tests: +@@ -242,9 +242,9 @@ + chunksize=max(1, len(to_update) // 10000) + ) # type: Iterator[Tuple[Tuple[Text, ...], Text, Set[ManifestItem], Text]] + elif PY3: +- results = map(compute_manifest_items, to_update) ++ results = list(map(compute_manifest_items, to_update)) + else: +- results = itertools.imap(compute_manifest_items, to_update) ++ results = map(compute_manifest_items, to_update) + + for result in results: + rel_path_parts, new_type, manifest_items, file_hash = result +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/sourcefile.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/sourcefile.py 2025-01-16 02:26:08.553430188 +0800 +@@ -51,7 +51,7 @@ + + reference_file_re = re.compile(r'(^|[\-_])(not)?ref[0-9]*([\-_]|$)') + +-space_chars = u"".join(html5lib.constants.spaceCharacters) # type: Text ++space_chars = "".join(html5lib.constants.spaceCharacters) # type: Text + + + def replace_end(s, old, new): +@@ -185,19 +185,19 @@ + + + class SourceFile(object): +- parsers = {u"html":_parse_html, +- u"xhtml":_parse_xml, +- u"svg":_parse_xml} # type: Dict[Text, Callable[[BinaryIO], ElementTree.ElementTree]] +- +- root_dir_non_test = {u"common"} +- +- dir_non_test = {u"resources", +- u"support", +- u"tools"} +- +- dir_path_non_test = {(u"css21", u"archive"), +- (u"css", u"CSS2", u"archive"), +- (u"css", u"common")} # type: Set[Tuple[Text, ...]] ++ parsers = {"html":_parse_html, ++ "xhtml":_parse_xml, ++ "svg":_parse_xml} # type: Dict[Text, 
Callable[[BinaryIO], ElementTree.ElementTree]] ++ ++ root_dir_non_test = {"common"} ++ ++ dir_non_test = {"resources", ++ "support", ++ "tools"} ++ ++ dir_path_non_test = {("css21", "archive"), ++ ("css", "CSS2", "archive"), ++ ("css", "common")} # type: Set[Tuple[Text, ...]] + + def __init__(self, tests_root, rel_path, url_base, hash=None, contents=None): + # type: (Text, Text, Text, Optional[Text], Optional[bytes]) -> None +@@ -212,7 +212,7 @@ + assert not os.path.isabs(rel_path), rel_path + if os.name == "nt": + # do slash normalization on Windows +- rel_path = rel_path.replace(u"/", u"\\") ++ rel_path = rel_path.replace("/", "\\") + + dir_path, filename = os.path.split(rel_path) + name, ext = os.path.splitext(filename) +@@ -331,11 +331,11 @@ + """Check if the file name matches the conditions for the file to + be a non-test file""" + return (self.is_dir() or +- self.name_prefix(u"MANIFEST") or +- self.filename == u"META.yml" or +- self.filename.startswith(u".") or +- self.filename.endswith(u".headers") or +- self.filename.endswith(u".ini") or ++ self.name_prefix("MANIFEST") or ++ self.filename == "META.yml" or ++ self.filename.startswith(".") or ++ self.filename.endswith(".headers") or ++ self.filename.endswith(".ini") or + self.in_non_test_dir()) + + @property +@@ -435,14 +435,14 @@ + + if not ext: + return None +- if ext[0] == u".": ++ if ext[0] == ".": + ext = ext[1:] +- if ext in [u"html", u"htm"]: +- return u"html" +- if ext in [u"xhtml", u"xht", u"xml"]: +- return u"xhtml" +- if ext == u"svg": +- return u"svg" ++ if ext in ["html", "htm"]: ++ return "html" ++ if ext in ["xhtml", "xht", "xml"]: ++ return "xhtml" ++ if ext == "svg": ++ return "svg" + return None + + @cached_property +@@ -550,9 +550,9 @@ + + def parse_ref_keyed_meta(self, node): + # type: (ElementTree.Element) -> Tuple[Optional[Tuple[Text, Text, Text]], Text] +- item = node.attrib.get(u"content", u"") # type: Text ++ item = node.attrib.get("content", "") # type: Text + +- parts = item.rsplit(u":", 1) ++ parts = item.rsplit(":", 1) + if len(parts) == 1: + key = None # type: Optional[Tuple[Text, Text, Text]] + value = parts[0] +@@ -563,7 +563,7 @@ + if ref[0] == key_part: + reftype = ref[1] + break +- if reftype not in (u"==", u"!="): ++ if reftype not in ("==", "!="): + raise ValueError("Key %s doesn't correspond to a reference" % key_part) + key = (self.url, key_part, reftype) + value = parts[1] +@@ -590,26 +590,26 @@ + if not self.fuzzy_nodes: + return rv + +- args = [u"maxDifference", u"totalPixels"] ++ args = ["maxDifference", "totalPixels"] + + for node in self.fuzzy_nodes: + key, value = self.parse_ref_keyed_meta(node) +- ranges = value.split(u";") ++ ranges = value.split(";") + if len(ranges) != 2: + raise ValueError("Malformed fuzzy value %s" % value) + arg_values = {} # type: Dict[Text, List[int]] + positional_args = deque() # type: Deque[List[int]] + for range_str_value in ranges: # type: Text + name = None # type: Optional[Text] +- if u"=" in range_str_value: ++ if "=" in range_str_value: + name, range_str_value = [part.strip() +- for part in range_str_value.split(u"=", 1)] ++ for part in range_str_value.split("=", 1)] + if name not in args: + raise ValueError("%s is not a valid fuzzy property" % name) + if arg_values.get(name): + raise ValueError("Got multiple values for argument %s" % name) +- if u"-" in range_str_value: +- range_min, range_max = range_str_value.split(u"-") ++ if "-" in range_str_value: ++ range_min, range_max = range_str_value.split("-") + else: + range_min = range_str_value + range_max 
= range_str_value +@@ -1017,7 +1017,7 @@ + )] + + elif self.name_is_multi_global: +- globals = u"" ++ globals = "" + script_metadata = self.script_metadata + assert script_metadata is not None + for (key, value) in script_metadata: +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/testpaths.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/testpaths.py 2025-01-16 02:26:08.553430188 +0800 +@@ -100,12 +100,12 @@ + def write_output(path_id_map, as_json): + # type: (Dict[Text, List[Text]], bool) -> None + if as_json: +- print(json.dumps(path_id_map)) ++ print((json.dumps(path_id_map))) + else: + for path, test_ids in sorted(iteritems(path_id_map)): + print(path) + for test_id in sorted(test_ids): +- print(" " + test_id) ++ print((" " + test_id)) + + + def run(**kwargs): +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/typedata.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/typedata.py 2025-01-16 02:26:08.553430188 +0800 +@@ -193,7 +193,7 @@ + + return count + +- def __nonzero__(self): ++ def __bool__(self): + # type: () -> bool + return bool(self._data) or bool(self._json_data) + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/utils.py 2025-01-16 02:26:08.553430188 +0800 +@@ -22,52 +22,52 @@ + def rel_path_to_url(rel_path, url_base="/"): + # type: (Text, Text) -> Text + assert not os.path.isabs(rel_path), rel_path +- if url_base[0] != u"/": +- url_base = u"/" + url_base +- if url_base[-1] != u"/": +- url_base += u"/" +- return url_base + rel_path.replace(os.sep, u"/") ++ if url_base[0] != "/": ++ url_base = "/" + url_base ++ if url_base[-1] != "/": ++ url_base += "/" ++ return url_base + rel_path.replace(os.sep, "/") + + + def from_os_path(path): + # type: (Text) -> Text +- assert os.path.sep == u"/" or platform.system() == "Windows" +- if u"/" == os.path.sep: ++ assert os.path.sep == "/" or platform.system() == "Windows" ++ if "/" == os.path.sep: + rv = path + else: +- rv = path.replace(os.path.sep, u"/") +- if u"\\" in rv: ++ rv = path.replace(os.path.sep, "/") ++ if "\\" in rv: + raise ValueError("path contains \\ when separator is %s" % os.path.sep) + return rv + + + def to_os_path(path): + # type: (Text) -> Text +- assert os.path.sep == u"/" or platform.system() == "Windows" +- if u"\\" in path: ++ assert os.path.sep == "/" or platform.system() == "Windows" ++ if "\\" in path: + raise ValueError("normalised path contains \\") +- if u"/" == os.path.sep: ++ if "/" == os.path.sep: + return path +- return path.replace(u"/", os.path.sep) ++ return path.replace("/", os.path.sep) + + + def git(path): + # type: (Text) -> Optional[Callable[..., Text]] + def gitfunc(cmd, *args): + # type: (Text, *Text) -> Text +- full_cmd = [u"git", cmd] + list(args) ++ full_cmd = ["git", cmd] + list(args) + try: + return subprocess.check_output(full_cmd, cwd=path, stderr=subprocess.STDOUT).decode('utf8') + except Exception as e: + if platform.uname()[0] == "Windows" and isinstance(e, WindowsError): +- full_cmd[0] = u"git.bat" ++ full_cmd[0] = "git.bat" + return subprocess.check_output(full_cmd, cwd=path, stderr=subprocess.STDOUT).decode('utf8') + else: + raise + + 
try: + # this needs to be a command that fails if we aren't in a git repo +- gitfunc(u"rev-parse", u"--show-toplevel") ++ gitfunc("rev-parse", "--show-toplevel") + except (subprocess.CalledProcessError, OSError): + return None + else: +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/vcs.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/manifest/vcs.py 2025-01-16 02:26:08.553430188 +0800 +@@ -32,7 +32,7 @@ + # type: (Text, Manifest, Optional[Text], Optional[Text], bool, bool) -> FileSystem + tree = None + if cache_root is None: +- cache_root = os.path.join(tests_root, u".wptcache") ++ cache_root = os.path.join(tests_root, ".wptcache") + if not os.path.exists(cache_root): + try: + os.makedirs(cache_root) +@@ -175,7 +175,7 @@ + + + class MtimeCache(CacheFile): +- file_name = u"mtime.json" ++ file_name = "mtime.json" + + def __init__(self, cache_root, tests_root, manifest_path, rebuild=False): + # type: (Text, Text, Text, bool) -> None +@@ -196,12 +196,12 @@ + + def check_valid(self, data): + # type: (Dict[Any, Any]) -> Dict[Any, Any] +- if data.get(u"/tests_root") != self.tests_root: ++ if data.get("/tests_root") != self.tests_root: + self.modified = True + else: + if self.manifest_path is not None and os.path.exists(self.manifest_path): + mtime = os.path.getmtime(self.manifest_path) +- if data.get(u"/manifest_path") != [self.manifest_path, mtime]: ++ if data.get("/manifest_path") != [self.manifest_path, mtime]: + self.modified = True + else: + self.modified = True +@@ -229,10 +229,10 @@ + # type: (Dict[Any, Any]) -> Dict[Any, Any] + ignore_path = os.path.join(self.tests_root, ".gitignore") + mtime = os.path.getmtime(ignore_path) +- if data.get(u"/gitignore_file") != [ignore_path, mtime]: ++ if data.get("/gitignore_file") != [ignore_path, mtime]: + self.modified = True + data = {} +- data[u"/gitignore_file"] = [ignore_path, mtime] ++ data["/gitignore_file"] = [ignore_path, mtime] + return data + + def __contains__(self, key): +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/serve/serve.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/serve/serve.py 2025-01-16 02:26:08.554513503 +0800 +@@ -1,6 +1,6 @@ + # -*- coding: utf-8 -*- + +-from __future__ import print_function ++ + + import abc + import argparse +@@ -179,7 +179,7 @@ + + def check_exposure(self, request): + if self.global_type: +- globals = u"" ++ globals = "" + for (key, value) in self._get_metadata(request): + if key == "global": + globals = value +@@ -342,7 +342,7 @@ + # Using reversed here means that mount points that are added later + # get higher priority. This makes sense since / is typically added + # first. 
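# Illustrative sketch of why gitfunc() in manifest/utils.py above keeps
# its .decode('utf8'): subprocess.check_output() returns bytes on py3, so
# callers that expect text need an explicit decode. ('echo' is only a
# stand-in command here and assumes a POSIX environment.)
import subprocess
out = subprocess.check_output(['echo', 'manifest'])
assert isinstance(out, bytes)
assert out.decode('utf8').strip() == 'manifest'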
+- for item in reversed(self.mountpoint_routes.values()): ++ for item in reversed(list(self.mountpoint_routes.values())): + routes.extend(item) + return routes + +@@ -526,7 +526,7 @@ + + def start_servers(host, ports, paths, routes, bind_address, config, **kwargs): + servers = defaultdict(list) +- for scheme, ports in ports.items(): ++ for scheme, ports in list(ports.items()): + assert len(ports) == {"http": 2, "https": 2}.get(scheme, 1) + + # If trying to start HTTP/2.0 server, check compatibility +@@ -788,25 +788,25 @@ + + + def iter_procs(servers): +- for servers in servers.values(): ++ for servers in list(servers.values()): + for port, server in servers: + yield server.proc + + + def _make_subdomains_product(s, depth=2): +- return {u".".join(x) for x in chain(*(product(s, repeat=i) for i in range(1, depth+1)))} ++ return {".".join(x) for x in chain(*(product(s, repeat=i) for i in range(1, depth+1)))} + + def _make_origin_policy_subdomains(limit): +- return {u"op%d" % x for x in range(1,limit+1)} ++ return {"op%d" % x for x in range(1,limit+1)} + + +-_subdomains = {u"www", +- u"www1", +- u"www2", +- u"天気の良い日", +- u"élève"} ++_subdomains = {"www", ++ "www1", ++ "www2", ++ "天気の良い日", ++ "élève"} + +-_not_subdomains = {u"nonexistent"} ++_not_subdomains = {"nonexistent"} + + _subdomains = _make_subdomains_product(_subdomains) + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/atomicwrites/atomicwrites/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/atomicwrites/atomicwrites/__init__.py 2025-01-16 02:26:08.554513503 +0800 +@@ -13,7 +13,7 @@ + + PY2 = sys.version_info[0] == 2 + +-text_type = unicode if PY2 else str # noqa ++text_type = str if PY2 else str # noqa + + + def _path_to_unicode(x): +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/enum/enum/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/enum/enum/__init__.py 2025-01-16 02:26:08.554513503 +0800 +@@ -23,17 +23,17 @@ + OrderedDict = None + + try: +- basestring ++ str + except NameError: + # In Python 2 basestring is the ancestor of both str and unicode + # in Python 3 it's just str, but was missing in 3.1 +- basestring = str ++ str = str + + try: +- unicode ++ str + except NameError: + # In Python 3 unicode no longer exists (it's just str) +- unicode = str ++ str = str + + class _RouteClassAttributeToGetattr(object): + """Route attribute access on a class to __getattr__. +@@ -158,7 +158,7 @@ + if type(classdict) is dict: + original_dict = classdict + classdict = _EnumDict() +- for k, v in original_dict.items(): ++ for k, v in list(original_dict.items()): + classdict[k] = v + + member_type, first_enum = metacls._get_mixins_(bases) +@@ -175,7 +175,7 @@ + if _order_ is None: + if pyver < 3.0: + try: +- _order_ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])] ++ _order_ = [name for (name, value) in sorted(list(members.items()), key=lambda item: item[1])] + except TypeError: + _order_ = [name for name in sorted(members.keys())] + else: +@@ -236,7 +236,7 @@ + enum_member.__init__(*args) + # If another member with the same value was already defined, the + # new member becomes an alias to the existing one. 
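# Illustrative sketch of the list() added around reversed() in serve.py
# above: py3 dict views are not sequences, and reversed() only accepts
# them natively from Python 3.8, so snapshotting into a list keeps the
# 'later mount points win' ordering on every supported interpreter.
mountpoints = {'/': ['root'], '/ws': ['websocket']}
assert list(reversed(list(mountpoints.values()))) == [['websocket'], ['root']]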
+- for name, canonical_member in enum_class._member_map_.items(): ++ for name, canonical_member in list(enum_class._member_map_.items()): + if canonical_member.value == enum_member._value_: + enum_member = canonical_member + break +@@ -433,7 +433,7 @@ + """ + if pyver < 3.0: + # if class_name is unicode, attempt a conversion to ASCII +- if isinstance(class_name, unicode): ++ if isinstance(class_name, str): + try: + class_name = class_name.encode('ascii') + except UnicodeEncodeError: +@@ -447,22 +447,22 @@ + _order_ = [] + + # special processing needed for names? +- if isinstance(names, basestring): ++ if isinstance(names, str): + names = names.replace(',', ' ').split() +- if isinstance(names, (tuple, list)) and isinstance(names[0], basestring): ++ if isinstance(names, (tuple, list)) and isinstance(names[0], str): + names = [(e, i+start) for (i, e) in enumerate(names)] + + # Here, names is either an iterable of (name, value) or a mapping. + item = None # in case names is empty + for item in names: +- if isinstance(item, basestring): ++ if isinstance(item, str): + member_name, member_value = item, names[item] + else: + member_name, member_value = item + classdict[member_name] = member_value + _order_.append(member_name) + # only set _order_ in classdict if name/value was not from a mapping +- if not isinstance(item, basestring): ++ if not isinstance(item, str): + classdict['_order_'] = ' '.join(_order_) + enum_class = metacls.__new__(metacls, class_name, bases, classdict) + +@@ -656,7 +656,7 @@ + return cls._value2member_map_[value] + except TypeError: + # not there, now do long search -- O(n) behavior +- for member in cls._member_map_.values(): ++ for member in list(cls._member_map_.values()): + if member.value == value: + return member + raise ValueError("%s is not a valid %s" % (value, cls.__name__)) +@@ -800,7 +800,7 @@ + source = vars(source) + else: + source = module_globals +- members = dict((name, value) for name, value in source.items() if filter(name)) ++ members = dict((name, value) for name, value in list(source.items()) if list(filter(name))) + cls = cls(name, members, module=module) + cls.__reduce_ex__ = _reduce_ex_by_name + module_globals.update(cls.__members__) +@@ -824,7 +824,7 @@ + def unique(enumeration): + """Class decorator that ensures only unique members exist in an enumeration.""" + duplicates = [] +- for name, member in enumeration.__members__.items(): ++ for name, member in list(enumeration.__members__.items()): + if name != member.name: + duplicates.append((name, member.name)) + if duplicates: +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/h2/h2/connection.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/h2/h2/connection.py 2025-01-16 02:26:08.554513503 +0800 +@@ -393,7 +393,7 @@ + count = 0 + to_delete = [] + +- for stream_id, stream in self.streams.items(): ++ for stream_id, stream in list(self.streams.items()): + if stream.open and (stream_id % 2 == remainder): + count += 1 + elif stream.closed: +@@ -469,7 +469,7 @@ + s.max_outbound_frame_size = self.max_outbound_frame_size + + self.streams[stream_id] = s +- self.config.logger.debug("Current streams: %s", self.streams.keys()) ++ self.config.logger.debug("Current streams: %s", list(self.streams.keys())) + + if outbound: + self.highest_outbound_stream_id = stream_id +@@ -491,7 +491,7 @@ + preamble = b'' + + f = SettingsFrame(0) +- for setting, value in 
self.local_settings.items(): ++ for setting, value in list(self.local_settings.items()): + f.settings[setting] = value + self.config.logger.debug( + "Send Settings frame: %s", self.local_settings +@@ -542,7 +542,7 @@ + + if self.config.client_side: + f = SettingsFrame(0) +- for setting, value in self.local_settings.items(): ++ for setting, value in list(self.local_settings.items()): + f.settings[setting] = value + + frame_data = f.serialize_body() +@@ -1393,7 +1393,7 @@ + if SettingCodes.MAX_FRAME_SIZE in changes: + setting = changes[SettingCodes.MAX_FRAME_SIZE] + self.max_outbound_frame_size = setting.new_value +- for stream in self.streams.values(): ++ for stream in list(self.streams.values()): + stream.max_outbound_frame_size = setting.new_value + + f = SettingsFrame(0) +@@ -1412,7 +1412,7 @@ + """ + delta = new_value - old_value + +- for stream in self.streams.values(): ++ for stream in list(self.streams.values()): + stream.outbound_flow_control_window = guard_increment_window( + stream.outbound_flow_control_window, + delta +@@ -1428,7 +1428,7 @@ + """ + delta = new_value - old_value + +- for stream in self.streams.values(): ++ for stream in list(self.streams.values()): + stream._inbound_flow_control_change_from_settings(delta) + + def receive_data(self, data): +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/h2/h2/events.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/h2/h2/events.py 2025-01-16 02:26:08.554513503 +0800 +@@ -342,7 +342,7 @@ + the form of a dictionary of ``{setting: value}``. + """ + e = cls() +- for setting, new_value in new_settings.items(): ++ for setting, new_value in list(new_settings.items()): + setting = _setting_code_from_int(setting) + original_value = old_settings.get(setting) + change = ChangedSetting(setting, original_value, new_value) +@@ -352,7 +352,7 @@ + + def __repr__(self): + return "" % ( +- ", ".join(repr(cs) for cs in self.changed_settings.values()), ++ ", ".join(repr(cs) for cs in list(self.changed_settings.values())), + ) + + +@@ -455,7 +455,7 @@ + + def __repr__(self): + return "" % ( +- ", ".join(repr(cs) for cs in self.changed_settings.values()), ++ ", ".join(repr(cs) for cs in list(self.changed_settings.values())), + ) + + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/h2/h2/frame_buffer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/h2/h2/frame_buffer.py 2025-01-16 02:26:08.554513503 +0800 +@@ -130,7 +130,7 @@ + def __iter__(self): + return self + +- def next(self): # Python 2 ++ def __next__(self): # Python 2 + # First, check that we have enough data to successfully parse the + # next frame header. If not, bail. Otherwise, parse it. + if len(self.data) < 9: +@@ -169,7 +169,7 @@ + # frame in the sequence instead. Recurse back into ourselves to do + # that. This is safe because the amount of work we have to do here is + # strictly bounded by the length of the buffer. 
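# Illustrative sketch (class and data hypothetical) of the iterator shape
# the frame_buffer.py rename above is driving at: after `next` becomes
# `__next__`, exactly one definition may remain -- a leftover py3 shim of
# the form `def __next__(self): return next(self)` would shadow the
# renamed method and recurse into itself, so it is worth deleting.
class Frames:
    def __init__(self, frames):
        self._pending = list(frames)
    def __iter__(self):
        return self
    def __next__(self):
        if not self._pending:
            raise StopIteration
        return self._pending.pop(0)
assert list(Frames('ab')) == ['a', 'b']
# Relatedly, in the enum _convert() hunk further up, `filter` names the
# caller's predicate rather than the builtin, so the guard wants the
# direct call `filter(name)`; `list(filter(name))` would call the
# predicate and then try to iterate its boolean result.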
+- return f if f is not None else self.next() ++ return f if f is not None else next(self) + + def __next__(self): # Python 3 +- return self.next() ++ return next(self) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/h2/h2/settings.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/h2/h2/settings.py 2025-01-16 02:31:45.623609312 +0800 +@@ -8,6 +8,7 @@ + state of the settings and the unacknowledged future values of the settings. + """ + import collections ++import collections.abc + import enum + + from hyperframe.frame import SettingsFrame +@@ -88,7 +89,7 @@ + ) + + +-class Settings(collections.MutableMapping): ++class Settings(collections.abc.MutableMapping): + """ + An object that encapsulates HTTP/2 settings state. + +@@ -137,7 +138,7 @@ + SettingCodes.MAX_FRAME_SIZE: collections.deque([16384]), + } + if initial_values is not None: +- for key, value in initial_values.items(): ++ for key, value in list(initial_values.items()): + invalid = _validate_setting(key, value) + if invalid: + raise InvalidSettingsValueError( +@@ -157,7 +158,7 @@ + + # If there is more than one setting in the list, we have a setting + # value outstanding. Update them. +- for k, v in self._settings.items(): ++ for k, v in list(self._settings.items()): + if len(v) > 1: + old_setting = v.popleft() + new_setting = v[0] +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/h2/h2/utilities.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/h2/h2/utilities.py 2025-01-16 02:26:08.554513503 +0800 +@@ -19,45 +19,45 @@ + # A set of headers that are hop-by-hop or connection-specific and thus + # forbidden in HTTP/2. This list comes from RFC 7540 § 8.1.2.2. + CONNECTION_HEADERS = frozenset([ +- b'connection', u'connection', +- b'proxy-connection', u'proxy-connection', +- b'keep-alive', u'keep-alive', +- b'transfer-encoding', u'transfer-encoding', +- b'upgrade', u'upgrade', ++ b'connection', 'connection', ++ b'proxy-connection', 'proxy-connection', ++ b'keep-alive', 'keep-alive', ++ b'transfer-encoding', 'transfer-encoding', ++ b'upgrade', 'upgrade', + ]) + + + _ALLOWED_PSEUDO_HEADER_FIELDS = frozenset([ +- b':method', u':method', +- b':scheme', u':scheme', +- b':authority', u':authority', +- b':path', u':path', +- b':status', u':status', ++ b':method', ':method', ++ b':scheme', ':scheme', ++ b':authority', ':authority', ++ b':path', ':path', ++ b':status', ':status', + ]) + + + _SECURE_HEADERS = frozenset([ + # May have basic credentials which are vulnerable to dictionary attacks. 
+- b'authorization', u'authorization', +- b'proxy-authorization', u'proxy-authorization', ++ b'authorization', 'authorization', ++ b'proxy-authorization', 'proxy-authorization', + ]) + + + _REQUEST_ONLY_HEADERS = frozenset([ +- b':scheme', u':scheme', +- b':path', u':path', +- b':authority', u':authority', +- b':method', u':method' ++ b':scheme', ':scheme', ++ b':path', ':path', ++ b':authority', ':authority', ++ b':method', ':method' + ]) + + +-_RESPONSE_ONLY_HEADERS = frozenset([b':status', u':status']) ++_RESPONSE_ONLY_HEADERS = frozenset([b':status', ':status']) + + + if sys.version_info[0] == 2: # Python 2.X + _WHITESPACE = frozenset(whitespace) + else: # Python 3.3+ +- _WHITESPACE = frozenset(map(ord, whitespace)) ++ _WHITESPACE = frozenset(list(map(ord, whitespace))) + + + def _secure_headers(headers, hdr_validation_flags): +@@ -81,7 +81,7 @@ + for header in headers: + if header[0] in _SECURE_HEADERS: + yield NeverIndexedHeaderTuple(*header) +- elif header[0] in (b'cookie', u'cookie') and len(header[1]) < 20: ++ elif header[0] in (b'cookie', 'cookie') and len(header[1]) < 20: + yield NeverIndexedHeaderTuple(*header) + else: + yield header +@@ -92,7 +92,7 @@ + Extracts the request method from the headers list. + """ + for k, v in headers: +- if k in (b':method', u':method'): ++ if k in (b':method', ':method'): + if not isinstance(v, bytes): + return v.encode('utf-8') + else: +@@ -116,9 +116,9 @@ + status = b':status' + informational_start = b'1' + else: +- sigil = u':' +- status = u':status' +- informational_start = u'1' ++ sigil = ':' ++ status = ':status' ++ informational_start = '1' + + # If we find a non-special header, we're done here: stop looping. + if not n.startswith(sigil): +@@ -173,7 +173,7 @@ + # This gets run against headers that come both from HPACK and from the + # user, so we may have unicode floating around in here. We only want + # bytes. +- if n in (b':authority', u':authority'): ++ if n in (b':authority', ':authority'): + return v.encode('utf-8') if not isinstance(v, bytes) else v + + return None +@@ -266,8 +266,8 @@ + its value is anything other than "trailers". + """ + for header in headers: +- if header[0] in (b'te', u'te'): +- if header[1].lower() not in (b'trailers', u'trailers'): ++ if header[0] in (b'te', 'te'): ++ if header[1].lower() not in (b'trailers', 'trailers'): + raise ProtocolError( + "Invalid value for Transfer-Encoding header: %s" % + header[1] +@@ -325,7 +325,7 @@ + seen_regular_header = False + + for header in headers: +- if _custom_startswith(header[0], b':', u':'): ++ if _custom_startswith(header[0], b':', ':'): + if header[0] in seen_pseudo_header_fields: + raise ProtocolError( + "Received duplicate pseudo-header field %s" % header[0] +@@ -374,7 +374,7 @@ + # Relevant RFC section: RFC 7540 § 8.1.2.4 + # https://tools.ietf.org/html/rfc7540#section-8.1.2.4 + if hdr_validation_flags.is_response_header: +- _assert_header_in_set(u':status', b':status', pseudo_headers) ++ _assert_header_in_set(':status', b':status', pseudo_headers) + invalid_response_headers = pseudo_headers & _REQUEST_ONLY_HEADERS + if invalid_response_headers: + raise ProtocolError( +@@ -385,9 +385,9 @@ + not hdr_validation_flags.is_trailer): + # This is a request, so we need to have seen :path, :method, and + # :scheme. 
+- _assert_header_in_set(u':path', b':path', pseudo_headers) +- _assert_header_in_set(u':method', b':method', pseudo_headers) +- _assert_header_in_set(u':scheme', b':scheme', pseudo_headers) ++ _assert_header_in_set(':path', b':path', pseudo_headers) ++ _assert_header_in_set(':method', b':method', pseudo_headers) ++ _assert_header_in_set(':scheme', b':scheme', pseudo_headers) + invalid_request_headers = pseudo_headers & _RESPONSE_ONLY_HEADERS + if invalid_request_headers: + raise ProtocolError( +@@ -417,9 +417,9 @@ + host_header_val = None + + for header in headers: +- if header[0] in (b':authority', u':authority'): ++ if header[0] in (b':authority', ':authority'): + authority_header_val = header[1] +- elif header[0] in (b'host', u'host'): ++ elif header[0] in (b'host', 'host'): + host_header_val = header[1] + + yield header +@@ -472,7 +472,7 @@ + """ + def inner(): + for header in headers: +- if header[0] in (b':path', u':path'): ++ if header[0] in (b':path', ':path'): + if not header[1]: + raise ProtocolError("An empty :path header is forbidden") + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/h2/h2/windows.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/h2/h2/windows.py 2025-01-16 02:26:08.554513503 +0800 +@@ -12,7 +12,7 @@ + to manage the flow control window without user input, trying to ensure that it + does not emit too many WINDOW_UPDATE frames. + """ +-from __future__ import division ++ + + from .exceptions import FlowControlError + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/hpack/hpack/compat.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/hpack/hpack/compat.py 2025-01-16 02:26:08.554513503 +0800 +@@ -25,7 +25,7 @@ + else: + return bytes(b) + +- unicode = unicode # noqa ++ str = str # noqa + bytes = str + + elif is_py3: +@@ -38,5 +38,5 @@ + def to_bytes(b): + return bytes(b) + +- unicode = str ++ str = str + bytes = bytes +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/hpack/hpack/hpack.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/hpack/hpack/hpack.py 2025-01-16 02:26:08.554513503 +0800 +@@ -31,9 +31,9 @@ + _PREFIX_BIT_MAX_NUMBERS = [(2 ** i) - 1 for i in range(9)] + + try: # pragma: no cover +- basestring = basestring ++ str = str + except NameError: # pragma: no cover +- basestring = (str, bytes) ++ str = (str, bytes) + + + # We default the maximum header list we're willing to accept to 64kB. That's a +@@ -137,7 +137,7 @@ + """ + assert isinstance(header_dict, dict) + keys = sorted( +- header_dict.keys(), ++ list(header_dict.keys()), + key=lambda k: not _to_bytes(k).startswith(b':') + ) + for key in keys: +@@ -148,7 +148,7 @@ + """ + Convert string to bytes. 
+ """ +- if not isinstance(string, basestring): # pragma: no cover ++ if not isinstance(string, str): # pragma: no cover + string = str(string) + + return string if isinstance(string, bytes) else string.encode('utf-8') +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/parse.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/parse.py 2025-01-16 02:26:08.554513503 +0800 +@@ -101,7 +101,7 @@ + + def printOutput(parser, document, opts): + if opts.encoding: +- print("Encoding:", parser.tokenizer.stream.charEncoding) ++ print(("Encoding:", parser.tokenizer.stream.charEncoding)) + + for item in parser.log: + print(item) +@@ -120,7 +120,7 @@ + if not hasattr(document, '__getitem__'): + document = [document] + for fragment in document: +- print(parser.tree.testSerializer(fragment)) ++ print((parser.tree.testSerializer(fragment))) + elif opts.html: + kwargs = {} + for opt in serializer.HTMLSerializer.options: +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/__init__.py 2025-01-16 02:26:08.554513503 +0800 +@@ -20,7 +20,7 @@ + * :func:`~.serializer.serialize` + """ + +-from __future__ import absolute_import, division, unicode_literals ++ + + from .html5parser import HTMLParser, parse, parseFragment + from .treebuilders import getTreeBuilder +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/_ihatexml.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/_ihatexml.py 2025-01-16 02:26:08.555596818 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + import re + import warnings +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/_inputstream.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/_inputstream.py 2025-01-16 02:26:08.555596818 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from six import text_type + from six.moves import http_client, urllib +@@ -598,7 +598,7 @@ + raise TypeError + return self[p:p + 1] + +- def next(self): ++ def __next__(self): + # Py2 compat + return self.__next__() + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/_tokenizer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/_tokenizer.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,6 +1,6 @@ +-from __future__ import absolute_import, division, unicode_literals + +-from six import unichr as chr ++ ++from six import chr as chr + + from collections import deque, OrderedDict + from sys import version_info +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/_utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ 
b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/_utils.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,11 +1,11 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from types import ModuleType + + try: + from collections.abc import Mapping + except ImportError: +- from collections import Mapping ++ from collections.abc import Mapping + + from six import text_type, PY3 + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/constants.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/constants.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + import string + +@@ -520,7 +520,7 @@ + } + + unadjustForeignAttributes = {(ns, local): qname for qname, (prefix, local, ns) in +- adjustForeignAttributes.items()} ++ list(adjustForeignAttributes.items())} + + spaceCharacters = frozenset([ + "\t", +@@ -2933,7 +2933,7 @@ + tokenTypes["EmptyTag"]]) + + +-prefixes = {v: k for k, v in namespaces.items()} ++prefixes = {v: k for k, v in list(namespaces.items())} + prefixes["http://www.w3.org/1998/Math/MathML"] = "math" + + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/html5parser.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/html5parser.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + from six import with_metaclass, viewkeys + + import types +@@ -74,7 +74,7 @@ + def method_decorator_metaclass(function): + class Decorated(type): + def __new__(meta, classname, bases, classDict): +- for attributeName, attribute in classDict.items(): ++ for attributeName, attribute in list(classDict.items()): + if isinstance(attribute, types.FunctionType): + attribute = function(attribute) + +@@ -119,7 +119,7 @@ + self.errors = [] + + self.phases = {name: cls(self, self.tree) for name, cls in +- getPhases(debug).items()} ++ list(getPhases(debug).items())} + + def _parse(self, stream, innerHTML=False, container="div", scripting=False, **kwargs): + +@@ -397,7 +397,7 @@ + def getPhases(debug): + def log(function): + """Logger that records which phase processes each token""" +- type_names = {value: key for key, value in tokenTypes.items()} ++ type_names = {value: key for key, value in list(tokenTypes.items())} + + def wrapped(self, *args, **kwargs): + if function.__name__.startswith("process") and len(args) > 0: +@@ -473,7 +473,7 @@ + self.parser.parseError("non-html-root") + # XXX Need a check here to see if the first start tag token emitted is + # this token... If it's not, invoke self.parser.parseError(). 
+- for attr, value in token["data"].items(): ++ for attr, value in list(token["data"].items()): + if attr not in self.tree.openElements[0].attributes: + self.tree.openElements[0].attributes[attr] = value + self.parser.firstStartTag = False +@@ -1020,7 +1020,7 @@ + assert self.parser.innerHTML + else: + self.parser.framesetOK = False +- for attr, value in token["data"].items(): ++ for attr, value in list(token["data"].items()): + if attr not in self.tree.openElements[1].attributes: + self.tree.openElements[1].attributes[attr] = value + +@@ -2779,7 +2779,7 @@ + needs_adjustment = viewkeys(token['data']) & viewkeys(replacements) + if needs_adjustment: + token['data'] = type(token['data'])((replacements.get(k, k), v) +- for k, v in token['data'].items()) ++ for k, v in list(token['data'].items())) + + + def impliedTagToken(name, type="EndTag", attributes=None, +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/serializer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/serializer.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + from six import text_type + + import re +@@ -298,7 +298,7 @@ + in_cdata = True + elif in_cdata: + self.serializeError("Unexpected child element of a CDATA element") +- for (_, attr_name), attr_value in token["data"].items(): ++ for (_, attr_name), attr_value in list(token["data"].items()): + # TODO: Add namespace support here + k = attr_name + v = attr_value +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/_trie/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/_trie/__init__.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from .py import Trie + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/_trie/_base.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/_trie/_base.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,9 +1,9 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + try: + from collections.abc import Mapping + except ImportError: # Python 2.7 +- from collections import Mapping ++ from collections.abc import Mapping + + + class Trie(Mapping): +@@ -11,7 +11,7 @@ + + def keys(self, prefix=None): + # pylint:disable=arguments-differ +- keys = super(Trie, self).keys() ++ keys = list(super(Trie, self).keys()) + + if prefix is None: + return set(keys) +@@ -19,7 +19,7 @@ + return {x for x in keys if x.startswith(prefix)} + + def has_keys_with_prefix(self, prefix): +- for key in self.keys(): ++ for key in list(self.keys()): + if key.startswith(prefix): + return True + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/_trie/py.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/_trie/py.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, 
unicode_literals ++ + from six import text_type + + from bisect import bisect_left +@@ -8,7 +8,7 @@ + + class Trie(ABCTrie): + def __init__(self, data): +- if not all(isinstance(x, text_type) for x in data.keys()): ++ if not all(isinstance(x, text_type) for x in list(data.keys())): + raise TypeError("All keys must be strings") + + self._data = data +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/filters/alphabeticalattributes.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/filters/alphabeticalattributes.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from . import base + +@@ -22,7 +22,7 @@ + for token in base.Filter.__iter__(self): + if token["type"] in ("StartTag", "EmptyTag"): + attrs = OrderedDict() +- for name, value in sorted(token["data"].items(), ++ for name, value in sorted(list(token["data"].items()), + key=_attr_key): + attrs[name] = value + token["data"] = attrs +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/filters/base.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/filters/base.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + + class Filter(object): +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/filters/inject_meta_charset.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/filters/inject_meta_charset.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from . 
import base + +@@ -31,7 +31,7 @@ + if token["name"].lower() == "meta": + # replace charset with actual encoding + has_http_equiv_content_type = False +- for (namespace, name), value in token["data"].items(): ++ for (namespace, name), value in list(token["data"].items()): + if namespace is not None: + continue + elif name.lower() == 'charset': +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/filters/lint.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/filters/lint.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from six import text_type + +@@ -44,7 +44,7 @@ + assert type == "StartTag" + if type == "StartTag" and self.require_matching_tags: + open_elements.append((namespace, name)) +- for (namespace, name), value in token["data"].items(): ++ for (namespace, name), value in list(token["data"].items()): + assert namespace is None or isinstance(namespace, text_type) + assert namespace != "" + assert isinstance(name, text_type) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/filters/optionaltags.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/filters/optionaltags.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from . import base + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/filters/sanitizer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/filters/sanitizer.py 2025-01-16 02:26:08.556680132 +0800 +@@ -6,7 +6,7 @@ + if Bleach is unsuitable for your needs. + + """ +-from __future__ import absolute_import, division, unicode_literals ++ + + import re + import warnings +@@ -873,7 +873,7 @@ + elif token["data"]: + assert token_type in ("StartTag", "EmptyTag") + attrs = [] +- for (ns, name), v in token["data"].items(): ++ for (ns, name), v in list(token["data"].items()): + attrs.append(' %s="%s"' % (name if ns is None else "%s:%s" % (prefixes[ns], name), escape(v))) + token["data"] = "<%s%s>" % (token["name"], ''.join(attrs)) + else: +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/filters/whitespace.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/filters/whitespace.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + import re + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treeadapters/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treeadapters/__init__.py 2025-01-16 02:26:08.556680132 +0800 +@@ -16,7 +16,7 @@ + genshi_tree = genshi.to_genshi(TreeWalker(tree)) + + """ +-from __future__ import absolute_import, division, unicode_literals ++ + + from . 
import sax + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treeadapters/genshi.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treeadapters/genshi.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from genshi.core import QName, Attrs + from genshi.core import START, END, TEXT, COMMENT, DOCTYPE +@@ -27,7 +27,7 @@ + else: + name = token["name"] + attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value) +- for attr, value in token["data"].items()]) ++ for attr, value in list(token["data"].items())]) + yield (START, (QName(name), attrs), (None, -1, -1)) + if type == "EmptyTag": + type = "EndTag" +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treeadapters/sax.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treeadapters/sax.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,11 +1,11 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from xml.sax.xmlreader import AttributesNSImpl + + from ..constants import adjustForeignAttributes, unadjustForeignAttributes + + prefix_mapping = {} +-for prefix, localName, namespace in adjustForeignAttributes.values(): ++for prefix, localName, namespace in list(adjustForeignAttributes.values()): + if prefix is not None: + prefix_mapping[prefix] = namespace + +@@ -19,7 +19,7 @@ + + """ + handler.startDocument() +- for prefix, namespace in prefix_mapping.items(): ++ for prefix, namespace in list(prefix_mapping.items()): + handler.startPrefixMapping(prefix, namespace) + + for token in walker: +@@ -45,6 +45,6 @@ + else: + assert False, "Unknown token type" + +- for prefix, namespace in prefix_mapping.items(): ++ for prefix, namespace in list(prefix_mapping.items()): + handler.endPrefixMapping(prefix) + handler.endDocument() +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treebuilders/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treebuilders/__init__.py 2025-01-16 02:26:08.556680132 +0800 +@@ -29,7 +29,7 @@ + + """ + +-from __future__ import absolute_import, division, unicode_literals ++ + + from .._utils import default_etree + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treebuilders/base.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treebuilders/base.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + from six import text_type + + from ..constants import scopingElements, tableInsertModeElements, namespaces +@@ -45,7 +45,7 @@ + def __str__(self): + attributesStr = " ".join(["%s=\"%s\"" % (name, value) + for name, value in +- self.attributes.items()]) ++ list(self.attributes.items())]) + if attributesStr: + return "<%s %s>" % (self.name, attributesStr) + else: +--- 
a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treebuilders/dom.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treebuilders/dom.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,10 +1,10 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + + try: + from collections.abc import MutableMapping + except ImportError: # Python 2.7 +- from collections import MutableMapping ++ from collections.abc import MutableMapping + from xml.dom import minidom, Node + import weakref + +@@ -22,7 +22,7 @@ + self.element = element + + def __iter__(self): +- return iter(self.element.attributes.keys()) ++ return iter(list(self.element.attributes.keys())) + + def __setitem__(self, name, value): + if isinstance(name, tuple): +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treebuilders/etree.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treebuilders/etree.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + # pylint:disable=protected-access + + from six import text_type +@@ -68,7 +68,7 @@ + if attributes: + # calling .items _always_ allocates, and the above truthy check is cheaper than the + # allocation on average +- for key, value in attributes.items(): ++ for key, value in list(attributes.items()): + if isinstance(key, tuple): + name = "{%s}%s" % (key[2], key[1]) + else: +@@ -236,7 +236,7 @@ + + if hasattr(element, "attrib"): + attributes = [] +- for name, value in element.attrib.items(): ++ for name, value in list(element.attrib.items()): + nsmatch = tag_regexp.match(name) + if nsmatch is not None: + ns, name = nsmatch.groups() +@@ -296,7 +296,7 @@ + else: + attr = " ".join(["%s=\"%s\"" % ( + filter.fromXmlName(name), value) +- for name, value in element.attrib.items()]) ++ for name, value in list(element.attrib.items())]) + rv.append("<%s %s>" % (element.tag, attr)) + if element.text: + rv.append(element.text) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treebuilders/etree_lxml.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treebuilders/etree_lxml.py 2025-01-16 02:26:08.556680132 +0800 +@@ -9,7 +9,7 @@ + When any of these things occur, we emit a DataLossWarning + """ + +-from __future__ import absolute_import, division, unicode_literals ++ + # pylint:disable=protected-access + + import warnings +@@ -19,7 +19,7 @@ + try: + from collections.abc import MutableMapping + except ImportError: +- from collections import MutableMapping ++ from collections.abc import MutableMapping + + from . 
import base + from ..constants import DataLossWarning +@@ -115,7 +115,7 @@ + + if hasattr(element, "attrib"): + attributes = [] +- for name, value in element.attrib.items(): ++ for name, value in list(element.attrib.items()): + nsmatch = tag_regexp.match(name) + if nsmatch is not None: + ns, name = nsmatch.groups() +@@ -164,7 +164,7 @@ + rv.append("<%s>" % (element.tag,)) + else: + attr = " ".join(["%s=\"%s\"" % (name, value) +- for name, value in element.attrib.items()]) ++ for name, value in list(element.attrib.items())]) + rv.append("<%s %s>" % (element.tag, attr)) + if element.text: + rv.append(element.text) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treewalkers/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treewalkers/__init__.py 2025-01-16 02:26:08.556680132 +0800 +@@ -8,7 +8,7 @@ + returns an iterator which generates tokens. + """ + +-from __future__ import absolute_import, division, unicode_literals ++ + + from .. import constants + from .._utils import default_etree +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treewalkers/base.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treewalkers/base.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from xml.dom import Node + from ..constants import namespaces, voidElements, spaceCharacters +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treewalkers/dom.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treewalkers/dom.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from xml.dom import Node + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treewalkers/etree.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treewalkers/etree.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from collections import OrderedDict + import re +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treewalkers/etree_lxml.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treewalkers/etree_lxml.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + from six import text_type + + from collections import OrderedDict +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treewalkers/genshi.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/html5lib/html5lib/treewalkers/genshi.py 2025-01-16 02:26:08.556680132 +0800 +@@ -1,4 +1,4 @@ 
+-from __future__ import absolute_import, division, unicode_literals ++ + + from genshi.core import QName + from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/hyperframe/hyperframe/frame.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/hyperframe/hyperframe/frame.py 2025-01-16 02:26:08.556680132 +0800 +@@ -407,7 +407,7 @@ + + def serialize_body(self): + return b''.join([_STRUCT_HL.pack(setting & 0xFF, value) +- for setting, value in self.settings.items()]) ++ for setting, value in list(self.settings.items())]) + + def parse_body(self, data): + body_len = 0 +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/six/six.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/six/six.py 2025-01-16 02:26:08.556680132 +0800 +@@ -20,7 +20,7 @@ + + """Utilities for writing code that runs on Python 2 and 3""" + +-from __future__ import absolute_import ++ + + import functools + import itertools +@@ -46,10 +46,10 @@ + + MAXSIZE = sys.maxsize + else: +- string_types = basestring, +- integer_types = (int, long) +- class_types = (type, types.ClassType) +- text_type = unicode ++ string_types = str, ++ integer_types = (int, int) ++ class_types = (type, type) ++ text_type = str + binary_type = str + + if sys.platform.startswith("java"): +@@ -529,7 +529,7 @@ + advance_iterator = next + except NameError: + def advance_iterator(it): +- return it.next() ++ return it.__next__() + next = advance_iterator + + +@@ -552,7 +552,7 @@ + Iterator = object + else: + def get_unbound_function(unbound): +- return unbound.im_func ++ return unbound.__func__ + + def create_bound_method(func, obj): + return types.MethodType(func, obj, obj.__class__) +@@ -562,7 +562,7 @@ + + class Iterator(object): + +- def next(self): ++ def __next__(self): + return type(self).__next__(self) + + callable = callable +@@ -629,7 +629,7 @@ + + def u(s): + return s +- unichr = chr ++ chr = chr + import struct + int2byte = struct.Struct(">B").pack + del struct +@@ -655,8 +655,8 @@ + # Workaround for standalone backslash + + def u(s): +- return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") +- unichr = unichr ++ return str(s.replace(r'\\', r'\\\\'), "unicode_escape") ++ chr = chr + int2byte = chr + + def byte2int(bs): +@@ -665,8 +665,8 @@ + def indexbytes(buf, i): + return ord(buf[i]) + iterbytes = functools.partial(itertools.imap, ord) +- import StringIO +- StringIO = BytesIO = StringIO.StringIO ++ import io ++ StringIO = BytesIO = io.StringIO + _assertCountEqual = "assertItemsEqual" + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" +@@ -747,11 +747,11 @@ + return + + def write(data): +- if not isinstance(data, basestring): ++ if not isinstance(data, str): + data = str(data) + # If the file has an encoding, encode unicode with it. 
+ if (isinstance(fp, file) and +- isinstance(data, unicode) and ++ isinstance(data, str) and + fp.encoding is not None): + errors = getattr(fp, "errors", None) + if errors is None: +@@ -761,13 +761,13 @@ + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: +- if isinstance(sep, unicode): ++ if isinstance(sep, str): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: +- if isinstance(end, unicode): ++ if isinstance(end, str): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") +@@ -775,12 +775,12 @@ + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: +- if isinstance(arg, unicode): ++ if isinstance(arg, str): + want_unicode = True + break + if want_unicode: +- newline = unicode("\n") +- space = unicode(" ") ++ newline = str("\n") ++ space = str(" ") + else: + newline = "\n" + space = " " +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/webencodings/webencodings/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/webencodings/webencodings/__init__.py 2025-01-16 02:26:08.556680132 +0800 +@@ -12,7 +12,7 @@ + + """ + +-from __future__ import unicode_literals ++ + + import codecs + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/webencodings/webencodings/mklabels.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/webencodings/webencodings/mklabels.py 2025-01-16 02:26:08.556680132 +0800 +@@ -12,7 +12,7 @@ + + import json + try: +- from urllib import urlopen ++ from urllib.request import urlopen + except ImportError: + from urllib.request import urlopen + +@@ -56,4 +56,4 @@ + + + if __name__ == '__main__': +- print(generate('http://encoding.spec.whatwg.org/encodings.json')) ++ print((generate('http://encoding.spec.whatwg.org/encodings.json'))) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/webencodings/webencodings/tests.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/webencodings/webencodings/tests.py 2025-01-16 02:26:08.556680132 +0800 +@@ -11,7 +11,7 @@ + + """ + +-from __future__ import unicode_literals ++ + + from . 
import (lookup, LABELS, decode, encode, iter_decode, iter_encode, + IncrementalDecoder, IncrementalEncoder, UTF8) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/webencodings/webencodings/x_user_defined.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/third_party/webencodings/webencodings/x_user_defined.py 2025-01-16 02:26:08.556680132 +0800 +@@ -11,7 +11,7 @@ + + """ + +-from __future__ import unicode_literals ++ + + import codecs + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wpt/browser.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wpt/browser.py 2025-01-16 02:26:08.556680132 +0800 +@@ -47,9 +47,7 @@ + return resp + + +-class Browser(object): +- __metaclass__ = ABCMeta +- ++class Browser(object, metaclass=ABCMeta): + def __init__(self, logger): + self.logger = logger + +@@ -800,7 +798,7 @@ + return m.group(1) + + +-class ChromeAndroidBase(Browser): ++class ChromeAndroidBase(Browser, metaclass=ABCMeta): + """A base class for ChromeAndroid and AndroidWebView. + + On Android, WebView is based on Chromium open source project, and on some +@@ -808,7 +806,6 @@ + a very similar WPT runner implementation. + Includes webdriver installation. + """ +- __metaclass__ = ABCMeta # This is an abstract class. + + def __init__(self, logger): + super(ChromeAndroidBase, self).__init__(logger) +@@ -1114,11 +1111,11 @@ + if os.path.isfile(edgedriver_path): + # remove read-only attribute + os.chmod(edgedriver_path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) # 0777 +- print("Delete %s file" % edgedriver_path) ++ print(("Delete %s file" % edgedriver_path)) + os.remove(edgedriver_path) + driver_notes_path = os.path.join(dest, "Driver_notes") + if os.path.isdir(driver_notes_path): +- print("Delete %s folder" % driver_notes_path) ++ print(("Delete %s folder" % driver_notes_path)) + rmtree(driver_notes_path) + + self.logger.info("Downloading MSEdgeDriver from %s" % url) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wpt/create.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wpt/create.py 2025-01-16 02:26:08.556680132 +0800 +@@ -63,11 +63,11 @@ + kwargs["reftest"] = True + + if ".." in path: +- print("""Test path %s is not under wpt root.""" % path) ++ print(("""Test path %s is not under wpt root.""" % path)) + return 1 + + if ref_path and ".." 
in ref_path: +- print("""Reference path %s is not under wpt root""" % ref_path) ++ print(("""Reference path %s is not under wpt root""" % ref_path)) + return 1 + + +@@ -125,7 +125,7 @@ + path = "%s %s" % (path, ref_path) + proc = subprocess.Popen("%s %s" % (editor, path), shell=True) + else: +- print("Created test %s" % path) ++ print(("Created test %s" % path)) + + if proc: + proc.wait() +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wpt/install.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wpt/install.py 2025-01-16 02:26:08.556680132 +0800 +@@ -22,7 +22,7 @@ + } + + channel_args = argparse.ArgumentParser(add_help=False) +-channel_args.add_argument('--channel', choices=channel_by_name.keys(), ++channel_args.add_argument('--channel', choices=list(channel_by_name.keys()), + default='nightly', action='store', + help=''' + Name of browser release channel (default: nightly). "stable" and "release" are +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wpt/markdown.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wpt/markdown.py 2025-01-16 02:26:08.556680132 +0800 +@@ -17,17 +17,17 @@ + + def markdown_adjust(s): + """Escape problematic markdown sequences.""" +- s = s.replace('\t', u'\\t') +- s = s.replace('\n', u'\\n') +- s = s.replace('\r', u'\\r') +- s = s.replace('`', u'') +- s = s.replace('|', u'\\|') ++ s = s.replace('\t', '\\t') ++ s = s.replace('\n', '\\n') ++ s = s.replace('\r', '\\r') ++ s = s.replace('`', '') ++ s = s.replace('|', '\\|') + return s + + + def table(headings, data, log): + """Create and log data to specified logger in tabular format.""" +- cols = range(len(headings)) ++ cols = list(range(len(headings))) + assert all(len(item) == len(cols) for item in data) + max_widths = reduce(lambda prev, cur: [(len(cur[i]) + 2) + if (len(cur[i]) + 2) > prev[i] +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wpt/revlist.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wpt/revlist.py 2025-01-16 02:26:08.556680132 +0800 +@@ -40,15 +40,15 @@ + git = get_git_cmd(wpt_root) + args = [ + pattern, +- u'--sort=-committerdate', +- u'--format=%(refname:lstrip=2) %(objectname) %(committerdate:raw)', +- u'--count=100000' ++ '--sort=-committerdate', ++ '--format=%(refname:lstrip=2) %(objectname) %(committerdate:raw)', ++ '--count=100000' + ] +- ref_list = git(u"for-each-ref", *args) ++ ref_list = git("for-each-ref", *args) + for line in ref_list.splitlines(): + if not line: + continue +- tag, commit, date, _ = line.split(u" ") ++ tag, commit, date, _ = line.split(" ") + date = int(date) + yield tag, commit, date + +@@ -84,7 +84,7 @@ + # Expected result: N,M,K,J,H,G,F,C,A + + cutoff_date = calculate_cutoff_date(until, epoch, epoch_offset) +- for _, commit, date in get_tagged_revisions(u"refs/tags/merge_pr_*"): ++ for _, commit, date in get_tagged_revisions("refs/tags/merge_pr_*"): + if count >= max_count: + return + if date < cutoff_date: +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wpt/testfiles.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wpt/testfiles.py 2025-01-16 02:26:08.556680132 +0800 +@@ -45,7 +45,7 @@ + + def 
display_branch_point(): + # type: () -> None +- print(branch_point()) ++ print((branch_point())) + + + def branch_point(): +@@ -124,17 +124,17 @@ + + def compile_ignore_rule(rule): + # type: (Text) -> Pattern[Text] +- rule = rule.replace(ensure_text(os.path.sep), u"/") +- parts = rule.split(u"/") ++ rule = rule.replace(ensure_text(os.path.sep), "/") ++ parts = rule.split("/") + re_parts = [] + for part in parts: +- if part.endswith(u"**"): +- re_parts.append(re.escape(part[:-2]) + u".*") +- elif part.endswith(u"*"): +- re_parts.append(re.escape(part[:-1]) + u"[^/]*") ++ if part.endswith("**"): ++ re_parts.append(re.escape(part[:-2]) + ".*") ++ elif part.endswith("*"): ++ re_parts.append(re.escape(part[:-1]) + "[^/]*") + else: + re_parts.append(re.escape(part)) +- return re.compile(u"^%s$" % u"/".join(re_parts)) ++ return re.compile("^%s$" % "/".join(re_parts)) + + + def repo_files_changed(revish, include_uncommitted=False, include_new=False): +@@ -143,7 +143,7 @@ + if git is None: + raise Exception("git not found") + +- files_list = git("diff", "--name-only", "-z", revish).split(u"\0") ++ files_list = git("diff", "--name-only", "-z", revish).split("\0") + assert not files_list[-1] + files = set(files_list[:-1]) + +@@ -218,7 +218,7 @@ + def load_manifest(manifest_path=None, manifest_update=True): + # type: (Optional[Text], bool) -> manifest.Manifest + if manifest_path is None: +- manifest_path = os.path.join(wpt_root, u"MANIFEST.json") ++ manifest_path = os.path.join(wpt_root, "MANIFEST.json") + return manifest.load_and_update(wpt_root, manifest_path, "/", + update=manifest_update) + +@@ -231,7 +231,7 @@ + # type: (...) -> Tuple[Set[Text], Set[Text]] + """Determine and return list of test files that reference changed files.""" + if skip_dirs is None: +- skip_dirs = {u"conformance-checkers", u"docs", u"tools"} ++ skip_dirs = {"conformance-checkers", "docs", "tools"} + affected_testfiles = set() + # Exclude files that are in the repo root, because + # they are not part of any test. +@@ -370,7 +370,7 @@ + # type: (**Any) -> Text + revish = kwargs.get("revish") + if revish is None: +- revish = u"%s..HEAD" % branch_point() ++ revish = "%s..HEAD" % branch_point() + return ensure_text(revish).strip() + + +@@ -382,7 +382,7 @@ + include_uncommitted=kwargs["modified"], + include_new=kwargs["new"]) + +- separator = u"\0" if kwargs["null"] else u"\n" ++ separator = "\0" if kwargs["null"] else "\n" + + for item in sorted(changed): + line = os.path.relpath(item, wpt_root) + separator +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/config.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/config.py 2025-01-16 02:26:08.556680132 +0800 +@@ -302,7 +302,7 @@ + + rv = {} + for name, host in iteritems(hosts): +- rv[name] = {subdomain: (subdomain.encode("idna").decode("ascii") + u"." + host) ++ rv[name] = {subdomain: (subdomain.encode("idna").decode("ascii") + "." + host) + for subdomain in data["subdomains"]} + rv[name][""] = host + return rv +@@ -314,7 +314,7 @@ + + rv = {} + for name, host in iteritems(hosts): +- rv[name] = {subdomain: (subdomain.encode("idna").decode("ascii") + u"." + host) ++ rv[name] = {subdomain: (subdomain.encode("idna").decode("ascii") + "." 
+ host) + for subdomain in data["not_subdomains"]} + return rv + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/request.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/request.py 2025-01-16 02:26:08.556680132 +0800 +@@ -378,7 +378,7 @@ + (i.e. names of headers) and values have binary type. + """ + def __init__(self, items): +- for header in items.keys(): ++ for header in list(items.keys()): + key = isomorphic_encode(header).lower() + # get all headers with the same name + values = items.getallmatchingheaders(header) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/response.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/response.py 2025-01-16 02:26:08.561013392 +0800 +@@ -179,9 +179,9 @@ + cookie = isomorphic_decode(cookie) + parser.load(cookie) + +- if name in parser.keys(): ++ if name in list(parser.keys()): + del self.headers["Set-Cookie"] +- for m in parser.values(): ++ for m in list(parser.values()): + if m.key != name: + self.headers.append(("Set-Cookie", m.OutputString())) + +@@ -241,7 +241,7 @@ + self.write_status_headers() + self.write_content() + +- def set_error(self, code, message=u""): ++ def set_error(self, code, message=""): + """Set the response status headers and return a JSON error object: + + {"error": {"code": code, "message": message}} +@@ -413,9 +413,9 @@ + item = None + item_iter = self.iter_content() + try: +- item = item_iter.next() ++ item = next(item_iter) + while True: +- check_last = item_iter.next() ++ check_last = next(item_iter) + self.writer.write_data(item, last=False) + item = check_last + except StopIteration: +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/server.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/server.py 2025-01-16 02:26:08.561013392 +0800 +@@ -382,7 +382,7 @@ + self.close_connection = True + + # Flood all the streams with connection terminated, this will cause them to stop +- for stream_id, (thread, queue) in stream_queues.items(): ++ for stream_id, (thread, queue) in list(stream_queues.items()): + queue.put(frame) + + elif hasattr(frame, 'stream_id'): +@@ -398,12 +398,12 @@ + self.logger.error('(%s) Closing Connection - \n%s' % (self.uid, str(e))) + if not self.close_connection: + self.close_connection = True +- for stream_id, (thread, queue) in stream_queues.items(): ++ for stream_id, (thread, queue) in list(stream_queues.items()): + queue.put(None) + except Exception as e: + self.logger.error('(%s) Unexpected Error - \n%s' % (self.uid, str(e))) + finally: +- for stream_id, (thread, queue) in stream_queues.items(): ++ for stream_id, (thread, queue) in list(stream_queues.items()): + thread.join() + + def start_stream_thread(self, frame, queue): +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/utils.py 2025-01-16 02:26:08.561013392 +0800 +@@ -42,7 +42,7 @@ + + def invert_dict(dict): + rv = {} +- for key, values in dict.items(): ++ for key, values in list(dict.items()): + for value in 
values:
+ if value in rv:
+ raise ValueError
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/wptserve.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/third_party/wpt/wpt/tools/wptserve/wptserve/wptserve.py 2025-01-16 02:26:08.561013392 +0800
+@@ -2,7 +2,7 @@
+ import argparse
+ import os
+
+-import server
++from . import server
+
+ def abs_path(path):
+ return os.path.abspath(path)
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/tool/commands/abstract_local_server_command.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/tool/commands/abstract_local_server_command.py 2025-01-16 02:26:08.561013392 +0800
+@@ -57,8 +57,8 @@
+
+ server_url = 'http://localhost:%d%s' % (options.httpd_port,
+ self.launch_path)
+- print 'Starting server at %s' % server_url
+- print "Use the 'Exit' link in the UI, %squitquitquit or Ctrl-C to stop" % server_url
++ print('Starting server at %s' % server_url)
++ print("Use the 'Exit' link in the UI, %squitquitquit or Ctrl-C to stop" % server_url)
+
+ if options.show_results:
+ # FIXME: This seems racy.
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/tool/commands/analyze_baselines.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/tool/commands/analyze_baselines.py 2025-01-16 02:26:08.561013392 +0800
+@@ -56,7 +56,7 @@
+ self._tool = None
+
+ def _write(self, msg):
+- print msg
++ print(msg)
+
+ def _analyze_baseline(self, options, test_name):
+ # TODO(robertma): Investigate changing the CLI to take extensions with leading '.'.
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/tool/commands/flaky_tests.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/tool/commands/flaky_tests.py 2025-01-16 02:26:08.561013392 +0800
+@@ -121,5 +121,5 @@
+ ','.join(test_names)
+ expectations_string = '\n'.join(line.to_string() for line in lines)
+
+- print self.OUTPUT % (self.HEADER, expectations_string,
+- flakiness_dashboard_url)
++ print(self.OUTPUT % (self.HEADER, expectations_string,
++ flakiness_dashboard_url))
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/tool/commands/help_command.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/tool/commands/help_command.py 2025-01-16 02:26:08.561013392 +0800
+@@ -58,8 +58,7 @@
+ relevant_commands = self._tool.commands[:]
+ else:
+ epilog = 'Common %prog commands:\n'
+- relevant_commands = filter(self._tool.should_show_in_main_help,
+- self._tool.commands)
++ relevant_commands = list(filter(self._tool.should_show_in_main_help, self._tool.commands))
+ longest_name_length = max(
+ len(command.name) for command in relevant_commands)
+- relevant_commands.sort(lambda a, b: cmp(a.name, b.name))
++ relevant_commands.sort(key=lambda c: c.name)
+@@ -83,7 +82,7 @@
+ if args:
+ command = self._tool.command_by_name(args[0])
+ if command:
+- print command.standalone_help()
++ print(command.standalone_help())
+ return 0
+
+ self.show_all_commands = options.show_all_commands
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/tool/commands/pretty_diff.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/tool/commands/pretty_diff.py 2025-01-16 02:26:08.561013392 +0800
+@@ -30,7 +30,7 @@
+ import optparse
+ import sys
+ import tempfile
+-import urllib
++import urllib.request, urllib.parse, 
urllib.error + + from blinkpy.common.pretty_diff import prettify_diff + from blinkpy.common.system.executive import ScriptError +@@ -101,5 +101,5 @@ + return diff_file + + def _open_pretty_diff(self, file_path): +- url = 'file://%s' % urllib.quote(file_path) ++ url = 'file://%s' % urllib.parse.quote(file_path) + self._tool.user.open_url(url) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/tool/commands/queries.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/tool/commands/queries.py 2025-01-16 02:26:08.561013392 +0800 +@@ -53,7 +53,7 @@ + pid = None + if len(args) > 1: + pid = int(args[1]) +- print crash_logs.find_newest_log(args[0], pid) ++ print(crash_logs.find_newest_log(args[0], pid)) + + + class PrintExpectations(Command): +@@ -103,7 +103,7 @@ + + def execute(self, options, args, tool): + if not options.paths and not args and not options.all: +- print 'You must either specify one or more test paths or --all.' ++ print('You must either specify one or more test paths or --all.') + return + + if options.platform: +@@ -114,7 +114,7 @@ + if default_port: + port_names = [default_port.name()] + else: +- print "No port names match '%s'" % options.platform ++ print("No port names match '%s'" % options.platform) + return + else: + default_port = tool.port_factory.get(port_names[0]) +@@ -129,7 +129,7 @@ + if file.startswith(web_tests_dir): + file = file.replace(web_tests_dir, + WEB_TESTS_LAST_COMPONENT) +- print file ++ print(file) + return + + tests = set(default_port.tests(args)) +@@ -143,8 +143,8 @@ + for test in sorted(tests_to_print) + ] + if port_name != port_names[0]: +- print +- print '\n'.join(self._format_lines(options, port_name, lines)) ++ print() ++ print('\n'.join(self._format_lines(options, port_name, lines))) + + @staticmethod + def _test_set_for_keyword(keyword, test_expectations, tests): +@@ -220,7 +220,7 @@ + + def execute(self, options, args, tool): + if not args and not options.all: +- print 'You must either specify one or more test paths or --all.' 
++ print('You must either specify one or more test paths or --all.') + return + + default_port = tool.port_factory.get() +@@ -228,7 +228,7 @@ + port_names = fnmatch.filter(tool.port_factory.all_port_names(), + options.platform) + if not port_names: +- print "No port names match '%s'" % options.platform ++ print("No port names match '%s'" % options.platform) + else: + port_names = [default_port.name()] + +@@ -239,9 +239,9 @@ + + for port_name in port_names: + if port_name != port_names[0]: +- print ++ print() + if not options.csv: +- print '// For %s' % port_name ++ print('// For %s' % port_name) + port = tool.port_factory.get(port_name) + for test_name in tests: + self._print_baselines( +@@ -253,13 +253,13 @@ + baseline_location = baselines[extension] + if baseline_location: + if options.csv: +- print '%s,%s,%s,%s,%s,%s' % ( ++ print('%s,%s,%s,%s,%s,%s' % ( + port_name, test_name, + self._platform_for_path(test_name), extension[1:], + baseline_location, +- self._platform_for_path(baseline_location)) ++ self._platform_for_path(baseline_location))) + else: +- print baseline_location ++ print(baseline_location) + + def _platform_for_path(self, relpath): + platform_matchobj = self._platform_regexp.match(relpath) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/tool/commands/rebaseline.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/tool/commands/rebaseline.py 2025-01-16 02:26:08.561013392 +0800 +@@ -183,7 +183,7 @@ + + def _iter_combinations(self): + """Iterates through (test, build, port) combinations.""" +- for test_prefix, build_port_pairs in self._test_prefix_map.iteritems(): ++ for test_prefix, build_port_pairs in self._test_prefix_map.items(): + for test in self._port.tests([test_prefix]): + for build, port_name in build_port_pairs: + yield (test, build, port_name) +@@ -276,7 +276,7 @@ + for builder in list(release_builders) + list(debug_builders): + port = self._tool.port_factory.get_from_builder_name(builder) + fallback_path = port.baseline_search_path() +- if fallback_path not in builders_to_fallback_paths.values(): ++ if fallback_path not in list(builders_to_fallback_paths.values()): + builders_to_fallback_paths[builder] = fallback_path + + return set(builders_to_fallback_paths) +@@ -352,7 +352,7 @@ + change_set = ChangeSet() + for _, stdout, _ in command_results: + updated = False +- for line in filter(None, stdout.splitlines()): ++ for line in [_f for _f in stdout.splitlines() if _f]: + try: + parsed_line = json.loads(line) + change_set.update(ChangeSet.from_dict(parsed_line)) +@@ -376,7 +376,7 @@ + self._suffixes_for_actual_failures(test, build)) + + optimize_commands = [] +- for test, suffixes in tests_to_suffixes.iteritems(): ++ for test, suffixes in tests_to_suffixes.items(): + # No need to optimize baselines for a test with no failures. + if not suffixes: + continue +@@ -397,7 +397,7 @@ + return optimize_commands + + def _update_expectations_files(self, lines_to_remove): +- tests = lines_to_remove.keys() ++ tests = list(lines_to_remove.keys()) + to_remove = defaultdict(set) + all_versions = frozenset([ + config.version.lower() for config in self._tool.port_factory.get(). 
+@@ -416,7 +416,7 @@ + port.test_configuration().version.lower()) + + # Get configurations to remove based on builders for each test +- for test, port_names in lines_to_remove.items(): ++ for test, port_names in list(lines_to_remove.items()): + for port_name in port_names: + port = self._tool.port_factory.get(port_name) + if port.test_configuration().version.lower() in all_versions: +@@ -430,7 +430,7 @@ + path: self._tool.filesystem.read_text_file(path) + }) + system_remover = SystemConfigurationRemover(test_expectations) +- for test, versions in to_remove.items(): ++ for test, versions in list(to_remove.items()): + system_remover.remove_os_versions(test, versions) + system_remover.update_expectations() + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/tool/commands/rebaseline_cl.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/tool/commands/rebaseline_cl.py 2025-01-16 02:26:08.561013392 +0800 +@@ -201,7 +201,7 @@ + Args: + jobs: A dict mapping Build objects to TryJobStatus objects. + """ +- finished_jobs = {b for b, s in jobs.items() if s.status == 'COMPLETED'} ++ finished_jobs = {b for b, s in list(jobs.items()) if s.status == 'COMPLETED'} + if self.selected_try_bots.issubset( + {b.builder_name + for b in finished_jobs}): +@@ -240,7 +240,7 @@ + """ + results_fetcher = self._tool.results_fetcher + results = {} +- for build, status in jobs.iteritems(): ++ for build, status in jobs.items(): + if status == TryJobStatus('COMPLETED', 'SUCCESS'): + # Builds with passing try jobs are mapped to None, to indicate + # that there are no baselines to download. +@@ -310,7 +310,7 @@ + A TestBaselineSet object. + """ + builds_to_tests = {} +- for build, results in builds_to_results.iteritems(): ++ for build, results in builds_to_results.items(): + builds_to_tests[build] = self._tests_to_rebaseline(build, results) + if only_changed_tests: + files_in_cl = self._tool.git().changed_files(diff_filter='AM') +@@ -323,7 +323,7 @@ + ] + + test_baseline_set = TestBaselineSet(self._tool) +- for build, tests in builds_to_tests.iteritems(): ++ for build, tests in builds_to_tests.items(): + for test in tests: + if only_changed_tests and test not in tests_in_cl: + continue +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/tool/commands/rebaseline_server.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/tool/commands/rebaseline_server.py 2025-01-16 02:26:08.561013392 +0800 +@@ -83,7 +83,7 @@ + results_directory = args[0] + host = Host() + +- print 'Parsing full_results.json...' ++ print('Parsing full_results.json...') + results_json_path = host.filesystem.join(results_directory, + 'full_results.json') + results_json = json_results_generator.load_json( +@@ -96,7 +96,7 @@ + self._test_config = TestConfig(port, web_tests_directory, + results_directory, platforms, host) + +- print 'Gathering current baselines...' 
++ print('Gathering current baselines...') + self._gather_baselines(results_json) + + return { +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/android_wpt_expectations_updater.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/android_wpt_expectations_updater.py 2025-01-16 02:26:08.561013392 +0800 +@@ -28,6 +28,7 @@ + from blinkpy.web_tests.port.android import ( + PRODUCTS, PRODUCTS_TO_STEPNAMES, PRODUCTS_TO_BROWSER_TAGS, + PRODUCTS_TO_EXPECTATION_FILE_PATHS, ANDROID_DISABLED_TESTS) ++from functools import reduce + + _log = logging.getLogger(__name__) + +@@ -50,7 +51,7 @@ + # We need to put all the Android expectation files in + # the _test_expectations member variable so that the + # files get cleaned in cleanup_test_expectations_files() +- return (PRODUCTS_TO_EXPECTATION_FILE_PATHS.values() + ++ return (list(PRODUCTS_TO_EXPECTATION_FILE_PATHS.values()) + + [ANDROID_DISABLED_TESTS]) + + def _get_web_test_results(self, build): +@@ -72,7 +73,7 @@ + step_name = PRODUCTS_TO_STEPNAMES[product] + results_sets.append(self.host.results_fetcher.fetch_results( + build, True, '%s (with patch)' % step_name)) +- return filter(None, results_sets) ++ return [_f for _f in results_sets if _f] + + def get_builder_configs(self, build, results_set=None): + """Gets step name from WebTestResults instance and uses +@@ -99,7 +100,7 @@ + else: + step_name = results_set.step_name() + step_name = step_name[: step_name.index(' (with patch)')] +- product = {s: p for p, s in PRODUCTS_TO_STEPNAMES.items()}[step_name] ++ product = {s: p for p, s in list(PRODUCTS_TO_STEPNAMES.items())}[step_name] + products = {product} + + for product in products: +@@ -168,7 +169,7 @@ + """ + browser_to_exp_path = { + browser: PRODUCTS_TO_EXPECTATION_FILE_PATHS[product] +- for product, browser in PRODUCTS_TO_BROWSER_TAGS.items()} ++ for product, browser in list(PRODUCTS_TO_BROWSER_TAGS.items())} + product_exp_paths = {PRODUCTS_TO_EXPECTATION_FILE_PATHS[prod] + for prod in self.options.android_product} + untriaged_exps = self._get_untriaged_test_expectations( +@@ -177,18 +178,18 @@ + self._never_fix_expectations, [ANDROID_DISABLED_TESTS], + self.NEVER_FIX_MARKER_COMMENT)[ANDROID_DISABLED_TESTS] + +- for path, test_exps in untriaged_exps.items(): ++ for path, test_exps in list(untriaged_exps.items()): + self._test_expectations.remove_expectations( +- path, reduce(lambda x, y: x + y, test_exps.values())) ++ path, reduce(lambda x, y: x + y, list(test_exps.values()))) + + if neverfix_tests: + self._never_fix_expectations.remove_expectations( + ANDROID_DISABLED_TESTS, +- reduce(lambda x, y: x + y, neverfix_tests.values())) ++ reduce(lambda x, y: x + y, list(neverfix_tests.values()))) + +- for results_test_name, platform_results in test_to_results.items(): ++ for results_test_name, platform_results in list(test_to_results.items()): + exps_test_name = 'external/wpt/%s' % results_test_name +- for configs, test_results in platform_results.items(): ++ for configs, test_results in list(platform_results.items()): + for config in configs: + path = browser_to_exp_path[config.browser] + neverfix_exp = self._maybe_create_never_fix_expectation( +@@ -221,7 +222,7 @@ + self._test_expectations, path, self.MARKER_COMMENT) + self._test_expectations.add_expectations( + path, +- sorted([exps[0] for exps in untriaged_exps[path].values()], ++ sorted([exps[0] for exps in list(untriaged_exps[path].values())], + key=lambda e: e.test), + marker_lineno) + +@@ -233,7 +234,7 @@ + if neverfix_tests: + 
self._never_fix_expectations.add_expectations( + ANDROID_DISABLED_TESTS, +- sorted(reduce(lambda x, y: x + y, neverfix_tests.values()), ++ sorted(reduce(lambda x, y: x + y, list(neverfix_tests.values())), + key=lambda e: e.test), + disabled_tests_marker_lineno) + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/android_wpt_expectations_updater_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/android_wpt_expectations_updater_unittest.py 2025-01-16 02:26:08.561013392 +0800 +@@ -77,7 +77,7 @@ + }, + }) + # Write dummy expectations +- for path in PRODUCTS_TO_EXPECTATION_FILE_PATHS.values(): ++ for path in list(PRODUCTS_TO_EXPECTATION_FILE_PATHS.values()): + host.filesystem.write_text_file( + path, self._raw_android_expectations) + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/directory_owners_extractor.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/directory_owners_extractor.py 2025-01-16 02:26:08.561013392 +0800 +@@ -69,7 +69,7 @@ + email_map[tuple(owners)].add(owned_directory_relpath) + return { + owners: sorted(owned_directories) +- for owners, owned_directories in email_map.iteritems() ++ for owners, owned_directories in email_map.items() + } + + def find_owners_file(self, start_path): +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/directory_owners_extractor_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/directory_owners_extractor_unittest.py 2025-01-16 02:26:08.561013392 +0800 +@@ -30,7 +30,7 @@ + def _write_files(self, files): + # Use write_text_file instead of directly assigning to filesystem.files + # so that intermediary directories are correctly created, too. +- for path, contents in files.iteritems(): ++ for path, contents in files.items(): + self.host.filesystem.write_text_file(path, contents) + + def test_list_owners_combines_same_owners(self): +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/export_notifier.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/export_notifier.py 2025-01-16 02:26:08.561013392 +0800 +@@ -92,7 +92,7 @@ + """Processes and comments on CLs with failed Tackcluster checks.""" + _log.info('Processing %d CLs with failed Taskcluster checks.', + len(gerrit_dict)) +- for change_id, pr_status_info in gerrit_dict.items(): ++ for change_id, pr_status_info in list(gerrit_dict.items()): + try: + cl = self.gerrit.query_cl_comments_and_revisions(change_id) + has_commented = self.has_latest_taskcluster_status_commented( +@@ -184,7 +184,7 @@ + + def _checks_results_as_comment(self): + comment = '' +- for check, url in self._checks_results.items(): ++ for check, url in list(self._checks_results.items()): + comment += '\n%s (%s)' % (check, url) + + return comment +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/gerrit.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/gerrit.py 2025-01-16 02:26:08.561013392 +0800 +@@ -5,7 +5,7 @@ + import base64 + import json + import logging +-from urllib2 import HTTPError ++from urllib.error import HTTPError + + from blinkpy.common.net.network_transaction import NetworkTimeout + from blinkpy.w3c.chromium_commit import ChromiumCommit +@@ -166,7 +166,7 @@ + # TODO(robertma): Consolidate with the related part in chromium_exportable_commits.py. 
+ + try: +- files = self.current_revision['files'].keys() ++ files = list(self.current_revision['files'].keys()) + except KeyError: + # Empty (deleted) CL is not exportable. + return False +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/import_notifier.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/import_notifier.py 2025-01-16 02:26:08.561013392 +0800 +@@ -113,7 +113,7 @@ + changed baselines. + gerrit_url_with_ps: Gerrit URL of this CL with the patchset number. + """ +- for test_name, changed_baselines in changed_test_baselines.iteritems(): ++ for test_name, changed_baselines in changed_test_baselines.items(): + directory = self.find_owned_directory(test_name) + if not directory: + _log.warning('Cannot find OWNERS of %s', test_name) +@@ -161,7 +161,7 @@ + test_expectations: A dictionary mapping names of tests that cannot + be rebaselined to a list of new test expectation lines. + """ +- for test_name, expectation_lines in test_expectations.iteritems(): ++ for test_name, expectation_lines in test_expectations.items(): + directory = self.find_owned_directory(test_name) + if not directory: + _log.warning('Cannot find OWNERS of %s', test_name) +@@ -191,7 +191,7 @@ + imported_commits = self.local_wpt.commits_in_range( + wpt_revision_start, wpt_revision_end) + bugs = [] +- for directory, failures in self.new_failures_by_directory.iteritems(): ++ for directory, failures in self.new_failures_by_directory.items(): + summary = '[WPT] New failures introduced in {} by import {}'.format( + directory, gerrit_url) + +@@ -228,7 +228,7 @@ + cc, + components, + labels=['Test-WebTest']) +- _log.info(unicode(bug)) ++ _log.info(str(bug)) + + if is_wpt_notify_enabled: + _log.info( +@@ -259,7 +259,7 @@ + commit_list = '' + for sha, subject in imported_commits: + # subject is a Unicode string and can contain non-ASCII characters. +- line = u'{}: {}'.format(subject, GITHUB_COMMIT_PREFIX + sha) ++ line = '{}: {}'.format(subject, GITHUB_COMMIT_PREFIX + sha) + if self.local_wpt.is_commit_affecting_directory( + sha, path_from_wpt): + line += ' [affecting this directory]' +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/import_notifier_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/import_notifier_unittest.py 2025-01-16 02:26:08.561013392 +0800 +@@ -227,7 +227,7 @@ + imported_commits = [ + ('SHA1', 'Subject 1'), + # Use non-ASCII chars to really test Unicode handling. 
+- ('SHA2', u'ABC~‾¥≈¤・・•∙·☼★星🌟星★☼·∙•・・¤≈¥‾~XYZ')
++ ('SHA2', 'ABC~‾¥≈¤・・•∙·☼★星🌟星★☼·∙•・・¤≈¥‾~XYZ')
+ ]
+ 
+ def _is_commit_affecting_directory(commit, directory):
+@@ -239,8 +239,8 @@
+ self.assertEqual(
+ self.notifier.format_commit_list(
+ imported_commits, MOCK_WEB_TESTS + 'external/wpt/foo'),
+- u'Subject 1: https://github.com/web-platform-tests/wpt/commit/SHA1 [affecting this directory]\n'
+- u'ABC~‾¥≈¤・・•∙·☼★星🌟星★☼·∙•・・¤≈¥‾~XYZ: https://github.com/web-platform-tests/wpt/commit/SHA2\n'
++ 'Subject 1: https://github.com/web-platform-tests/wpt/commit/SHA1 [affecting this directory]\n'
++ 'ABC~‾¥≈¤・・•∙·☼★星🌟星★☼·∙•・・¤≈¥‾~XYZ: https://github.com/web-platform-tests/wpt/commit/SHA2\n'
+ )
+ 
+ def test_find_owned_directory_non_virtual(self):
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/monorail.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/monorail.py 2025-01-16 02:26:08.561013392 +0800
+@@ -36,7 +36,7 @@
+ for field in self._STRING_LIST_FIELDS:
+ if field in self._body:
+ # Not a str or unicode.
+- assert not isinstance(self._body[field], basestring)
++ assert not isinstance(self._body[field], str)
+ # Is iterable (TypeError would be raised otherwise).
+ self._body[field] = list(self._body[field])
+ # We expect a KeyError to be raised if 'status' is missing.
+@@ -46,19 +46,19 @@
+ assert self._body['summary'], 'summary cannot be empty.'
+ 
+- def __unicode__(self):
++ def __str__(self):
+- result = (u'Monorail issue in project {}\n'
++ result = ('Monorail issue in project {}\n'
+ 'Summary: {}\n'
+ 'Status: {}\n').format(self.project_id, self.body['summary'],
+ self.body['status'])
+ if 'cc' in self.body:
+- result += u'CC: {}\n'.format(', '.join(self.body['cc']))
++ result += 'CC: {}\n'.format(', '.join(self.body['cc']))
+ if 'components' in self.body:
+- result += u'Components: {}\n'.format(', '.join(
++ result += 'Components: {}\n'.format(', '.join(
+ self.body['components']))
+ if 'labels' in self.body:
+- result += u'Labels: {}\n'.format(', '.join(self.body['labels']))
++ result += 'Labels: {}\n'.format(', '.join(self.body['labels']))
+ if 'description' in self.body:
+- result += u'Description:\n{}\n'.format(self.body['description'])
++ result += 'Description:\n{}\n'.format(self.body['description'])
+ return result
+ 
+ @property
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/monorail_unittest.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/monorail_unittest.py 2025-01-16 02:26:08.561013392 +0800
+@@ -29,22 +29,22 @@
+ def test_unicode(self):
+ issue = MonorailIssue(
+ 'chromium',
+- summary=u'test',
++ summary='test',
+ status='Untriaged',
+- description=u'ABC~‾¥≈¤・・•∙·☼★星🌟星★☼·∙•・・¤≈¥‾~XYZ',
++ description='ABC~‾¥≈¤・・•∙·☼★星🌟星★☼·∙•・・¤≈¥‾~XYZ',
+ cc=['foo@chromium.org', 'bar@chromium.org'],
+ labels=['Flaky'],
+ components=['Infra'])
+- self.assertEqual(type(unicode(issue)), unicode)
++ self.assertEqual(type(str(issue)), str)
+ self.assertEqual(
+- unicode(issue),
+- (u'Monorail issue in project chromium\n'
+- u'Summary: test\n'
+- u'Status: Untriaged\n'
+- u'CC: foo@chromium.org, bar@chromium.org\n'
+- u'Components: Infra\n'
+- u'Labels: Flaky\n'
+- u'Description:\nABC~‾¥≈¤・・•∙·☼★星🌟星★☼·∙•・・¤≈¥‾~XYZ\n'))
++ str(issue),
++ ('Monorail issue in project chromium\n'
++ 'Summary: test\n'
++ 'Status: Untriaged\n'
++ 'CC: foo@chromium.org, bar@chromium.org\n'
++ 'Components: Infra\n'
++ 'Labels: Flaky\n'
++ 'Description:\nABC~‾¥≈¤・・•∙·☼★星🌟星★☼·∙•・・¤≈¥‾~XYZ\n'))
+ 
+ def 
test_init_unknown_fields(self): + with self.assertRaises(AssertionError): +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/test_importer_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/test_importer_unittest.py 2025-01-16 02:26:08.561013392 +0800 +@@ -30,7 +30,7 @@ + + def mock_host(self): + host = MockHost() +- for path in PRODUCTS_TO_EXPECTATION_FILE_PATHS.values(): ++ for path in list(PRODUCTS_TO_EXPECTATION_FILE_PATHS.values()): + host.filesystem.write_text_file(path, '') + return host + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/wpt_expectations_updater.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/wpt_expectations_updater.py 2025-01-16 02:26:08.561013392 +0800 +@@ -71,8 +71,8 @@ + for tests that were renamed. Also the files may have their expectations + updated using builder results. + """ +- return (self.port.all_expectations_dict().keys() + +- PRODUCTS_TO_EXPECTATION_FILE_PATHS.values()) ++ return (list(self.port.all_expectations_dict().keys()) + ++ list(PRODUCTS_TO_EXPECTATION_FILE_PATHS.values())) + + def run(self): + """Does required setup before calling update_expectations(). +@@ -162,7 +162,7 @@ + + # Here we build up a dict of failing test results for all platforms. + test_expectations = {} +- for build, job_status in build_to_status.iteritems(): ++ for build, job_status in build_to_status.items(): + if (job_status.result == 'SUCCESS' and + not self.options.include_unexpected_pass): + continue +@@ -179,7 +179,7 @@ + # } + # } + # And then we merge results for different platforms that had the same results. +- for test_name, platform_result in test_expectations.iteritems(): ++ for test_name, platform_result in test_expectations.items(): + # platform_result is a dict mapping platforms to results. + test_expectations[test_name] = self.merge_same_valued_keys( + platform_result) +@@ -239,7 +239,7 @@ + self.host.results_fetcher.fetch_webdriver_test_results( + build, master)) + +- test_results_list = filter(None, test_results_list) ++ test_results_list = [_f for _f in test_results_list if _f] + if not test_results_list: + _log.warning('No results for build %s', build) + self.configs_with_no_results.extend(self.get_builder_configs(build)) +@@ -475,13 +475,13 @@ + (each SimpleTestResult turns into a line). + """ + line_dict = defaultdict(list) +- for test_name, test_results in sorted(merged_results.iteritems()): ++ for test_name, test_results in sorted(merged_results.items()): + if not self._is_wpt_test(test_name): + _log.warning( + 'Non-WPT test "%s" unexpectedly passed to create_line_dict.', + test_name) + continue +- for configs, result in sorted(test_results.iteritems()): ++ for configs, result in sorted(test_results.items()): + line_dict[test_name].extend( + self._create_lines(test_name, configs, result)) + return line_dict +@@ -604,7 +604,7 @@ + """ + specifiers = {s.lower() for s in specifiers} + covered_by_try_bots = self._platform_specifiers_covered_by_try_bots() +- for macro, versions in specifier_macros.iteritems(): ++ for macro, versions in specifier_macros.items(): + macro = macro.lower() + + # Only consider version specifiers that have corresponding try bots. +@@ -656,7 +656,7 @@ + line_list = [] + wont_fix_list = [] + webdriver_list = [] +- for lines in line_dict.itervalues(): ++ for lines in line_dict.values(): + for line in lines: + if 'Skip' in line and '-manual.' 
in line: + wont_fix_list.append(line) +@@ -669,7 +669,7 @@ + self.port.path_to_generic_test_expectations_file(): line_list, + self.port.path_to_webdriver_expectations_file(): webdriver_list + } +- for expectations_file_path, lines in list_to_expectation.iteritems(): ++ for expectations_file_path, lines in list_to_expectation.items(): + if not lines: + continue + +@@ -926,7 +926,7 @@ + new_test_results = copy.deepcopy(test_results) + tests_to_rebaseline = set() + for test_name in test_results: +- for platforms, result in test_results[test_name].iteritems(): ++ for platforms, result in test_results[test_name].items(): + if self.can_rebaseline(test_name, result): + del new_test_results[test_name][platforms] + tests_to_rebaseline.add(test_name) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/wpt_expectations_updater_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/wpt_expectations_updater_unittest.py 2025-01-16 02:26:08.561013392 +0800 +@@ -91,7 +91,7 @@ + }, + })) + +- for path in PRODUCTS_TO_EXPECTATION_FILE_PATHS.values(): ++ for path in list(PRODUCTS_TO_EXPECTATION_FILE_PATHS.values()): + host.filesystem.write_text_file(path, '') + return host + +@@ -1237,7 +1237,7 @@ + host.filesystem.files[MOCK_WEB_TESTS + 'new/b.html'] = '' + # TODO(rmhasan): Remove creation of Android files within + # tests. +- for path in PRODUCTS_TO_EXPECTATION_FILE_PATHS.values(): ++ for path in list(PRODUCTS_TO_EXPECTATION_FILE_PATHS.values()): + host.filesystem.write_text_file(path, '') + + updater = WPTExpectationsUpdater(host) +@@ -1282,7 +1282,7 @@ + + # TODO(rmhasan): Remove creation of Android files within + # tests. +- for path in PRODUCTS_TO_EXPECTATION_FILE_PATHS.values(): ++ for path in list(PRODUCTS_TO_EXPECTATION_FILE_PATHS.values()): + host.filesystem.write_text_file(path, '') + + updater = WPTExpectationsUpdater( +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/wpt_github.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/wpt_github.py 2025-01-16 02:26:08.561013392 +0800 +@@ -7,7 +7,7 @@ + import json + import logging + import re +-import urllib2 ++import urllib.request, urllib.error, urllib.parse + from collections import namedtuple + + from blinkpy.common.memoized import memoized +@@ -128,7 +128,7 @@ + } + try: + response = self.request(path, method='POST', body=body) +- except urllib2.HTTPError as e: ++ except urllib.error.HTTPError as e: + _log.error(e.reason) + if e.code == 422: + _log.error('Please check if branch already exists; If so, ' +@@ -185,7 +185,7 @@ + WPT_GH_ORG, + WPT_GH_REPO_NAME, + number, +- urllib2.quote(label), ++ urllib.parse.quote(label), + ) + response = self.request(path, method='DELETE') + +@@ -373,7 +373,7 @@ + else: + raise GitHubError(204, response.status_code, + 'check if PR %d is merged' % pr_number) +- except urllib2.HTTPError as e: ++ except urllib.error.HTTPError as e: + if e.code == 404: + return False + else: +@@ -395,7 +395,7 @@ + + try: + response = self.request(path, method='PUT', body=body) +- except urllib2.HTTPError as e: ++ except urllib.error.HTTPError as e: + if e.code == 405: + raise MergeError(pr_number) + else: +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/wpt_manifest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/wpt_manifest.py 2025-01-16 02:26:08.561013392 +0800 +@@ -153,7 +153,7 @@ + for test_type in self.test_types: + if 
test_type not in items: + continue +- for filename, records in items[test_type].iteritems(): ++ for filename, records in items[test_type].items(): + for item in filter(self._is_not_jsshell, records): + url_for_item = self._get_url_from_item(item) + url_items[url_for_item] = item +@@ -163,7 +163,7 @@ + @memoized + def all_urls(self): + """Returns a set of the URLs for all items in the manifest.""" +- return frozenset(self.all_url_items().keys()) ++ return frozenset(list(self.all_url_items().keys())) + + def is_test_file(self, path_in_wpt): + """Checks if path_in_wpt is a test file according to the manifest.""" +@@ -329,7 +329,7 @@ + """ + assert isinstance(node, dict) + +- for k, v in node.items(): ++ for k, v in list(node.items()): + # WPT urls are always joined by '/', even on Windows. + new_path = k if not path else path + '/' + k + +@@ -360,7 +360,7 @@ + _handle_node(test_type_items, v, new_path) + + new_items = {} +- for test_type, value in items.items(): ++ for test_type, value in list(items.items()): + test_type_items = {} + _handle_node(test_type_items, value, '') + new_items[test_type] = test_type_items +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/wpt_manifest_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/wpt_manifest_unittest.py 2025-01-16 02:26:08.561013392 +0800 +@@ -92,7 +92,7 @@ + manifest = WPTManifest(manifest_json) + self.assertTrue(manifest.is_test_file('test.any.js')) + self.assertEqual(manifest.all_url_items(), +- {u'test.any.html': [u'test.any.html', {}]}) ++ {'test.any.html': ['test.any.html', {}]}) + self.assertEqual(manifest.extract_reference_list('/foo/bar.html'), []) + + def test_all_url_items_skips_jsshell_tests(self): +@@ -113,7 +113,7 @@ + ''' + manifest = WPTManifest(manifest_json) + self.assertEqual(manifest.all_url_items(), +- {u'test.any.html': [u'test.any.html', {}]}) ++ {'test.any.html': ['test.any.html', {}]}) + + def test_file_for_test(self): + # Test that we can lookup a test's filename for various cases like +@@ -135,8 +135,8 @@ + manifest = WPTManifest(manifest_json) + self.assertEqual( + manifest.all_url_items(), { +- u'test.any.html': [u'test.any.html', {}], +- u'test.any.worker.html': [u'test.any.worker.html', {}] ++ 'test.any.html': ['test.any.html', {}], ++ 'test.any.worker.html': ['test.any.worker.html', {}] + }) + # Ensure that we can get back to `test.any.js` from both of the tests. 
+ self.assertEqual( +@@ -171,10 +171,10 @@ + manifest = WPTManifest(manifest_json) + self.assertEqual( + manifest.all_url_items(), { +- u'test.html': [u'test.html', {}], +- u'test-crash.html': [u'test-crash.html', {}] ++ 'test.html': ['test.html', {}], ++ 'test-crash.html': ['test-crash.html', {}] + }) + +- self.assertTrue(manifest.is_crash_test(u'test-crash.html')) +- self.assertFalse(manifest.is_crash_test(u'test.html')) +- self.assertFalse(manifest.is_crash_test(u'different-test-crash.html')) ++ self.assertTrue(manifest.is_crash_test('test-crash.html')) ++ self.assertFalse(manifest.is_crash_test('test.html')) ++ self.assertFalse(manifest.is_crash_test('different-test-crash.html')) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/wpt_metadata_builder.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/wpt_metadata_builder.py 2025-01-16 02:26:08.561013392 +0800 +@@ -124,7 +124,7 @@ + + tests_for_metadata = self.get_tests_needing_metadata() + _log.info("Found %d tests requiring metadata", len(tests_for_metadata)) +- for test_name, test_status_bitmap in tests_for_metadata.items(): ++ for test_name, test_status_bitmap in list(tests_for_metadata.items()): + filename, file_contents = self.get_metadata_filename_and_contents( + test_name, test_status_bitmap) + if not filename or not file_contents: +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/wpt_metadata_builder_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/w3c/wpt_metadata_builder_unittest.py 2025-01-16 02:26:08.561013392 +0800 +@@ -131,7 +131,7 @@ + test_names = metadata_builder.get_tests_needing_metadata() + # The test will appear in the result but won't have a SKIP status + found = False +- for name_item, status_item in test_names.items(): ++ for name_item, status_item in list(test_names.items()): + if name_item == test_name: + found = True + self.assertNotEqual(SKIP_TEST, status_item) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/bisect_test_ordering.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/bisect_test_ordering.py 2025-01-16 02:26:08.561013392 +0800 +@@ -57,7 +57,7 @@ + def bisect(self): + if self.test_fails_in_isolation(): + self.buckets = [Bucket([self.expected_failure])] +- print '%s fails when run in isolation.' % self.expected_failure ++ print('%s fails when run in isolation.' % self.expected_failure) + self.print_result() + return 0 + if not self.test_fails(self.tests): +@@ -81,26 +81,26 @@ + return self.test_bucket_list_fails([Bucket([self.expected_failure])]) + + def verify_non_flaky(self): +- print 'Verifying the failure is not flaky by running 10 times.' 
++ print('Verifying the failure is not flaky by running 10 times.') + count_failures = 0 + for _ in range(0, 10): + if self.test_bucket_list_fails(self.buckets): + count_failures += 1 +- print 'Failed %d/10 times' % count_failures ++ print('Failed %d/10 times' % count_failures) + + def print_progress(self): + count = 0 + for bucket in self.buckets: + count += len(bucket.tests) +- print '%d tests left, %d buckets' % (count, len(self.buckets)) ++ print('%d tests left, %d buckets' % (count, len(self.buckets))) + + def print_result(self): + tests = [] + for bucket in self.buckets: + tests += bucket.tests + extra_args = ' --debug' if self.is_debug else '' +- print 'run_web_tests.py%s --jobs=1 --order=none %s' % (extra_args, +- ' '.join(tests)) ++ print('run_web_tests.py%s --jobs=1 --order=none %s' % (extra_args, ++ ' '.join(tests))) + + def is_done(self): + for bucket in self.buckets: +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/builder_list.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/builder_list.py 2025-01-16 02:26:08.561013392 +0800 +@@ -100,7 +100,7 @@ + return sorted(builders) + + def all_port_names(self): +- return sorted({b['port_name'] for b in self._builders.values()}) ++ return sorted({b['port_name'] for b in list(self._builders.values())}) + + def bucket_for_builder(self, builder_name): + return self._builders[builder_name].get('bucket', '') +@@ -131,7 +131,7 @@ + to non-debug builders. If no builder is found, None is returned. + """ + debug_builder_name = None +- for builder_name, builder_info in self._builders.iteritems(): ++ for builder_name, builder_info in self._builders.items(): + if builder_info.get('is_try_builder'): + continue + if builder_info['port_name'] == target_port_name: +@@ -148,7 +148,7 @@ + the version specifier for the first builder that matches, even + if it's a try bot builder. + """ +- for _, builder_info in sorted(self._builders.iteritems()): ++ for _, builder_info in sorted(self._builders.items()): + if builder_info['port_name'] == target_port_name: + return builder_info['specifiers'][0] + return None +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/lint_test_expectations.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/lint_test_expectations.py 2025-01-16 02:26:08.561013392 +0800 +@@ -41,6 +41,7 @@ + PRODUCTS_TO_EXPECTATION_FILE_PATHS, ANDROID_DISABLED_TESTS, + ANDROID_WEBLAYER) + from blinkpy.web_tests.port.factory import platform_options ++from functools import reduce + + _log = logging.getLogger(__name__) + +@@ -51,7 +52,7 @@ + + # Add all extra expectation files to be linted. 
+ options.additional_expectations.extend( +- PRODUCTS_TO_EXPECTATION_FILE_PATHS.values() + [ANDROID_DISABLED_TESTS] + [ ++ list(PRODUCTS_TO_EXPECTATION_FILE_PATHS.values()) + [ANDROID_DISABLED_TESTS] + [ + host.filesystem.join(port.web_tests_dir(), 'WPTOverrideExpectations'), + host.filesystem.join(port.web_tests_dir(), 'WebGPUExpectations'), + ]) +@@ -82,16 +83,16 @@ + if config_macro_dict: + all_system_specifiers.update( + {s.lower() +- for s in config_macro_dict.keys()}) ++ for s in list(config_macro_dict.keys())}) + all_system_specifiers.update({ + s.lower() +- for s in reduce(lambda x, y: x + y, config_macro_dict.values()) ++ for s in reduce(lambda x, y: x + y, list(config_macro_dict.values())) + }) + for path in port.extra_expectations_files(): + if host.filesystem.exists(path): + expectations_dict[path] = host.filesystem.read_text_file(path) + +- for path, content in expectations_dict.items(): ++ for path, content in list(expectations_dict.items()): + # Check the expectations file content + failures.extend(_check_expectations_file_content(content)) + +@@ -153,7 +154,7 @@ + def _check_test_existence(host, port, path, expectations, wpt_tests): + failures = [] + warnings = [] +- is_android_path = path in PRODUCTS_TO_EXPECTATION_FILE_PATHS.values() ++ is_android_path = path in list(PRODUCTS_TO_EXPECTATION_FILE_PATHS.values()) + for exp in expectations: + if not exp.test: + continue +@@ -308,7 +309,7 @@ + host, port, path, expectations, wpt_tests) + failures.extend(_check_directory_glob(host, port, path, expectations)) + failures.extend(_check_never_fix_tests(host, port, path, expectations)) +- if path in PRODUCTS_TO_EXPECTATION_FILE_PATHS.values(): ++ if path in list(PRODUCTS_TO_EXPECTATION_FILE_PATHS.values()): + failures.extend(_check_non_wpt_in_android_override( + host, port, path, expectations)) + # TODO(crbug.com/1080691): Change this to failures once +@@ -490,7 +491,7 @@ + except KeyboardInterrupt: + exit_status = exit_codes.INTERRUPTED_EXIT_STATUS + except Exception as error: # pylint: disable=broad-except +- print >> stderr, '\n%s raised: %s' % (error.__class__.__name__, error) ++ print('\n%s raised: %s' % (error.__class__.__name__, error), file=stderr) + traceback.print_exc(file=stderr) + exit_status = exit_codes.EXCEPTIONAL_EXIT_STATUS + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/lint_test_expectations_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/lint_test_expectations_unittest.py 2025-01-16 02:26:08.561013392 +0800 +@@ -26,7 +26,7 @@ + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-import StringIO ++import io + import optparse + import unittest + +@@ -274,7 +274,7 @@ + self.assertTrue(failures) + self.assertEqual(warnings, []) + +- self.assertEquals(len(failures), 6) ++ self.assertEqual(len(failures), 6) + expected_non_existence = [ + 'test1/*', + 'test2/bar.html', +@@ -298,7 +298,7 @@ + raw_expectations = ('# results: [ Failure ]\n' + 'external/wpt/test.html [ Failure ]\n' + 'non-wpt/test.html [ Failure ]\n') +- for path in PRODUCTS_TO_EXPECTATION_FILE_PATHS.values(): ++ for path in list(PRODUCTS_TO_EXPECTATION_FILE_PATHS.values()): + host.filesystem.write_text_file(path, raw_expectations) + host.port_factory.get = lambda platform, options=None: port + host.port_factory.all_port_names = lambda platform=None: [port.name()] +@@ -370,8 +370,8 @@ + failures, warnings = lint_test_expectations.lint(host, options) + self.assertEqual(failures, []) + +- self.assertEquals(len(warnings), 1) +- self.assertRegexpMatches(warnings[0], ':5 .*redundant with.* line 4$') ++ self.assertEqual(len(warnings), 1) ++ self.assertRegex(warnings[0], ':5 .*redundant with.* line 4$') + + def test_never_fix_tests(self): + options = optparse.Values({ +@@ -405,11 +405,11 @@ + failures, warnings = lint_test_expectations.lint(host, options) + self.assertEqual(warnings, []) + +- self.assertEquals(len(failures), 4) +- self.assertRegexpMatches(failures[0], ':7 .*must override') +- self.assertRegexpMatches(failures[1], ':8 .*must override') +- self.assertRegexpMatches(failures[2], ':9 Only one of') +- self.assertRegexpMatches(failures[3], ':11 .*must override') ++ self.assertEqual(len(failures), 4) ++ self.assertRegex(failures[0], ':7 .*must override') ++ self.assertRegex(failures[1], ':8 .*must override') ++ self.assertRegex(failures[2], ':9 Only one of') ++ self.assertRegex(failures[3], ':11 .*must override') + + + class CheckVirtualSuiteTest(unittest.TestCase): +@@ -481,7 +481,7 @@ + self.orig_lint_fn = lint_test_expectations.lint + self.orig_check_fn = lint_test_expectations.check_virtual_test_suites + lint_test_expectations.check_virtual_test_suites = lambda host, options: [] +- self.stderr = StringIO.StringIO() ++ self.stderr = io.StringIO() + + def tearDown(self): + lint_test_expectations.lint = self.orig_lint_fn +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/merge_results.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/merge_results.py 2025-01-16 02:26:08.561013392 +0800 +@@ -161,8 +161,8 @@ + Merger.__init__(self) + + self.add_helper( +- TypeMatch(types.ListType, types.TupleType), self.merge_listlike) +- self.add_helper(TypeMatch(types.DictType), self.merge_dictlike) ++ TypeMatch(list, tuple), self.merge_listlike) ++ self.add_helper(TypeMatch(dict), self.merge_dictlike) + + def fallback_matcher(self, objs, name=None): + raise MergeFailure("No merge helper found!", name, objs) +@@ -210,7 +210,7 @@ + dict_mid.setdefault(key, []).append(dobj[key]) + + dict_out = dicts[0].__class__({}) +- for k, v in dict_mid.iteritems(): ++ for k, v in dict_mid.items(): + assert v + if len(v) == 1: + dict_out[k] = v[0] +@@ -492,7 +492,7 @@ + + # Go through each file and try to merge it. + # partial_file_path is the file relative to the directories. 
+- for partial_file_path, in_dirs in sorted(files.iteritems()): ++ for partial_file_path, in_dirs in sorted(files.items()): + out_path = self.filesystem.join(output_dir, partial_file_path) + if self.filesystem.exists(out_path): + raise MergeFailure('File %s already exist in output.', +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/merge_results_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/merge_results_unittest.py 2025-01-16 02:26:08.561013392 +0800 +@@ -9,7 +9,7 @@ + import types + import unittest + +-import cStringIO as StringIO ++import io as StringIO + + from collections import OrderedDict + +@@ -19,13 +19,13 @@ + + class JSONMergerTests(unittest.TestCase): + def test_type_match(self): +- self.assertTrue(merge_results.TypeMatch(types.DictType)(dict())) ++ self.assertTrue(merge_results.TypeMatch(dict)(dict())) + self.assertFalse( +- merge_results.TypeMatch(types.ListType, types.TupleType)(dict())) ++ merge_results.TypeMatch(list, tuple)(dict())) + self.assertTrue( +- merge_results.TypeMatch(types.ListType, types.TupleType)(list())) ++ merge_results.TypeMatch(list, tuple)(list())) + self.assertTrue( +- merge_results.TypeMatch(types.ListType, types.TupleType)(tuple())) ++ merge_results.TypeMatch(list, tuple)(tuple())) + + def test_merge_listlike(self): + m = merge_results.JSONMerger() +@@ -45,10 +45,10 @@ + self.assertListEqual(expected, m.merge([inputa, inputb])) + self.assertSequenceEqual( + expected, m.merge_listlike([tuple(inputa), +- tuple(inputb)]), types.TupleType) ++ tuple(inputb)]), tuple) + self.assertSequenceEqual(expected, + m.merge([tuple(inputa), +- tuple(inputb)]), types.TupleType) ++ tuple(inputb)]), tuple) + + def test_merge_simple_dict(self): + m = merge_results.JSONMerger() +@@ -445,11 +445,11 @@ + b_before_a['a'] = 1 + + r1 = m.merge([a, b]) +- self.assertSequenceEqual(a_before_b.items(), r1.items()) ++ self.assertSequenceEqual(list(a_before_b.items()), list(r1.items())) + self.assertIsInstance(r1, OrderedDict) + + r2 = m.merge([b, a]) +- self.assertSequenceEqual(b_before_a.items(), r2.items()) ++ self.assertSequenceEqual(list(b_before_a.items()), list(r2.items())) + self.assertIsInstance(r2, OrderedDict) + + def test_custom_match_on_name(self): +@@ -1477,7 +1477,7 @@ + fs, results_json_value_overrides={'layout_tests_dir': 'src'}) + merger.merge('/out', ['/shards/0', '/shards/1']) + +- for fname, contents in self.web_test_output_filesystem.items(): ++ for fname, contents in list(self.web_test_output_filesystem.items()): + self.assertIn(fname, fs.files) + self.assertMultiLineEqual(contents, fs.files[fname]) + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/run_web_tests_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/run_web_tests_unittest.py 2025-01-16 02:26:08.561013392 +0800 +@@ -31,7 +31,7 @@ + import json + import os + import re +-import StringIO ++import io + import sys + import unittest + +@@ -84,7 +84,7 @@ + if shared_port: + port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj + +- printer = Printer(host, options, StringIO.StringIO()) ++ printer = Printer(host, options, io.StringIO()) + run_details = run_web_tests.run(port_obj, options, parsed_args, printer) + return run_details.exit_code == 0 + +@@ -109,7 +109,7 @@ + def run_and_capture(port_obj, options, parsed_args, shared_port=True): + if shared_port: + port_obj.host.port_factory.get = lambda *args, 
**kwargs: port_obj +- logging_stream = StringIO.StringIO() ++ logging_stream = io.StringIO() + printer = Printer(port_obj.host, options, logging_stream) + run_details = run_web_tests.run(port_obj, options, parsed_args, printer) + return (run_details, logging_stream) +@@ -142,7 +142,7 @@ + port_obj = port_obj or host.port_factory.get( + port_name=options.platform, options=options) + +- printer = Printer(host, options, StringIO.StringIO()) ++ printer = Printer(host, options, io.StringIO()) + run_details = run_web_tests.run(port_obj, options, parsed_args, printer) + + all_results = [] +@@ -183,7 +183,7 @@ + '/tmp/json_failing_test_results.json' + ], + tests_included=True) +- logging_stream = StringIO.StringIO() ++ logging_stream = io.StringIO() + host = MockHost() + port_obj = host.port_factory.get(options.platform, options) + printer = Printer(host, options, logging_stream) +@@ -1842,14 +1842,14 @@ + run_details, err, _ = logging_run( + ['passes/args.html', 'virtual/passes/'], tests_included=True) + self.assertEqual( +- len(run_details.summarized_full_results['tests']['passes'].keys()), ++ len(list(run_details.summarized_full_results['tests']['passes'].keys())), + 1) + self.assertFalse(virtual_test_warning_msg in err.getvalue()) + + run_details, err, _ = logging_run( + ['passes/args.html', 'virtual/passes/*'], tests_included=True) + self.assertEqual( +- len(run_details.summarized_full_results['tests']['passes'].keys()), ++ len(list(run_details.summarized_full_results['tests']['passes'].keys())), + 1) + self.assertTrue(virtual_test_warning_msg in err.getvalue()) + +@@ -2178,16 +2178,16 @@ + host=host) + written_files = host.filesystem.written_files + self.assertTrue( +- any(path.endswith('-diff.txt') for path in written_files.keys())) ++ any(path.endswith('-diff.txt') for path in list(written_files.keys()))) + self.assertTrue( + any( + path.endswith('-pretty-diff.html') +- for path in written_files.keys())) ++ for path in list(written_files.keys()))) + self.assertFalse( + any(path.endswith('-wdiff.html') for path in written_files)) + + def test_unsupported_platform(self): +- stderr = StringIO.StringIO() ++ stderr = io.StringIO() + res = run_web_tests.main(['--platform', 'foo'], stderr) + + self.assertEqual(res, exit_codes.UNEXPECTED_ERROR_EXIT_STATUS) +@@ -2207,7 +2207,7 @@ + host = MockHost() + port_obj = host.port_factory.get( + port_name=options.platform, options=options) +- logging_stream = StringIO.StringIO() ++ logging_stream = io.StringIO() + printer = Printer(host, options, logging_stream) + run_web_tests.run(port_obj, options, parsed_args, printer) + self.assertTrue('text.html passed' in logging_stream.getvalue()) +@@ -2326,7 +2326,7 @@ + # The run exit code is 0, indicating success; since we're resetting + # baselines, it's OK for actual results to not match baselines. 
+ self.assertEqual(details.exit_code, 0) +- self.assertEqual(len(written_files.keys()), 7) ++ self.assertEqual(len(list(written_files.keys())), 7) + self.assert_wpt_manifests_not_written(host, written_files) + self.assert_baselines( + written_files, +@@ -2365,7 +2365,7 @@ + host=host) + written_files = host.filesystem.written_files + self.assertEqual(details.exit_code, 0) +- self.assertEqual(len(written_files.keys()), 8) ++ self.assertEqual(len(list(written_files.keys())), 8) + self.assert_baselines(written_files, log_stream, + 'failures/unexpected/missing_text', ['.txt']) + self.assert_baselines(written_files, log_stream, +@@ -2388,7 +2388,7 @@ + host=host) + written_files = host.filesystem.written_files + self.assertEqual(details.exit_code, 0) +- self.assertEqual(len(written_files.keys()), 6) ++ self.assertEqual(len(list(written_files.keys())), 6) + self.assert_baselines(written_files, log_stream, + 'failures/unexpected/testharness', ['.txt']) + self.assert_baselines(written_files, log_stream, 'passes/testharness', +@@ -2406,7 +2406,7 @@ + host=host) + self.assertEqual(details.exit_code, 0) + written_files = host.filesystem.written_files +- self.assertEqual(len(written_files.keys()), 6) ++ self.assertEqual(len(list(written_files.keys())), 6) + self.assert_baselines(written_files, log_stream, + 'failures/unexpected/testharness', ['.txt']) + +@@ -2421,7 +2421,7 @@ + host=host) + self.assertEqual(details.exit_code, 0) + written_files = host.filesystem.written_files +- self.assertEqual(len(written_files.keys()), 6) ++ self.assertEqual(len(list(written_files.keys())), 6) + self.assert_baselines(written_files, log_stream, + 'failures/unexpected/image-only', ['.png']) + +@@ -2443,7 +2443,7 @@ + host=host) + written_files = host.filesystem.written_files + self.assertEqual(details.exit_code, 1) +- self.assertEqual(len(written_files.keys()), 11) ++ self.assertEqual(len(list(written_files.keys())), 11) + self.assert_contains( + log_stream, + 'Copying baseline to "platform/test-mac-mac10.10/failures/unexpected/text-image-checksum-expected.png"' +@@ -2472,7 +2472,7 @@ + host=host) + written_files = host.filesystem.written_files + self.assertEqual(details.exit_code, 0) +- self.assertEqual(len(written_files.keys()), 7) ++ self.assertEqual(len(list(written_files.keys())), 7) + self.assert_baselines( + written_files, + log_stream, +@@ -2489,7 +2489,7 @@ + host=host) + written_files = host.filesystem.written_files + self.assertEqual(details.exit_code, 0) +- self.assertEqual(len(written_files.keys()), 5) ++ self.assertEqual(len(list(written_files.keys())), 5) + self.assert_baselines( + written_files, + log_stream, +@@ -2508,7 +2508,7 @@ + host=host) + written_files = host.filesystem.written_files + self.assertEqual(details.exit_code, 0) +- self.assertEqual(len(written_files.keys()), 6) ++ self.assertEqual(len(list(written_files.keys())), 6) + self.assert_baselines( + written_files, + log_stream, +@@ -2527,7 +2527,7 @@ + host=host) + written_files = host.filesystem.written_files + self.assertEqual(details.exit_code, 0) +- self.assertEqual(len(written_files.keys()), 8) ++ self.assertEqual(len(list(written_files.keys())), 8) + self.assertIsNone(written_files[extra_txt]) + self.assertIsNone(written_files[extra_wav]) + self.assert_baselines( +@@ -2549,7 +2549,7 @@ + host=host) + written_files = host.filesystem.written_files + self.assertEqual(details.exit_code, 0) +- self.assertEqual(len(written_files.keys()), 8) ++ self.assertEqual(len(list(written_files.keys())), 8) + 
self.assertIsNone(written_files[extra_png]) + self.assertIsNone(written_files[extra_wav]) + self.assertIsNone(written_files[extra_txt]) +@@ -2566,7 +2566,7 @@ + host=host) + written_files = host.filesystem.written_files + self.assertEqual(details.exit_code, 0) +- self.assertEqual(len(written_files.keys()), 7) ++ self.assertEqual(len(list(written_files.keys())), 7) + self.assertIsNone(written_files[extra_png]) + self.assertIsNone(written_files[extra_wav]) + self.assertNotIn( +@@ -2585,7 +2585,7 @@ + host=host) + written_files = host.filesystem.written_files + self.assertEqual(details.exit_code, 0) +- self.assertEqual(len(written_files.keys()), 7) ++ self.assertEqual(len(list(written_files.keys())), 7) + self.assertIsNone(written_files[extra_png]) + self.assertIsNone(written_files[extra_txt]) + self.assert_baselines( +@@ -2602,7 +2602,7 @@ + host=host) + written_files = host.filesystem.written_files + self.assertEqual(details.exit_code, 0) +- self.assertEqual(len(written_files.keys()), 6) ++ self.assertEqual(len(list(written_files.keys())), 6) + self.assert_baselines( + written_files, + log_stream, +@@ -2628,7 +2628,7 @@ + host=host) + written_files = host.filesystem.written_files + self.assertEqual(details.exit_code, 0) +- self.assertEqual(len(written_files.keys()), 7) ++ self.assertEqual(len(list(written_files.keys())), 7) + # We should create new image baseline only. + self.assert_baselines( + written_files, +@@ -2655,7 +2655,7 @@ + host=host) + written_files = host.filesystem.written_files + self.assertEqual(details.exit_code, 1) +- self.assertEqual(len(written_files.keys()), 11) ++ self.assertEqual(len(list(written_files.keys())), 11) + self.assert_contains( + log_stream, + 'Copying baseline to "flag-specific/flag/failures/unexpected/text-image-checksum-expected.png"' +@@ -2694,7 +2694,7 @@ + self.assertEqual(details.exit_code, 0) + self.assertFalse(host.filesystem.exists(flag_specific_baseline_txt)) + written_files = host.filesystem.written_files +- self.assertEqual(len(written_files.keys()), 8) ++ self.assertEqual(len(list(written_files.keys())), 8) + # We should create new image baseline only. + self.assert_baselines( + written_files, +@@ -2721,7 +2721,7 @@ + host=host) + written_files = host.filesystem.written_files + self.assertEqual(details.exit_code, 0) +- self.assertEqual(len(written_files.keys()), 7) ++ self.assertEqual(len(list(written_files.keys())), 7) + # We should create new image baseline only. + self.assert_baselines( + written_files, +@@ -2746,7 +2746,7 @@ + host=host) + written_files = host.filesystem.written_files + self.assertEqual(details.exit_code, 0) +- self.assertEqual(len(written_files.keys()), 7) ++ self.assertEqual(len(list(written_files.keys())), 7) + # We should reset the platform image baseline. + self.assert_baselines( + written_files, +@@ -2774,7 +2774,7 @@ + host=host) + written_files = host.filesystem.written_files + self.assertEqual(details.exit_code, 0) +- self.assertEqual(len(written_files.keys()), 8) ++ self.assertEqual(len(list(written_files.keys())), 8) + # We should reset the platform image baseline. + self.assert_baselines( + written_files, +@@ -2810,7 +2810,7 @@ + self.assertEqual(details.exit_code, 0) + self.assertFalse(host.filesystem.exists(virtual_baseline_txt)) + written_files = host.filesystem.written_files +- self.assertEqual(len(written_files.keys()), 8) ++ self.assertEqual(len(list(written_files.keys())), 8) + self.assert_wpt_manifests_not_written(host, written_files) + # We should create new image baseline only. 
+ self.assert_baselines( +@@ -2837,7 +2837,7 @@ + def exception_raising_run(port, options, args, printer): + assert False + +- stderr = StringIO.StringIO() ++ stderr = io.StringIO() + try: + run_web_tests.run = interrupting_run + res = run_web_tests.main([], stderr) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/try_flag.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/try_flag.py 2025-01-16 02:26:08.561013392 +0800 +@@ -65,7 +65,7 @@ + test_expectations.parse_tagged_list(content) + return { + test_name +- for test_name in test_expectations.individual_exps.keys() ++ for test_name in list(test_expectations.individual_exps.keys()) + } + + def trigger(self): +@@ -103,7 +103,7 @@ + self._host.print_('Fetching results...') + # TODO: Get jobs from the _tryflag branch. Current branch for now. + jobs = self._git_cl.latest_try_jobs( +- builder_names=BUILDER_CONFIGS.keys()) ++ builder_names=list(BUILDER_CONFIGS.keys())) + results_fetcher = self._host.results_fetcher + for build in sorted(jobs): + self._host.print_('-- %s: %s/results.html' % +@@ -140,7 +140,7 @@ + elif action == 'update': + self.update() + else: +- print >> self._host.stderr, 'specify "trigger" or "update"' ++ print('specify "trigger" or "update"', file=self._host.stderr) + return 1 + return 0 + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/update_expectations.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/update_expectations.py 2025-01-16 02:26:08.561013392 +0800 +@@ -167,7 +167,7 @@ + # Initialize OS version to OS dictionary. + if not self._version_to_os: + for os, os_versions in \ +- self._port.configuration_specifier_macros().items(): ++ list(self._port.configuration_specifier_macros().items()): + for version in os_versions: + self._version_to_os[version.lower()] = os.lower() + +@@ -203,7 +203,7 @@ + + builders_checked.append(builder_name) + +- if builder_name not in self._builder_results_by_path.keys(): ++ if builder_name not in list(self._builder_results_by_path.keys()): + _log.error('Failed to find results for builder "%s"', + builder_name) + return False +@@ -211,7 +211,7 @@ + results_by_path = self._builder_results_by_path[builder_name] + + # No results means the tests were all skipped, or all results are passing. +- if expectation.test not in results_by_path.keys(): ++ if expectation.test not in list(results_by_path.keys()): + if self._remove_missing: + continue + return False +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/update_expectations_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/update_expectations_unittest.py 2025-01-16 02:26:08.561013392 +0800 +@@ -22,7 +22,7 @@ + self._results = {} + + # Make the results distinct like the real BotTestExpectations. +- for path, results in results_by_path.iteritems(): ++ for path, results in results_by_path.items(): + self._results[path] = list(set(results)) + + def all_results_by_path(self): +@@ -171,7 +171,7 @@ + } + updated_expectations = ( + self._expectations_remover.get_updated_test_expectations()) +- self.assertEquals(updated_expectations, test_expectations_before) ++ self.assertEqual(updated_expectations, test_expectations_before) + + def test_fail_mode_doesnt_remove_non_fails(self): + """Tests that lines that aren't failing are not touched. 
+@@ -211,7 +211,7 @@ + self.FAIL_TYPE)) + updated_expectations = ( + self._expectations_remover.get_updated_test_expectations()) +- self.assertEquals(updated_expectations, test_expectations_before) ++ self.assertEqual(updated_expectations, test_expectations_before) + + def test_dont_remove_directory_flake(self): + """Tests that flake lines with directories are untouched.""" +@@ -244,7 +244,7 @@ + self.FLAKE_TYPE)) + updated_expectations = ( + self._expectations_remover.get_updated_test_expectations()) +- self.assertEquals(updated_expectations, test_expectations_before) ++ self.assertEqual(updated_expectations, test_expectations_before) + + def test_dont_remove_directory_fail(self): + """Tests that fail lines with directories are untouched.""" +@@ -277,7 +277,7 @@ + self.FAIL_TYPE)) + updated_expectations = ( + self._expectations_remover.get_updated_test_expectations()) +- self.assertEquals(updated_expectations, test_expectations_before) ++ self.assertEqual(updated_expectations, test_expectations_before) + + def test_dont_remove_skip(self): + """Tests that lines with Skip are untouched. +@@ -312,7 +312,7 @@ + self._expectations_remover = self._create_expectations_remover() + updated_expectations = ( + self._expectations_remover.get_updated_test_expectations()) +- self.assertEquals(updated_expectations, test_expectations_before) ++ self.assertEqual(updated_expectations, test_expectations_before) + + def test_all_failure_result_types(self): + """Tests that all failure types are treated as failure.""" +@@ -350,7 +350,7 @@ + self._expectations_remover = self._create_expectations_remover() + updated_expectations = ( + self._expectations_remover.get_updated_test_expectations()) +- self.assertEquals( ++ self.assertEqual( + updated_expectations, + _strip_multiline_string_spaces("""# results: [ Failure Pass ] + test/a.html [ Failure Pass ] +@@ -394,7 +394,7 @@ + # The line with test/d.html is not removed since + # --remove-missing is false by default; lines for + # tests with no actual results are kept. +- self.assertEquals( ++ self.assertEqual( + updated_expectations, + _strip_multiline_string_spaces( + """# results: [ Timeout Crash Failure ] +@@ -441,7 +441,7 @@ + self._expectations_remover = self._create_expectations_remover() + updated_expectations = ( + self._expectations_remover.get_updated_test_expectations()) +- self.assertEquals( ++ self.assertEqual( + updated_expectations, + _strip_multiline_string_spaces( + """# results: [ Failure Pass Crash Timeout ] +@@ -474,7 +474,7 @@ + self.FLAKE_TYPE)) + updated_expectations = ( + self._expectations_remover.get_updated_test_expectations()) +- self.assertEquals( ++ self.assertEqual( + updated_expectations, + _strip_multiline_string_spaces("""# results: [ Failure Pass ] + # Keep since it's all failures. 
+@@ -514,7 +514,7 @@ + self._expectations_remover = self._create_expectations_remover() + updated_expectations = ( + self._expectations_remover.get_updated_test_expectations()) +- self.assertEquals(updated_expectations, ++ self.assertEqual(updated_expectations, + ('# results: [ Failure Pass ]')) + + def test_empty_test_expectations(self): +@@ -539,7 +539,7 @@ + self._expectations_remover = self._create_expectations_remover() + updated_expectations = ( + self._expectations_remover.get_updated_test_expectations()) +- self.assertEquals(updated_expectations, '') ++ self.assertEqual(updated_expectations, '') + + def test_basic_multiple_builders(self): + """Tests basic functionality with multiple builders.""" +@@ -591,7 +591,7 @@ + self._expectations_remover = self._create_expectations_remover() + updated_expectations = ( + self._expectations_remover.get_updated_test_expectations()) +- self.assertEquals( ++ self.assertEqual( + updated_expectations, + _strip_multiline_string_spaces("""# results: [ Failure Pass ] + # Keep these two since they're failing on the Mac builder. +@@ -695,7 +695,7 @@ + self._expectations_remover = self._create_expectations_remover() + updated_expectations = ( + self._expectations_remover.get_updated_test_expectations()) +- self.assertEquals( ++ self.assertEqual( + updated_expectations, + _strip_multiline_string_spaces(""" + # tags: [ Linux Mac Win Mac ] +@@ -804,7 +804,7 @@ + self._expectations_remover = self._create_expectations_remover() + updated_expectations = ( + self._expectations_remover.get_updated_test_expectations()) +- self.assertEquals( ++ self.assertEqual( + updated_expectations, + _strip_multiline_string_spaces( + """# Keep these two since they fail in debug. +@@ -859,7 +859,7 @@ + self._expectations_remover = self._create_expectations_remover() + updated_expectations = ( + self._expectations_remover.get_updated_test_expectations()) +- self.assertEquals(updated_expectations, ++ self.assertEqual(updated_expectations, + (_strip_multiline_string_spaces(""" + # results: [ Failure Pass ] + # Comment A - Keep since these aren't part of any test. +@@ -909,7 +909,7 @@ + self._expectations_remover = self._create_expectations_remover() + updated_expectations = ( + self._expectations_remover.get_updated_test_expectations()) +- self.assertEquals(updated_expectations, test_expectations_before) ++ self.assertEqual(updated_expectations, test_expectations_before) + + def test_lines_with_no_results_on_builders_can_be_removed(self): + """Tests that we remove a line that has no results on the builders. 
+@@ -945,7 +945,7 @@ + remove_missing=True) + updated_expectations = ( + self._expectations_remover.get_updated_test_expectations()) +- self.assertEquals( ++ self.assertEqual( + updated_expectations, + _strip_multiline_string_spaces(""" + # results: [ Failure Timeout Pass Crash Skip ] +@@ -984,7 +984,7 @@ + self._expectations_remover = self._create_expectations_remover() + updated_expectations = ( + self._expectations_remover.get_updated_test_expectations()) +- self.assertEquals( ++ self.assertEqual( + updated_expectations, + _strip_multiline_string_spaces(""" + # results: [ Failure Pass ]""")) +@@ -993,7 +993,7 @@ + self._create_expectations_remover(include_cq_results=True)) + updated_expectations = ( + self._expectations_remover.get_updated_test_expectations()) +- self.assertEquals(updated_expectations, test_expectations_before) ++ self.assertEqual(updated_expectations, test_expectations_before) + + def test_missing_builders_for_some_configurations(self): + """Tests the behavior when there are no builders for some configurations. +@@ -1066,7 +1066,7 @@ + self._expectations_remover = self._create_expectations_remover() + updated_expectations = ( + self._expectations_remover.get_updated_test_expectations()) +- self.assertEquals( ++ self.assertEqual( + updated_expectations, + _strip_multiline_string_spaces(""" + # tags: [ Win Linux ] +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/web_tests_history.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/web_tests_history.py 2025-01-16 02:26:08.561013392 +0800 +@@ -97,7 +97,7 @@ + + def _process_single(self, path): + _init(self) +- print _run(path) ++ print(_run(path)) + return 0 + + def _process_many(self, paths): +@@ -126,7 +126,7 @@ + if isinstance(res, BaseException): + # Traceback is already printed in the worker; exit directly. + raise SystemExit +- print res ++ print(res) + pool.close() + except Exception: + # A user exception was raised from the manager (main) process. 
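+# The dump_reader_multipart.py hunk below keeps the old module name as an
+# alias (import queue as Queue) instead of a bare rename: the surrounding
+# code binds a local variable named `queue`, which under a bare
+# `import queue` would shadow the module, and `queue = queue.Queue()`
+# inside a function would raise UnboundLocalError. With the alias, every
+# existing `Queue.Queue()` / `Queue.Empty` reference keeps working. A
+# minimal sketch of the failure mode (function name is illustrative only):
+#
+#     import queue as Queue
+#
+#     def make_work_queue():
+#         queue = Queue.Queue()  # OK; with a bare `import queue` this line
+#         return queue           # would reference the local before assignment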
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/breakpad/dump_reader_multipart.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/breakpad/dump_reader_multipart.py	2025-01-16 02:26:08.561013392 +0800
+@@ -29,7 +29,7 @@
+ import cgi
+ import logging
+ import threading
+-import Queue
++import queue as Queue
+ 
+ from blinkpy.common.path_finder import PathFinder
+ from blinkpy.web_tests.breakpad.dump_reader import DumpReader
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/manager.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/manager.py	2025-01-16 02:26:08.561013392 +0800
+@@ -269,7 +269,7 @@
+         tests_to_retry = self._tests_to_retry(initial_results)
+         all_retry_results = []
+         if should_retry_failures and tests_to_retry:
+-            for retry_attempt in xrange(1, self._options.num_retries + 1):
++            for retry_attempt in range(1, self._options.num_retries + 1):
+                 if not tests_to_retry:
+                     break
+ 
+@@ -450,9 +450,9 @@
+                    retry_attempt=0):
+ 
+         test_inputs = []
+-        for _ in xrange(iterations):
++        for _ in range(iterations):
+             for test in tests_to_run:
+-                for _ in xrange(repeat_each):
++                for _ in range(repeat_each):
+                     test_inputs.append(
+                         self._test_input_for_file(test, retry_attempt))
+         return self._runner.run_tests(self._expectations, test_inputs,
+@@ -519,7 +519,7 @@
+         test_failures.AbstractTestResultType.result_directory = self._results_directory
+         test_failures.AbstractTestResultType.filesystem = self._filesystem
+ 
+-        for test, result in run_results.unexpected_results_by_name.iteritems():
++        for test, result in run_results.unexpected_results_by_name.items():
+             if result.type != ResultType.Crash:
+                 continue
+             for failure in result.failures:
+@@ -532,7 +532,7 @@
+ 
+         sample_files = self._port.look_for_new_samples(crashed_processes,
+                                                        start_time) or {}
+-        for test, sample_file in sample_files.iteritems():
++        for test, sample_file in sample_files.items():
+             test_failures.AbstractTestResultType.test_name = test
+             test_result = run_results.unexpected_results_by_name[test]
+             artifact_relative_path = self._port.output_filename(
+@@ -551,7 +551,7 @@
+ 
+         new_crash_logs = self._port.look_for_new_crash_logs(
+             crashed_processes, start_time) or {}
+-        for test, (crash_log, crash_site) in new_crash_logs.iteritems():
++        for test, (crash_log, crash_site) in new_crash_logs.items():
+             test_failures.AbstractTestResultType.test_name = test
+             failure.crash_log = crash_log
+             failure.has_log = self._port.output_contains_sanitizer_messages(
+@@ -586,7 +586,7 @@
+         # only consider the last retry attempt for the count of unexpected regressions.
+         return [
+             result.test_name
+-            for result in run_results.unexpected_results_by_name.values()
++            for result in list(run_results.unexpected_results_by_name.values())
+             if result.type != ResultType.Pass
+         ]
+ 
+@@ -597,7 +597,7 @@
+ 
+     # FIXME: Upload stats.json to the server and delete times_ms.
+         times_trie = json_results_generator.test_timings_trie(
+-            initial_results.results_by_name.values())
++            list(initial_results.results_by_name.values()))
+         times_json_path = self._filesystem.join(self._artifacts_directory,
+                                                 'times_ms.json')
+         json_results_generator.write_json(self._filesystem, times_trie,
+@@ -710,7 +710,7 @@
+             return int(worker_name.split('/')[1]) if worker_name else -1
+ 
+         stats = {}
+-        for result in initial_results.results_by_name.values():
++        for result in list(initial_results.results_by_name.values()):
+             if result.type != ResultType.Skip:
+                 stats[result.test_name] = {
+                     'results': (_worker_number(result.worker_name),
+@@ -719,6 +719,6 @@
+                                 int(result.total_run_time * 1000))
+                 }
+         stats_trie = {}
+-        for name, value in stats.iteritems():
++        for name, value in stats.items():
+             json_results_generator.add_path_to_trie(name, value, stats_trie)
+         return stats_trie
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/repaint_overlay_unittest.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/repaint_overlay_unittest.py	2025-01-16 02:26:08.561013392 +0800
+@@ -35,7 +35,7 @@
+         self.assertFalse(repaint_overlay.result_contains_repaint_rects('ABCD'))
+ 
+     def test_extract_layer_tree(self):
+-        self.assertEquals(LAYER_TREE,
++        self.assertEqual(LAYER_TREE,
+                          repaint_overlay.extract_layer_tree(LAYER_TREE))
+ 
+     def test_generate_repaint_overlay_html(self):
+@@ -67,7 +67,7 @@
+             'paint/invalidation/repaint-overlay/layers-overlay.html')
+         expected = host.filesystem.read_text_file(overlay_html_file)
+ 
+-        self.assertEquals(
++        self.assertEqual(
+             expected, overlay_html,
+             'This failure is probably caused by changed repaint_overlay.py. '
+             'Please examine the diffs:\n diff %s %s\n'
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/test_result_sink.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/test_result_sink.py	2025-01-16 02:26:08.561013392 +0800
+@@ -15,7 +15,7 @@
+ 
+ import json
+ import logging
+-import urllib2
++import urllib.request, urllib.error, urllib.parse
+ 
+ from blinkpy.web_tests.models.typ_types import ResultType
+ 
+@@ -76,7 +76,7 @@
+             self._sink_ctx['address'])
+ 
+     def _send(self, data):
+-        req = urllib2.Request(
++        req = urllib.request.Request(
+             url=self._sink_url,
+-            data=json.dumps(data),
++            data=json.dumps(data).encode('utf-8'),
+             headers={
+@@ -86,7 +86,7 @@
+                 'ResultSink %s' % self._sink_ctx['auth_token'],
+             },
+         )
+-        return urllib2.urlopen(req)
++        return urllib.request.urlopen(req)
+ 
+     def _status(self, result):
+         """Returns the TestStatus enum value corresponding to the result type.
+@@ -130,7 +130,7 @@ + """ + ret = {} + base_dir = self._port.results_directory() +- for name, paths in result.artifacts.artifacts.iteritems(): ++ for name, paths in result.artifacts.artifacts.items(): + for p in paths: + art_id = name + i = 1 +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/test_result_sink_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/test_result_sink_unittest.py 2025-01-16 02:26:08.561013392 +0800 +@@ -7,7 +7,7 @@ + import mock + import sys + import unittest +-from urlparse import urlparse ++from urllib.parse import urlparse + + from blinkpy.common.host_mock import MockHost + from blinkpy.web_tests.controllers.test_result_sink import CreateTestResultSink +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_finder.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_finder.py 2025-01-16 02:26:08.561013392 +0800 +@@ -113,7 +113,7 @@ + + # Ignore tests with a time==0 because those are skipped tests. + sorted_times = sorted( +- [test for (test, time) in times.iteritems() if time], ++ [test for (test, time) in times.items() if time], + key=lambda t: (times[t], t)) + clamped_percentile = max(0, min(100, fastest_percentile)) + number_of_tests_to_return = int( +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_finder_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_finder_unittest.py 2025-01-16 02:26:08.564263337 +0800 +@@ -144,9 +144,9 @@ + self.assertEqual(tests, set([idlharness_test_1, idlharness_test_2])) + self.assertTrue( + expectations.get_expectations(non_idlharness_test).is_default_pass) +- self.assertEquals( ++ self.assertEqual( + expectations.get_expectations(idlharness_test_1).results, {'SKIP'}) +- self.assertEquals( ++ self.assertEqual( + expectations.get_expectations(idlharness_test_2).results, {'SKIP'}) + + # Disable expectations entirely; we should still skip the idlharness +@@ -165,9 +165,9 @@ + # TestExpectations work. 
+ self.assertTrue( + expectations.get_expectations(non_idlharness_test).is_default_pass) +- self.assertEquals( ++ self.assertEqual( + expectations.get_expectations(idlharness_test_1).results, {'SKIP'}) +- self.assertEquals( ++ self.assertEqual( + expectations.get_expectations(idlharness_test_2).results, {'SKIP'}) + + def test_find_fastest_tests(self): +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_runner.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/controllers/web_test_runner.py 2025-01-16 02:26:08.564263337 +0800 +@@ -177,7 +177,7 @@ + if args not in tests_by_args: + tests_by_args[args] = [] + tests_by_args[args].append(test_input) +- shard.test_inputs = list(itertools.chain(*tests_by_args.values())) ++ shard.test_inputs = list(itertools.chain(*list(tests_by_args.values()))) + + def _worker_factory(self, worker_connection): + return Worker(worker_connection, self._results_directory, +@@ -511,7 +511,7 @@ + tests_by_dir.setdefault(directory, []) + tests_by_dir[directory].append(test_input) + +- for directory, test_inputs in tests_by_dir.iteritems(): ++ for directory, test_inputs in tests_by_dir.items(): + shard = TestShard(directory, test_inputs) + if test_inputs[0].requires_lock: + locked_shards.append(shard) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/layout_package/bot_test_expectations.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/layout_package/bot_test_expectations.py 2025-01-16 02:26:08.564263337 +0800 +@@ -32,8 +32,8 @@ + import json + import logging + import os.path +-import urllib +-import urllib2 ++import urllib.request, urllib.parse, urllib.error ++import urllib.request, urllib.error, urllib.parse + + from blinkpy.web_tests.models.typ_types import Expectation, ResultType + +@@ -112,7 +112,7 @@ + self._json = json_dict + + def _walk_trie(self, trie, parent_path): +- for name, value in trie.items(): ++ for name, value in list(trie.items()): + full_path = os.path.join(parent_path, name) + + # FIXME: If we ever have a test directory self.RESULTS_KEY +@@ -162,9 +162,9 @@ + def _results_url_for_builder(self, builder, use_try_step=False): + test_type = (self.STEP_NAME_TRY if use_try_step else self.STEP_NAME) + return self.RESULTS_URL_FORMAT % ( +- urllib.quote(test_type), +- urllib.quote(self.builders.master_for_builder(builder)), +- urllib.quote(builder)) ++ urllib.parse.quote(test_type), ++ urllib.parse.quote(self.builders.master_for_builder(builder)), ++ urllib.parse.quote(builder)) + + def _results_json_for_builder(self, builder): + results_url = self._results_url_for_builder( +@@ -173,8 +173,8 @@ + _log.debug('Fetching flakiness data from appengine: %s', + results_url) + return ResultsJSON(builder, json.load( +- urllib2.urlopen(results_url))) +- except urllib2.URLError as error: ++ urllib.request.urlopen(results_url))) ++ except urllib.error.URLError as error: + _log.warning( + 'Could not retrieve flakiness data from the bot. url: %s', + results_url) +@@ -186,8 +186,8 @@ + _log.debug('Fetching flakiness data from appengine: %s', + results_url) + return ResultsFilter(builder, json.load( +- urllib2.urlopen(results_url))) +- except urllib2.URLError as error: ++ urllib.request.urlopen(results_url))) ++ except urllib.error.URLError as error: + _log.warning( + 'Could not retrieve flakiness data from the bot. 
url: %s', + results_url) +@@ -256,7 +256,7 @@ + result_types = self._all_types_in_results(results_dict) + + # Distinct results as non-encoded strings. +- results = map(self.results_json.expectation_for_type, result_types) ++ results = list(map(self.results_json.expectation_for_type, result_types)) + + # Get test expectations + expectations = exp_string.split(' ') +@@ -300,8 +300,8 @@ + continue + + # Distinct results as non-encoded strings. +- result_strings = map(self.results_json.expectation_for_type, +- result_types) ++ result_strings = list(map(self.results_json.expectation_for_type, ++ result_types)) + + results_by_path[test_path] = sorted(result_strings) + return results_by_path +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/layout_package/json_results_generator.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/layout_package/json_results_generator.py 2025-01-16 02:26:08.564263337 +0800 +@@ -65,7 +65,7 @@ + def convert_times_trie_to_flat_paths(trie, prefix=None): + """Converts the directory structure in the given trie to flat paths, prepending a prefix to each.""" + result = {} +- for name, data in trie.iteritems(): ++ for name, data in trie.items(): + if prefix: + name = prefix + "/" + name + if isinstance(data, int): +@@ -115,7 +115,7 @@ + """A simple class that represents a single test result.""" + + # Test modifier constants. +- (NONE, FAILS, FLAKY, DISABLED) = range(4) ++ (NONE, FAILS, FLAKY, DISABLED) = list(range(4)) + + def __init__(self, test, failed=False, elapsed_time=0): + self.test_name = test +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_configuration.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_configuration.py 2025-01-16 02:26:08.564263337 +0800 +@@ -43,10 +43,10 @@ + return ['version', 'architecture', 'build_type'] + + def items(self): +- return self.__dict__.items() ++ return list(self.__dict__.items()) + + def keys(self): +- return self.__dict__.keys() ++ return list(self.__dict__.keys()) + + def __str__(self): + return ( +@@ -63,7 +63,7 @@ + + def values(self): + """Returns the configuration values of this instance as a tuple.""" +- return self.__dict__.values() ++ return list(self.__dict__.values()) + + + class SpecifierSorter(object): +@@ -73,7 +73,7 @@ + if not all_test_configurations: + return + for test_configuration in all_test_configurations: +- for category, specifier in test_configuration.items(): ++ for category, specifier in list(test_configuration.items()): + self.add_specifier(category, specifier) + + self.add_macros(macros) +@@ -85,7 +85,7 @@ + if not macros: + return + # Assume well-formed macros. 
+-        for macro, specifier_list in macros.items():
++        for macro, specifier_list in list(macros.items()):
+             self.add_specifier(
+                 self.category_for_specifier(specifier_list[0]), macro)
+ 
+@@ -100,7 +100,7 @@
+         return self._specifier_to_category.get(specifier)
+ 
+     def sort_specifiers(self, specifiers):
+-        category_slots = map(lambda x: [], TestConfiguration.category_order())
++        category_slots = [[] for x in TestConfiguration.category_order()]
+         for specifier in specifiers:
+             category_slots[self.specifier_priority(specifier)].append(
+                 specifier)
+@@ -123,24 +123,24 @@
+         self._collapsing_sets_by_category = {}
+         matching_sets_by_category = {}
+         for configuration in all_test_configurations:
+-            for category, specifier in configuration.items():
++            for category, specifier in list(configuration.items()):
+                 self._specifier_to_configuration_set.setdefault(
+                     specifier, set()).add(configuration)
+                 self._specifier_sorter.add_specifier(category, specifier)
+                 self._collapsing_sets_by_category.setdefault(
+                     category, set()).add(specifier)
+                 # FIXME: This seems extra-awful.
+-                for cat2, spec2 in configuration.items():
++                for cat2, spec2 in list(configuration.items()):
+                     if category == cat2:
+                         continue
+                     matching_sets_by_category.setdefault(
+                         specifier, {}).setdefault(cat2, set()).add(spec2)
+-        for collapsing_set in self._collapsing_sets_by_category.values():
++        for collapsing_set in list(self._collapsing_sets_by_category.values()):
+             self._collapsing_sets_by_size.setdefault(
+                 len(collapsing_set), set()).add(frozenset(collapsing_set))
+ 
+-        for specifier, sets_by_category in matching_sets_by_category.items():
+-            for category, set_by_category in sets_by_category.items():
++        for specifier, sets_by_category in list(matching_sets_by_category.items()):
++            for category, set_by_category in list(sets_by_category.items()):
+                 if (len(set_by_category) == 1 and
+                         self._specifier_sorter.category_priority(category) >
+                         self._specifier_sorter.specifier_priority(specifier)):
+@@ -177,11 +177,12 @@
+                 matching_sets.setdefault(category,
+                                          set()).update(configurations)
+ 
+-        return reduce(set.intersection, matching_sets.values())
++        from functools import reduce  # reduce is not a builtin in Python 3
++        return reduce(set.intersection, list(matching_sets.values()))
+ 
+     @classmethod
+     def collapse_macros(cls, macros_dict, specifiers_list):
+-        for macro_specifier, macro in macros_dict.items():
++        for macro_specifier, macro in list(macros_dict.items()):
+             if len(macro) == 1:
+                 continue
+ 
+@@ -209,7 +210,7 @@
+         for specifier in specifiers_to_add:
+             specifiers_list.append(specifier)
+ 
+-        for macro_specifier, macro in macros_dict.items():
++        for macro_specifier, macro in list(macros_dict.items()):
+             collapse_individual_specifier_set(macro_specifier, macro)
+ 
+     @classmethod
+@@ -237,7 +238,7 @@
+         for config in test_configuration_set:
+             values = set(config.values())
+             for specifier, junk_specifier_set in \
+-                    self._junk_specifier_combinations.items():
++                    list(self._junk_specifier_combinations.items()):
+                 if specifier in values:
+                     values -= junk_specifier_set
+             specifiers_list.append(frozenset(values))
+@@ -256,7 +257,7 @@
+ 
+         # 2) Collapse specifier sets with common specifiers:
+         #    (win7, release), (win7, debug) --> (win7, x86)
+-        for size, collapsing_sets in self._collapsing_sets_by_size.items():
++        for size, collapsing_sets in list(self._collapsing_sets_by_size.items()):
+             while try_collapsing(size, collapsing_sets):
+                 pass
+ 
+@@ -276,7 +277,7 @@
+ 
+         # 3) Abbreviate specifier sets by combining specifiers across categories.
+ # (win7, release), (win10, release) --> (win7, win10, release) +- while try_abbreviating(self._collapsing_sets_by_size.values()): ++ while try_abbreviating(list(self._collapsing_sets_by_size.values())): + pass + + # 4) Substitute specifier subsets that match macros within each set: +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_configuration_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_configuration_unittest.py 2025-01-16 02:26:08.564263337 +0800 +@@ -53,7 +53,7 @@ + def test_items(self): + config = TestConfiguration('win7', 'x86', 'release') + result_config_dict = {} +- for category, specifier in config.items(): ++ for category, specifier in list(config.items()): + result_config_dict[category] = specifier + self.assertEqual({ + 'version': 'win7', +@@ -64,7 +64,7 @@ + def test_keys(self): + config = TestConfiguration('win7', 'x86', 'release') + result_config_keys = [] +- for category in config.keys(): ++ for category in list(config.keys()): + result_config_keys.append(category) + self.assertEqual( + set(['version', 'architecture', 'build_type']), +@@ -107,14 +107,14 @@ + self.assertEqual( + TestConfiguration('win7', 'x86', 'release'), + TestConfiguration('win7', 'x86', 'release')) +- self.assertNotEquals( ++ self.assertNotEqual( + TestConfiguration('win7', 'x86', 'release'), + TestConfiguration('win7', 'x86', 'debug')) + + def test_values(self): + config = TestConfiguration('win7', 'x86', 'release') + result_config_values = [] +- for value in config.values(): ++ for value in list(config.values()): + result_config_values.append(value) + self.assertEqual( + set(['win7', 'x86', 'release']), set(result_config_values)) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_expectations.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_expectations.py 2025-01-16 02:26:08.564263337 +0800 +@@ -36,6 +36,7 @@ + + from blinkpy.common.memoized import memoized + from blinkpy.web_tests.models import typ_types ++from functools import reduce + + ResultType = typ_types.ResultType + +@@ -106,7 +107,7 @@ + # map file paths to sets of line numbers + self._expectation_file_linenos = defaultdict(set) + +- for path, content in self._expectations_dict.items(): ++ for path, content in list(self._expectations_dict.items()): + test_expectations = typ_types.TestExpectations( + tags=self._system_condition_tags) + ret, errors = test_expectations.parse_tagged_list( +@@ -184,7 +185,7 @@ + args: + path: Absolute path of expectations file.""" + content = self._expectations_dict[path] +- idx = self._expectations_dict.keys().index(path) ++ idx = list(self._expectations_dict.keys()).index(path) + typ_expectations = self._expectations[idx] + lines = [] + +@@ -231,7 +232,7 @@ + lines.append(_NotExpectation('', len(content_lines) + 1)) + + for line in sorted( +- reduce(lambda x,y: x+y, lineno_to_exps.values()), ++ reduce(lambda x,y: x+y, list(lineno_to_exps.values())), + key=lambda e: e.test): + if line.lineno: + raise ValueError( +@@ -281,7 +282,7 @@ + def _os_to_version(self): + os_to_version = {} + for os, os_versions in \ +- self._port.configuration_specifier_macros().items(): ++ list(self._port.configuration_specifier_macros().items()): + for version in os_versions: + os_to_version[version.lower()] = os.lower() + return os_to_version +@@ -332,7 +333,7 @@ + trailing_comments=trailing_comments) + + def 
get_expectations_from_file(self, path, test_name): +- idx = self._expectations_dict.keys().index(path) ++ idx = list(self._expectations_dict.keys()).index(path) + return copy.deepcopy( + self._expectations[idx].individual_exps.get(test_name) or []) + +@@ -386,7 +387,7 @@ + for test_exp in self._expectations: + tests.extend(test_exp.individual_exps) + tests.extend([ +- dir_name[:-1] for dir_name in test_exp.glob_exps.keys() ++ dir_name[:-1] for dir_name in list(test_exp.glob_exps.keys()) + if self.port.test_isdir(dir_name[:-1]) + ]) + return { +@@ -408,7 +409,7 @@ + if bot_expectations: + raw_expectations = ( + '# results: [ Failure Pass Crash Skip Timeout ]\n') +- for test, results in bot_expectations.items(): ++ for test, results in list(bot_expectations.items()): + raw_expectations += typ_types.Expectation( + test=test, results=results).to_string() + '\n' + self.merge_raw_expectations(raw_expectations) +@@ -422,7 +423,7 @@ + path: Absolute path of file where the Expectation instances + came from. + exps: List of Expectation instances to be deleted.""" +- idx = self._expectations_dict.keys().index(path) ++ idx = list(self._expectations_dict.keys()).index(path) + typ_expectations = self._expectations[idx] + + for exp in exps: +@@ -445,7 +446,7 @@ + exps: List of Expectation instances to be added to the file. + lineno: Line number in expectations file where the expectations will + be added.""" +- idx = self._expectations_dict.keys().index(path) ++ idx = list(self._expectations_dict.keys()).index(path) + typ_expectations = self._expectations[idx] + added_glob = False + +@@ -470,7 +471,7 @@ + + if added_glob: + glob_exps = reduce(lambda x, y: x + y, +- typ_expectations.glob_exps.values()) ++ list(typ_expectations.glob_exps.values())) + glob_exps.sort(key=lambda e: len(e.test), reverse=True) + typ_expectations.glob_exps = OrderedDict() + for exp in glob_exps: +@@ -487,16 +488,16 @@ + def __init__(self, test_expectations): + self._test_expectations = test_expectations + self._configuration_specifiers_dict = {} +- for os, os_versions in (self._test_expectations.port. +- configuration_specifier_macros().items()): ++ for os, os_versions in (list(self._test_expectations.port. ++ configuration_specifier_macros().items())): + self._configuration_specifiers_dict[os.lower()] = (frozenset( + version.lower() for version in os_versions)) + self._os_specifiers = frozenset( +- os for os in self._configuration_specifiers_dict.keys()) ++ os for os in list(self._configuration_specifiers_dict.keys())) + self._version_specifiers = frozenset( + specifier.lower() for specifier in reduce( +- lambda x, y: x | y, self._configuration_specifiers_dict. +- values())) ++ lambda x, y: x | y, list(self._configuration_specifiers_dict. ++ values()))) + self._deleted_lines = set() + self._generic_exp_file_path = \ + self._test_expectations.port.path_to_generic_test_expectations_file() +@@ -527,7 +528,7 @@ + # expectation for each version that is not in the versions_to_remove list + system_specifiers = set(self._version_specifiers - + versions_to_remove) +- for os, os_versions in self._configuration_specifiers_dict.items(): ++ for os, os_versions in list(self._configuration_specifiers_dict.items()): + # If all the versions of an OS are in the system specifiers set, then + # replace all those specifiers with the OS specifier. 
+ if os_versions.issubset(system_specifiers): +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_expectations_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_expectations_unittest.py 2025-01-16 02:26:08.564263337 +0800 +@@ -37,6 +37,7 @@ + from blinkpy.web_tests.models.test_expectations import ( + TestExpectations, SystemConfigurationRemover, ParseError) + from blinkpy.web_tests.models.typ_types import ResultType, Expectation ++from functools import reduce + + + class Base(unittest.TestCase): +@@ -464,7 +465,7 @@ + self.set_up_using_raw_expectations(raw_expectations) + all_versions = reduce( + lambda x, y: x + y, +- self._port.configuration_specifier_macros_dict.values()) ++ list(self._port.configuration_specifier_macros_dict.values())) + self._system_config_remover.remove_os_versions( + 'failures/expected/text.html', all_versions) + self._system_config_remover.update_expectations() +@@ -484,7 +485,7 @@ + self.set_up_using_raw_expectations(raw_expectations) + all_versions = reduce( + lambda x, y: x + y, +- self._port.configuration_specifier_macros_dict.values()) ++ list(self._port.configuration_specifier_macros_dict.values())) + self._system_config_remover.remove_os_versions( + 'failures/expected/text.html', all_versions) + self._system_config_remover.update_expectations() +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_failures.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_failures.py 2025-01-16 02:26:08.564263337 +0800 +@@ -26,7 +26,7 @@ + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-import cPickle ++import pickle + + from blinkpy.web_tests.controllers import repaint_overlay + from blinkpy.web_tests.models.typ_types import ResultType +@@ -155,7 +155,7 @@ + @staticmethod + def loads(s): + """Creates a AbstractTestResultType object from the specified string.""" +- return cPickle.loads(s) ++ return pickle.loads(s) + + def message(self): + """Returns a string describing the failure in more detail.""" +@@ -172,7 +172,7 @@ + + def dumps(self): + """Returns the string/JSON representation of a AbstractTestResultType.""" +- return cPickle.dumps(self) ++ return pickle.dumps(self) + + def driver_needs_restart(self): + """Returns True if we should kill the driver before the next test.""" +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_results.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_results.py 2025-01-16 02:26:08.564263337 +0800 +@@ -26,7 +26,7 @@ + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-import cPickle ++import pickle + + from blinkpy.web_tests.models import test_failures, test_expectations + from blinkpy.web_tests.models.typ_types import ResultType, Artifacts +@@ -66,7 +66,7 @@ + + @staticmethod + def loads(string): +- return cPickle.loads(string) ++ return pickle.loads(string) + + def __init__(self, + test_name, +@@ -136,4 +136,4 @@ + return not (self == other) + + def dumps(self): +- return cPickle.dumps(self) ++ return pickle.dumps(self) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_run_results.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_run_results.py 2025-01-16 02:26:08.564263337 +0800 +@@ -78,7 +78,7 @@ + + self.tests_by_expectation = {} + for expected_result in \ +- test_expectations.EXPECTATION_DESCRIPTIONS.keys(): ++ list(test_expectations.EXPECTATION_DESCRIPTIONS.keys()): + self.tests_by_expectation[expected_result] = set() + + self.slow_tests = set() +@@ -199,7 +199,7 @@ + merged_results_by_name = collections.defaultdict(list) + for test_run_results in [initial_results] + all_retry_results: + # all_results does not include SKIP, so we need results_by_name. +- for test_name, result in test_run_results.results_by_name.iteritems(): ++ for test_name, result in test_run_results.results_by_name.items(): + if result.type == ResultType.Skip: + is_unexpected = test_name in test_run_results.unexpected_results_by_name + merged_results_by_name[test_name].append((result, +@@ -213,7 +213,7 @@ + + # Finally, compute the tests dict. + tests = {} +- for test_name, merged_results in merged_results_by_name.iteritems(): ++ for test_name, merged_results in merged_results_by_name.items(): + initial_result = merged_results[0][0] + + if only_include_failing and initial_result.type == ResultType.Skip: +@@ -341,7 +341,7 @@ + + for test_result, _ in merged_results: + for artifact_name, artifacts in \ +- test_result.artifacts.artifacts.items(): ++ list(test_result.artifacts.artifacts.items()): + artifact_dict = test_dict.setdefault('artifacts', {}) + artifact_dict.setdefault(artifact_name, []).extend(artifacts) + +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_run_results_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/models/test_run_results_unittest.py 2025-01-16 02:26:08.564263337 +0800 +@@ -281,7 +281,7 @@ + def test_num_failures_by_type(self): + summary = summarized_results( + self.port, expected=False, passing=False, flaky=False) +- self.assertEquals(summary['num_failures_by_type'], { ++ self.assertEqual(summary['num_failures_by_type'], { + 'CRASH': 1, + 'PASS': 1, + 'SKIP': 0, +@@ -291,7 +291,7 @@ + + summary = summarized_results( + self.port, expected=True, passing=False, flaky=False) +- self.assertEquals(summary['num_failures_by_type'], { ++ self.assertEqual(summary['num_failures_by_type'], { + 'CRASH': 1, + 'PASS': 1, + 'SKIP': 0, +@@ -301,7 +301,7 @@ + + summary = summarized_results( + self.port, expected=False, passing=True, flaky=False) +- self.assertEquals(summary['num_failures_by_type'], { ++ self.assertEqual(summary['num_failures_by_type'], { + 'CRASH': 0, + 'PASS': 5, + 'SKIP': 1, +@@ -313,13 +313,13 @@ + self.port._options.builder_name = 'dummy builder' + summary = summarized_results( + self.port, expected=False, passing=False, flaky=False) +- self.assertNotEquals(summary['chromium_revision'], '') ++ self.assertNotEqual(summary['chromium_revision'], 
'') + + def test_bug_entry(self): + self.port._options.builder_name = 'dummy builder' + summary = summarized_results( + self.port, expected=False, passing=True, flaky=False) +- self.assertEquals( ++ self.assertEqual( + summary['tests']['passes']['skipped']['skip.html']['bugs'], + ['crbug.com/123']) + +@@ -331,14 +331,14 @@ + passing=True, + flaky=False, + extra_skipped_tests=['passes/text.html']) +- self.assertEquals(summary['tests']['passes']['text.html']['expected'], ++ self.assertEqual(summary['tests']['passes']['text.html']['expected'], + 'SKIP PASS') + + def test_summarized_results_wontfix(self): + self.port._options.builder_name = 'dummy builder' + summary = summarized_results( + self.port, expected=False, passing=False, flaky=False) +- self.assertEquals( ++ self.assertEqual( + summary['tests']['failures']['expected']['keyboard.html'] + ['expected'], 'SKIP CRASH') + self.assertTrue( +@@ -378,7 +378,7 @@ + self.port._options.builder_name = 'dummy builder' + summary = summarized_results( + self.port, expected=False, passing=True, flaky=False) +- self.assertEquals( ++ self.assertEqual( + summary['tests']['passes']['skipped']['skip.html']['expected'], + 'SKIP') + +@@ -398,10 +398,10 @@ + def test_rounded_run_times(self): + summary = summarized_results( + self.port, expected=False, passing=False, flaky=False) +- self.assertEquals(summary['tests']['passes']['text.html']['time'], 1) ++ self.assertEqual(summary['tests']['passes']['text.html']['time'], 1) + self.assertTrue('time' not in summary['tests']['failures']['expected'] + ['audio.html']) +- self.assertEquals( ++ self.assertEqual( + summary['tests']['failures']['expected']['timeout.html']['time'], + 0.1) + self.assertTrue('time' not in summary['tests']['failures']['expected'] +@@ -433,70 +433,70 @@ + self.port, expectations, initial_results, all_retry_results) + self.assertIn('is_unexpected', + summary['tests']['failures']['expected']['text.html']) +- self.assertEquals( ++ self.assertEqual( + summary['tests']['failures']['expected']['text.html']['expected'], + 'FAIL') +- self.assertEquals( ++ self.assertEqual( + summary['tests']['failures']['expected']['text.html']['actual'], + 'TIMEOUT FAIL PASS PASS') +- self.assertEquals(summary['num_passes'], 1) +- self.assertEquals(summary['num_regressions'], 0) +- self.assertEquals(summary['num_flaky'], 0) ++ self.assertEqual(summary['num_passes'], 1) ++ self.assertEqual(summary['num_regressions'], 0) ++ self.assertEqual(summary['num_flaky'], 0) + + def test_summarized_results_flaky(self): + summary = summarized_results( + self.port, expected=False, passing=False, flaky=True) + +- self.assertEquals( ++ self.assertEqual( + summary['tests']['failures']['expected']['crash.html']['expected'], + 'CRASH') +- self.assertEquals( ++ self.assertEqual( + summary['tests']['failures']['expected']['crash.html']['actual'], + 'TIMEOUT FAIL CRASH FAIL') + + self.assertTrue( + 'is_unexpected' not in summary['tests']['passes']['text.html']) +- self.assertEquals(summary['tests']['passes']['text.html']['expected'], ++ self.assertEqual(summary['tests']['passes']['text.html']['expected'], + 'PASS') +- self.assertEquals(summary['tests']['passes']['text.html']['actual'], ++ self.assertEqual(summary['tests']['passes']['text.html']['actual'], + 'TIMEOUT PASS PASS PASS') + + self.assertTrue(summary['tests']['failures']['expected'] + ['timeout.html']['is_unexpected']) +- self.assertEquals( ++ self.assertEqual( + summary['tests']['failures']['expected']['timeout.html'] + ['expected'], 'TIMEOUT') +- self.assertEquals( ++ 
self.assertEqual( + summary['tests']['failures']['expected']['timeout.html']['actual'], + 'FAIL FAIL FAIL FAIL') + + self.assertTrue('is_unexpected' not in summary['tests']['failures'] + ['expected']['leak.html']) +- self.assertEquals( ++ self.assertEqual( + summary['tests']['failures']['expected']['leak.html']['expected'], + 'FAIL') +- self.assertEquals( ++ self.assertEqual( + summary['tests']['failures']['expected']['leak.html']['actual'], + 'TIMEOUT FAIL FAIL FAIL') + + self.assertTrue('is_unexpected' not in summary['tests']['failures'] + ['expected']['audio.html']) +- self.assertEquals( ++ self.assertEqual( + summary['tests']['failures']['expected']['audio.html']['expected'], + 'FAIL') +- self.assertEquals( ++ self.assertEqual( + summary['tests']['failures']['expected']['audio.html']['actual'], + 'CRASH FAIL FAIL FAIL') + +- self.assertEquals( ++ self.assertEqual( + summary['tests']['failures']['expected']['text.html']['expected'], + 'FAIL') + self.assertTrue('is_unexpected' not in summary['tests']['failures'] + ['expected']['text.html']) + +- self.assertEquals(summary['num_flaky'], 6) +- self.assertEquals(summary['num_passes'], 1) # keyboard.html +- self.assertEquals(summary['num_regressions'], 0) ++ self.assertEqual(summary['num_flaky'], 6) ++ self.assertEqual(summary['num_passes'], 1) # keyboard.html ++ self.assertEqual(summary['num_regressions'], 0) + + def test_summarized_results_flaky_pass_after_first_retry(self): + test_name = 'passes/text.html' +@@ -520,13 +520,13 @@ + self.port, expectations, initial_results, all_retry_results) + self.assertTrue( + 'is_unexpected' not in summary['tests']['passes']['text.html']) +- self.assertEquals(summary['tests']['passes']['text.html']['expected'], ++ self.assertEqual(summary['tests']['passes']['text.html']['expected'], + 'PASS') +- self.assertEquals(summary['tests']['passes']['text.html']['actual'], ++ self.assertEqual(summary['tests']['passes']['text.html']['actual'], + 'CRASH TIMEOUT PASS PASS') +- self.assertEquals(summary['num_flaky'], 1) +- self.assertEquals(summary['num_passes'], 0) +- self.assertEquals(summary['num_regressions'], 0) ++ self.assertEqual(summary['num_flaky'], 1) ++ self.assertEqual(summary['num_passes'], 0) ++ self.assertEqual(summary['num_regressions'], 0) + + def test_summarized_results_with_iterations(self): + test_name = 'passes/text.html' +@@ -549,13 +549,13 @@ + + summary = test_run_results.summarize_results( + self.port, expectations, initial_results, all_retry_results) +- self.assertEquals(summary['tests']['passes']['text.html']['expected'], ++ self.assertEqual(summary['tests']['passes']['text.html']['expected'], + 'PASS') +- self.assertEquals(summary['tests']['passes']['text.html']['actual'], ++ self.assertEqual(summary['tests']['passes']['text.html']['actual'], + 'CRASH FAIL TIMEOUT FAIL FAIL') +- self.assertEquals(summary['num_flaky'], 0) +- self.assertEquals(summary['num_passes'], 0) +- self.assertEquals(summary['num_regressions'], 1) ++ self.assertEqual(summary['num_flaky'], 0) ++ self.assertEqual(summary['num_passes'], 0) ++ self.assertEqual(summary['num_regressions'], 1) + + def test_summarized_results_regression(self): + summary = summarized_results( +@@ -563,48 +563,48 @@ + + self.assertTrue(summary['tests']['failures']['expected'] + ['timeout.html']['is_unexpected']) +- self.assertEquals( ++ self.assertEqual( + summary['tests']['failures']['expected']['timeout.html'] + ['expected'], 'TIMEOUT') +- self.assertEquals( ++ self.assertEqual( + 
summary['tests']['failures']['expected']['timeout.html']['actual'], + 'FAIL FAIL CRASH FAIL') + + self.assertTrue( + summary['tests']['passes']['text.html']['is_unexpected']) +- self.assertEquals(summary['tests']['passes']['text.html']['expected'], ++ self.assertEqual(summary['tests']['passes']['text.html']['expected'], + 'PASS') +- self.assertEquals(summary['tests']['passes']['text.html']['actual'], ++ self.assertEqual(summary['tests']['passes']['text.html']['actual'], + 'TIMEOUT TIMEOUT TIMEOUT TIMEOUT') + + self.assertTrue(summary['tests']['failures']['expected']['crash.html'] + ['is_unexpected']) +- self.assertEquals( ++ self.assertEqual( + summary['tests']['failures']['expected']['crash.html']['expected'], + 'CRASH') +- self.assertEquals( ++ self.assertEqual( + summary['tests']['failures']['expected']['crash.html']['actual'], + 'TIMEOUT TIMEOUT TIMEOUT TIMEOUT') + + self.assertTrue(summary['tests']['failures']['expected']['leak.html'] + ['is_unexpected']) +- self.assertEquals( ++ self.assertEqual( + summary['tests']['failures']['expected']['leak.html']['expected'], + 'FAIL') +- self.assertEquals( ++ self.assertEqual( + summary['tests']['failures']['expected']['leak.html']['actual'], + 'TIMEOUT TIMEOUT TIMEOUT TIMEOUT') + +- self.assertEquals( ++ self.assertEqual( + summary['tests']['failures']['expected']['audio.html']['expected'], + 'FAIL') +- self.assertEquals( ++ self.assertEqual( + summary['tests']['failures']['expected']['audio.html']['actual'], + 'CRASH FAIL FAIL FAIL') + +- self.assertEquals(summary['num_regressions'], 6) +- self.assertEquals(summary['num_passes'], 1) # keyboard.html +- self.assertEquals(summary['num_flaky'], 0) ++ self.assertEqual(summary['num_regressions'], 6) ++ self.assertEqual(summary['num_passes'], 1) # keyboard.html ++ self.assertEqual(summary['num_flaky'], 0) + + def test_results_contains_path_delimiter(self): + summary = summarized_results( +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/port/android_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/port/android_unittest.py 2025-01-16 02:26:08.564263337 +0800 +@@ -117,8 +117,8 @@ + 'adb_devices': ['123456789ABCDEF9'] + })) + +- self.assertEquals(6, port_default.default_child_processes()) +- self.assertEquals(1, port_fixed_device.default_child_processes()) ++ self.assertEqual(6, port_default.default_child_processes()) ++ self.assertEqual(1, port_fixed_device.default_child_processes()) + + def test_no_bot_expectations_searched(self): + # We don't support bot expectations at the moment +@@ -189,7 +189,7 @@ + + # The cmd_line() method in the Android port is used for starting a shell, not the test runner. + def test_cmd_line(self): +- self.assertEquals(['adb', '-s', '123456789ABCDEF0', 'shell'], ++ self.assertEqual(['adb', '-s', '123456789ABCDEF0', 'shell'], + self._driver.cmd_line([])) + + # Test that the Chromium Android port can interpret Android's shell output. 
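+# The base.py hunk below replaces the basestring assertions with a coercion
+# from str to bytes before decoding: Python 3 str has no .decode(), and the
+# port_testcase.py tests later in this patch still pass character strings
+# such as 'foo\xa6bar' while expecting U+FFFD in the decoded output.
+# Round-tripping through latin-1 maps each code point to a single byte, so
+# invalid UTF-8 still decodes to the replacement character. A minimal
+# sketch of the idea (the helper name is illustrative only):
+#
+#     def to_text(data):
+#         if isinstance(data, str):
+#             data = data.encode('latin-1', 'replace')
+#         return data.decode('utf8', 'replace')
+#
+#     assert to_text(b'foo\xa6bar') == 'foo\ufffdbar'
+#     assert to_text('foo\xa6bar') == 'foo\ufffdbar'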
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/port/base.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/port/base.py	2025-01-16 02:26:08.564263337 +0800
+@@ -309,7 +309,7 @@
+             if name in configs:
+                 raise ValueError('{} contains duplicated name {}.'.format(
+                     config_file, name))
+-            if args in configs.itervalues():
++            if args in iter(configs.values()):
+                 raise ValueError(
+                     '{}: name "{}" has the same args as another entry.'.format(
+                         config_file, name))
+@@ -440,8 +440,8 @@
+ 
+         The directories are searched in order.
+         """
+-        return map(self._absolute_baseline_path,
+-                   self.FALLBACK_PATHS[self.version()])
++        return list(map(self._absolute_baseline_path,
++                   self.FALLBACK_PATHS[self.version()]))
+ 
+     @memoized
+     def _compare_baseline(self):
+@@ -1635,7 +1635,7 @@
+         the --additional-expectations flag is passed; those aren't included
+         here.
+         """
+-        return filter(None, [
++        return [_f for _f in [
+             self.path_to_generic_test_expectations_file(),
+             self.path_to_webdriver_expectations_file(),
+             self._filesystem.join(self.web_tests_dir(), 'NeverFixTests'),
+@@ -1643,7 +1643,7 @@
+                                   'StaleTestExpectations'),
+             self._filesystem.join(self.web_tests_dir(), 'SlowTests'),
+             self._flag_specific_expectations_path()
+-        ])
++        ] if _f]
+ 
+     def extra_expectations_files(self):
+         """Returns a list of paths to test expectations not loaded by default.
+@@ -1795,15 +1795,18 @@
+ 
+-        # We require stdout and stderr to be bytestrings, not character strings.
++        # stdout and stderr ought to be bytestrings; in Python 3, coerce
++        # character strings to bytes before decoding.
+         if stdout:
+-            assert isinstance(stdout, basestring)
++            if isinstance(stdout, str):
++                stdout = stdout.encode('latin-1', 'replace')
+             stdout_lines = stdout.decode('utf8', 'replace').splitlines()
+         else:
+-            stdout_lines = [u'']
++            stdout_lines = ['']
+         if stderr:
+-            assert isinstance(stderr, basestring)
++            if isinstance(stderr, str):
++                stderr = stderr.encode('latin-1', 'replace')
+             stderr_lines = stderr.decode('utf8', 'replace').splitlines()
+         else:
+-            stderr_lines = [u'']
++            stderr_lines = ['']
+ 
+         return (stderr, 'crash log for %s (pid %s):\n%s\n%s\n' %
+                 (name_str, pid_str, '\n'.join(
+@@ -1875,8 +1878,7 @@
+         # maps then this could be more efficient.
+         if suite.bases:
+             tests.extend(
+-                map(lambda x: suite.full_prefix + x,
+-                    self.real_tests(suite.bases)))
++                [suite.full_prefix + x for x in self.real_tests(suite.bases)])
+ 
+         if suite_paths:
+             tests.extend(
+@@ -1920,7 +1922,7 @@
+ 
+         tests = []
+         tests.extend(
+-            map(lambda x: suite.full_prefix + x, self.real_tests(bases)))
++            [suite.full_prefix + x for x in self.real_tests(bases)])
+ 
+         wpt_bases = []
+         for base in bases:
+@@ -1981,7 +1983,7 @@
+         # This walks through the set of paths where we should look for tests.
+         # For each path, a map can be provided that we replace 'path' with in
+         # the result.
+-        for filter_path, virtual_prefix in itertools.izip_longest(
++        for filter_path, virtual_prefix in itertools.zip_longest(
+                 filter_paths, virtual_prefixes):
+             # This is to make sure "external[\\/]?" can also match to
+             # external/wpt.
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/port/base_unittest.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/port/base_unittest.py	2025-01-16 02:26:08.564263337 +0800
+@@ -61,24 +61,24 @@
+ 
+     def test_validate_wpt_dirs(self):
+         # Keys should not have trailing slashes.
+-        for wpt_path in Port.WPT_DIRS.keys():
++        for wpt_path in list(Port.WPT_DIRS.keys()):
+             self.assertFalse(wpt_path.endswith('/'))
+         # Values should not be empty (except the last one).
+- for url_prefix in Port.WPT_DIRS.values()[:-1]: ++ for url_prefix in list(Port.WPT_DIRS.values())[:-1]: + self.assertNotEqual(url_prefix, '/') +- self.assertEqual(Port.WPT_DIRS.values()[-1], '/') ++ self.assertEqual(list(Port.WPT_DIRS.values())[-1], '/') + + def test_validate_wpt_regex(self): +- self.assertEquals( ++ self.assertEqual( + Port.WPT_REGEX.match('external/wpt/foo/bar.html').groups(), + ('external/wpt', 'foo/bar.html')) +- self.assertEquals( ++ self.assertEqual( + Port.WPT_REGEX.match('virtual/test/external/wpt/foo/bar.html'). + groups(), ('external/wpt', 'foo/bar.html')) +- self.assertEquals( ++ self.assertEqual( + Port.WPT_REGEX.match('wpt_internal/foo/bar.html').groups(), + ('wpt_internal', 'foo/bar.html')) +- self.assertEquals( ++ self.assertEqual( + Port.WPT_REGEX.match('virtual/test/wpt_internal/foo/bar.html'). + groups(), ('wpt_internal', 'foo/bar.html')) + +@@ -224,7 +224,7 @@ + self.assertEqual( + port.expected_filename(test_file, '.txt', return_default=False), + MOCK_WEB_TESTS + 'platform/foo/fast/test-expected.txt') +- self.assertEquals( ++ self.assertEqual( + port.fallback_expected_filename(test_file, '.txt'), + MOCK_WEB_TESTS + 'fast/test-expected.txt') + port.host.filesystem.remove(MOCK_WEB_TESTS + 'fast/test-expected.txt') +@@ -461,7 +461,7 @@ + MOCK_WEB_TESTS + 'platform/nonexistant/TestExpectations'] + port.host.filesystem.write_text_file( + MOCK_WEB_TESTS + 'platform/exists/TestExpectations', '') +- self.assertEqual('\n'.join(port.expectations_dict().keys()), ++ self.assertEqual('\n'.join(list(port.expectations_dict().keys())), + MOCK_WEB_TESTS + 'platform/exists/TestExpectations') + + def _make_port_for_test_additional_expectations(self, options_dict={}): +@@ -479,13 +479,13 @@ + + def test_additional_expectations_empty(self): + port = self._make_port_for_test_additional_expectations() +- self.assertEqual(port.expectations_dict().values(), []) ++ self.assertEqual(list(port.expectations_dict().values()), []) + + def test_additional_expectations_1(self): + port = self._make_port_for_test_additional_expectations({ + 'additional_expectations': ['/tmp/additional-expectations-1.txt'] + }) +- self.assertEqual(port.expectations_dict().values(), ['content1\n']) ++ self.assertEqual(list(port.expectations_dict().values()), ['content1\n']) + + def test_additional_expectations_2(self): + port = self._make_port_for_test_additional_expectations({ +@@ -494,7 +494,7 @@ + '/tmp/additional-expectations-2.txt' + ] + }) +- self.assertEqual(port.expectations_dict().values(), ++ self.assertEqual(list(port.expectations_dict().values()), + ['content1\n', 'content2\n']) + + def test_additional_expectations_additional_flag(self): +@@ -505,7 +505,7 @@ + ], + 'additional_driver_flag': ['--special-flag'] + }) +- self.assertEqual(port.expectations_dict().values(), ++ self.assertEqual(list(port.expectations_dict().values()), + ['content3', 'content1\n', 'content2\n']) + + def test_flag_specific_expectations(self): +@@ -517,7 +517,7 @@ + port.host.filesystem.write_text_file( + MOCK_WEB_TESTS + 'FlagExpectations/README.txt', 'cc') + +- self.assertEqual(port.expectations_dict().values(), []) ++ self.assertEqual(list(port.expectations_dict().values()), []) + # all_expectations_dict() is an OrderedDict, but its order depends on + # file system walking order. 
+ self.assertEqual( +@@ -1413,21 +1413,21 @@ + + tests = port.tests( + ['virtual/virtual_passes/passes/test-virtual-passes.html']) +- self.assertEquals( ++ self.assertEqual( + ['virtual/virtual_passes/passes/test-virtual-passes.html'], tests) + + tests = port.tests(['virtual/virtual_empty_bases']) +- self.assertEquals([ ++ self.assertEqual([ + 'virtual/virtual_empty_bases/physical1.html', + 'virtual/virtual_empty_bases/dir/physical2.html' + ], tests) + + tests = port.tests(['virtual/virtual_empty_bases/dir']) +- self.assertEquals(['virtual/virtual_empty_bases/dir/physical2.html'], ++ self.assertEqual(['virtual/virtual_empty_bases/dir/physical2.html'], + tests) + + tests = port.tests(['virtual/virtual_empty_bases/dir/physical2.html']) +- self.assertEquals(['virtual/virtual_empty_bases/dir/physical2.html'], ++ self.assertEqual(['virtual/virtual_empty_bases/dir/physical2.html'], + tests) + + def test_build_path(self): +@@ -1732,7 +1732,7 @@ + all_systems.append(system[0]) + all_systems.sort() + configuration_specifier_macros = [] +- for macros in Port.CONFIGURATION_SPECIFIER_MACROS.values(): ++ for macros in list(Port.CONFIGURATION_SPECIFIER_MACROS.values()): + configuration_specifier_macros += macros + configuration_specifier_macros.sort() + self.assertListEqual(all_systems, configuration_specifier_macros) +@@ -1740,7 +1740,7 @@ + def test_configuration_specifier_macros(self): + # CONFIGURATION_SPECIFIER_MACROS should contain all SUPPORTED_VERSIONS + # of each port. Must use real Port classes in this test. +- for port_name, versions in Port.CONFIGURATION_SPECIFIER_MACROS.items(): ++ for port_name, versions in list(Port.CONFIGURATION_SPECIFIER_MACROS.items()): + port_class, _ = PortFactory.get_port_class(port_name) + self.assertIsNotNone(port_class, port_name) + self.assertListEqual(versions, list(port_class.SUPPORTED_VERSIONS)) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/port/driver.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/port/driver.py 2025-01-16 02:26:08.564263337 +0800 +@@ -346,7 +346,7 @@ + self._port.abspath_for_test(test_name)) + + if using_wptserve: +- for wpt_path, url_prefix in self.WPT_DIRS.items(): ++ for wpt_path, url_prefix in list(self.WPT_DIRS.items()): + # The keys of WPT_DIRS do not have trailing slashes. 
+ wpt_path += '/' + if test_name.startswith(wpt_path): +@@ -400,7 +400,7 @@ + for prefix in self._get_uri_prefixes(*self.WPT_HOST_AND_PORTS): + if uri.startswith(prefix): + url_path = '/' + uri[len(prefix):] +- for wpt_path, url_prefix in self.WPT_DIRS.items(): ++ for wpt_path, url_prefix in list(self.WPT_DIRS.items()): + if url_path.startswith(url_prefix): + return wpt_path + '/' + url_path[len(url_prefix):] + raise NotImplementedError('unknown url type: %s' % uri) +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/port/driver_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/port/driver_unittest.py 2025-01-16 02:26:08.564263337 +0800 +@@ -332,7 +332,7 @@ + def _assert_coalesced_switches(self, input_switches, + expected_coalesced_switches): + output_switches = coalesce_repeated_switches(input_switches) +- self.assertEquals(output_switches, expected_coalesced_switches) ++ self.assertEqual(output_switches, expected_coalesced_switches) + + def test_no_dupes(self): + self._assert_coalesced_switches(['--a', '--b', '--c'], +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/port/factory_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/port/factory_unittest.py 2025-01-16 02:26:08.564263337 +0800 +@@ -105,7 +105,7 @@ + host = MockHost() + finder = PathFinder(host.filesystem) + files = files or {} +- for path, contents in files.items(): ++ for path, contents in list(files.items()): + host.filesystem.write_text_file( + finder.path_from_chromium_base(path), contents) + options = optparse.Values({ +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/port/fuchsia.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/port/fuchsia.py 2025-01-16 02:26:08.564263337 +0800 +@@ -374,7 +374,7 @@ + listen_socket.listen(1) + stdin_port = listen_socket.getsockname()[1] + +- command = ['%s=%s' % (k, v) for k, v in self._env.items()] + \ ++ command = ['%s=%s' % (k, v) for k, v in list(self._env.items())] + \ + self._cmd + \ + ['--no-sandbox', '--stdin-redirect=%s:%s' % + (qemu_target.HOST_IP_ADDRESS, stdin_port)] +--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/port/port_testcase.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/port/port_testcase.py 2025-01-16 02:26:08.564263337 +0800 +@@ -268,9 +268,9 @@ + 'foo', 1234, 'foo\xa6bar', 'foo\xa6bar', newer_than=None) + self.assertEqual(stderr, 'foo\xa6bar') + self.assertEqual( +- details, u'crash log for foo (pid 1234):\n' +- u'STDOUT: foo\ufffdbar\n' +- u'STDERR: foo\ufffdbar\n') ++ details, 'crash log for foo (pid 1234):\n' ++ 'STDOUT: foo\ufffdbar\n' ++ 'STDERR: foo\ufffdbar\n') + self.assertIsNone(crash_site) + + def test_get_crash_log_newer_than(self): +@@ -279,9 +279,9 @@ + 'foo', 1234, 'foo\xa6bar', 'foo\xa6bar', newer_than=1.0) + self.assertEqual(stderr, 'foo\xa6bar') + self.assertEqual( +- details, u'crash log for foo (pid 1234):\n' +- u'STDOUT: foo\ufffdbar\n' +- u'STDERR: foo\ufffdbar\n') ++ details, 'crash log for foo (pid 1234):\n' ++ 'STDOUT: foo\ufffdbar\n' ++ 'STDERR: foo\ufffdbar\n') + self.assertIsNone(crash_site) + + def test_get_crash_log_crash_site(self): +@@ -317,7 +317,7 @@ + port.host.filesystem.write_text_file(path, '') + ordered_dict = port.expectations_dict() + 
self.assertEqual(port.path_to_generic_test_expectations_file(),
+- ordered_dict.keys()[0])
++ list(ordered_dict.keys())[0])
+ 
+ options = optparse.Values(
+ dict(additional_expectations=['/tmp/foo', '/tmp/bar']))
+@@ -327,9 +327,9 @@
+ port.host.filesystem.write_text_file('/tmp/foo', 'foo')
+ port.host.filesystem.write_text_file('/tmp/bar', 'bar')
+ ordered_dict = port.expectations_dict()
+- self.assertEqual(ordered_dict.keys()[-2:],
++ self.assertEqual(list(ordered_dict.keys())[-2:],
+ options.additional_expectations)
+- self.assertEqual(ordered_dict.values()[-2:], ['foo', 'bar'])
++ self.assertEqual(list(ordered_dict.values())[-2:], ['foo', 'bar'])
+ 
+ def test_path_to_apache_config_file(self):
+ # Specific behavior may vary by port, so unit test sub-classes may override this.
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/port/server_process.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/port/server_process.py 2025-01-16 02:26:08.564263337 +0800
+@@ -134,7 +134,7 @@
+ env_str = ''
+ if self._env:
+ env_str += '\n'.join('%s=%s' % (k, v)
+- for k, v in self._env.items()) + '\n'
++ for k, v in list(self._env.items())) + '\n'
+ _log.info('CMD: \n%s%s\n', env_str, _quote_cmd(self._cmd))
+ proc = self._host.executive.popen(
+ self._cmd,
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/port/test.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/port/test.py 2025-01-16 02:26:08.564263337 +0800
+@@ -83,7 +83,7 @@
+ 
+ def add(self, name, **kwargs):
+ test = TestInstance(name)
+- for key, value in kwargs.items():
++ for key, value in list(kwargs.items()):
+ test.__dict__[key] = value
+ self.tests[name] = test
+ 
+@@ -123,7 +123,7 @@
+ reference_name, actual_checksum='diff', actual_image='DIFF')
+ 
+ def keys(self):
+- return self.tests.keys()
++ return list(self.tests.keys())
+ 
+ def __contains__(self, item):
+ return item in self.tests
+@@ -506,7 +506,7 @@
+ 
+ # Add each test and the expected output, if any.
+ test_list = unit_test_list()
+- for test in test_list.tests.values():
++ for test in list(test_list.tests.values()):
+ add_file(test, test.name[test.name.rfind('.'):], '')
+ if test.expected_audio:
+ add_file(test, '-expected.wav', test.expected_audio)
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/port/win.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/port/win.py 2025-01-16 02:26:08.564263337 +0800
+@@ -34,7 +34,7 @@
+ # The _winreg library is only available on Windows.
+ # https://docs.python.org/2/library/_winreg.html
+ try:
+- import _winreg # pylint: disable=import-error
++ import winreg # pylint: disable=import-error
+ except ImportError:
+- _winreg = None # pylint: disable=invalid-name
++ winreg = None # pylint: disable=invalid-name
+ 
+@@ -114,9 +114,9 @@
+ # Note that we HKCR is a union of HKLM and HKCR (with the latter
+ # overriding the former), so reading from HKCR ensures that we get
+ # the value if it is set in either place. See als comments below.
+- hkey = _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, sub_key)
+- args = _winreg.QueryValue(hkey, '').split()
+- _winreg.CloseKey(hkey)
++ hkey = winreg.OpenKey(winreg.HKEY_CLASSES_ROOT, sub_key)
++ args = winreg.QueryValue(hkey, '').split()
++ winreg.CloseKey(hkey)
+ 
+ # In order to keep multiple checkouts from stepping on each other, we simply check that an
+ # existing entry points to a valid path and has the right command line. 
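For context on the win.py hunks above and below: the ImportError fallback must bind the same name the call sites use, which is why the fallback now assigns winreg rather than _winreg. The guarded-import shape the port relies on is roughly the following sketch (illustrative only, not part of the patch; winreg exists only on Windows, so non-Windows callers must tolerate None):

    # Sketch of the guarded registry import assumed by the win.py hunks.
    try:
        import winreg  # Python 3 name; present only on Windows
    except ImportError:
        try:
            import _winreg as winreg  # Python 2 fallback
        except ImportError:
            winreg = None  # non-Windows host: registry helpers must check this

With that binding in place, call sites such as winreg.OpenKey(winreg.HKEY_CLASSES_ROOT, sub_key) behave the same under both interpreters.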
+@@ -132,11 +132,11 @@
+ # to the registry, and that will get reflected in HKCR when it is read, above.
+ cmdline = self._path_from_chromium_base('third_party', 'perl', 'perl',
+ 'bin', 'perl.exe') + ' -wT'
+- hkey = _winreg.CreateKeyEx(_winreg.HKEY_CURRENT_USER,
++ hkey = winreg.CreateKeyEx(winreg.HKEY_CURRENT_USER,
+ 'Software\\Classes\\' + sub_key, 0,
+- _winreg.KEY_WRITE)
+- _winreg.SetValue(hkey, '', _winreg.REG_SZ, cmdline)
+- _winreg.CloseKey(hkey)
++ winreg.KEY_WRITE)
++ winreg.SetValue(hkey, '', winreg.REG_SZ, cmdline)
++ winreg.CloseKey(hkey)
+ return True
+ 
+ def setup_environ_for_server(self):
+@@ -150,7 +150,7 @@
+ self.host.environ['TMP'] = self.host.environ['TEMP']
+ env = super(WinPort, self).setup_environ_for_server()
+ apache_envvars = ['SYSTEMDRIVE', 'SYSTEMROOT', 'TEMP', 'TMP']
+- for key, value in self.host.environ.copy().items():
++ for key, value in list(self.host.environ.copy().items()):
+ if key not in env and key in apache_envvars:
+ env[key] = value
+ return env
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/servers/apache_http.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/servers/apache_http.py 2025-01-16 02:26:08.564263337 +0800
+@@ -173,7 +173,7 @@
+ 
+ if additional_dirs:
+ self._start_cmd = start_cmd
+- for alias, path in additional_dirs.iteritems():
++ for alias, path in additional_dirs.items():
+ start_cmd += [
+ '-c',
+ 'Alias %s "%s"' % (alias, path),
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/servers/cli_wrapper.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/servers/cli_wrapper.py 2025-01-16 02:26:08.564263337 +0800
+@@ -102,7 +102,7 @@
+ server = server_constructor(port_obj, options.output_dir, **kwargs)
+ server.start()
+ 
+- print 'Press Ctrl-C or `kill {}` to stop the server'.format(os.getpid())
++ print('Press Ctrl-C or `kill {}` to stop the server'.format(os.getpid()))
+ try:
+ while True:
+ sleep_fn()
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/views/metered_stream.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/views/metered_stream.py 2025-01-16 02:26:08.564263337 +0800
+@@ -113,7 +113,7 @@
+ 
+ # This is the easiest way to make sure a byte stream is printable as ascii
+ # with all non-ascii characters replaced.
+- uni_msg = msg if isinstance(msg, unicode) else msg.decode(
++ uni_msg = msg if isinstance(msg, str) else msg.decode(
+ 'ascii', errors='replace')
+- self._stream.write(uni_msg.encode('ascii', errors='replace'))
++ self._stream.write(uni_msg.encode('ascii', errors='replace').decode('ascii'))
+ 
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/views/metered_stream_unittest.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/views/metered_stream_unittest.py 2025-01-16 02:26:08.564263337 +0800
+@@ -26,7 +26,7 @@
+ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ 
+-import StringIO
++import io
+ import logging
+ import re
+ import unittest
+@@ -39,7 +39,9 @@
+ isatty = False
+ 
+ def setUp(self):
+- self.stream = StringIO.StringIO()
+- self.buflist = self.stream.buflist
++ self.stream = io.StringIO()
++ # io.StringIO has no buflist; record each write for the assertions below.
++ self.buflist = []
++ self.stream.write = lambda s, _w=self.stream.write: (self.buflist.append(s), _w(s))[1]
+ self.stream.isatty = lambda: self.isatty
+ 
+@@ -49,7 +51,7 @@
+ self.logger.propagate = False
+ 
+ # add a dummy time counter for a default behavior. 
+- self.times = range(10)
++ self.times = list(range(10))
+ 
+ self.meter = MeteredStream(self.stream, self.verbose, self.logger,
+ self.time_fn, 8675)
+@@ -65,7 +67,7 @@
+ def test_logging_not_included(self):
+ # This tests that if we don't hand a logger to the MeteredStream,
+ # nothing is logged.
+- logging_stream = StringIO.StringIO()
++ logging_stream = io.StringIO()
+ handler = logging.StreamHandler(logging_stream)
+ root_logger = logging.getLogger()
+ orig_level = root_logger.level
+@@ -130,7 +132,7 @@
+ 
+ def test_bytestream(self):
+ self.meter.write('German umlauts: \xe4\xf6\xfc')
+- self.meter.write(u'German umlauts: \xe4\xf6\xfc')
++ self.meter.write('German umlauts: \xe4\xf6\xfc')
+ self.assertEqual(self.buflist,
+ ['German umlauts: ???', 'German umlauts: ???'])
+ 
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/views/printing.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/views/printing.py 2025-01-16 02:26:08.564263337 +0800
+@@ -185,7 +185,7 @@
+ self._print_debug('Thread timing:')
+ stats = {}
+ cuml_time = 0
+- for result in run_results.results_by_name.values():
++ for result in list(run_results.results_by_name.values()):
+ stats.setdefault(result.worker_name, {
+ 'num_tests': 0,
+ 'total_time': 0
+@@ -206,7 +206,7 @@
+ if self._options.timing:
+ parallel_time = sum(
+ result.total_run_time
+- for result in run_results.results_by_name.values())
++ for result in list(run_results.results_by_name.values()))
+ 
+ # There is serial overhead in web_test_runner.run() that we can't easily account for when
+ # really running in parallel, but taking the min() ensures that in the worst case
+--- a/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/views/printing_unittest.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/blinkpy/web_tests/views/printing_unittest.py 2025-01-16 02:26:08.564263337 +0800
+@@ -28,7 +28,7 @@
+ """Unit tests for printing.py."""
+ 
+ import optparse
+-import StringIO
++import io
+ import sys
+ import unittest
+ 
+@@ -96,7 +96,7 @@
+ host = MockHost()
+ self._port = host.port_factory.get('test', options)
+ 
+- regular_output = StringIO.StringIO()
++ regular_output = io.StringIO()
+ printer = printing.Printer(host, options, regular_output)
+ return printer, regular_output
+ 
+--- a/src/3rdparty/chromium/third_party/blink/tools/gdb/blink.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/gdb/blink.py 2025-01-16 02:26:08.564263337 +0800
+@@ -34,7 +34,7 @@
+ import blink
+ """
+ 
+-from __future__ import print_function
++
+ 
+ import gdb
+ import re
+@@ -338,7 +338,3 @@
+ return ('[%d]' % count, element)
+ 
+- # Python version < 3 compatibility:
+- def next(self):
+- return self.__next__()
+-
+ def __init__(self, val):
+--- a/src/3rdparty/chromium/third_party/blink/tools/lldb/lldb_blink.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/blink/tools/lldb/lldb_blink.py 2025-01-16 02:26:08.564263337 +0800
+@@ -127,7 +127,7 @@
+ if not valobj.GetValue():
+ return 0
+ 
+- for i in xrange(0, 2048):
++ for i in range(0, 2048):
+ if valobj.GetPointeeData(i, 1).GetUnsignedInt16(error, 0) == 0:
+ return i
+ 
+@@ -140,10 +140,10 @@
+ else:
+ length = int(length)
+ 
+- out_string = u""
+- for i in xrange(0, length):
++ out_string = ""
++ for i in range(0, length):
+ char_value = valobj.GetPointeeData(i, 1).GetUnsignedInt16(error, 0)
+- out_string = out_string + 
unichr(char_value) ++ out_string = out_string + chr(char_value) + + return out_string.encode('utf-8') + +@@ -154,10 +154,10 @@ + else: + length = int(length) + +- out_string = u"" +- for i in xrange(0, length): ++ out_string = "" ++ for i in range(0, length): + char_value = valobj.GetPointeeData(i, 1).GetUnsignedInt8(error, 0) +- out_string = out_string + unichr(char_value) ++ out_string = out_string + chr(char_value) + + return out_string.encode('utf-8') + +@@ -218,7 +218,7 @@ + def to_string(self): + impl = self.stringimpl() + if not impl: +- return u"" ++ return "" + return impl.to_string() + + +--- a/src/3rdparty/chromium/third_party/boringssl/roll_boringssl.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/boringssl/roll_boringssl.py 2025-01-16 02:26:08.564263337 +0800 +@@ -100,10 +100,10 @@ + return 1 + + if not IsPristine(SRC_PATH): +- print >>sys.stderr, 'Chromium checkout not pristine.' ++ print('Chromium checkout not pristine.', file=sys.stderr) + return 0 + if not IsPristine(BORINGSSL_SRC_PATH): +- print >>sys.stderr, 'BoringSSL checkout not pristine.' ++ print('BoringSSL checkout not pristine.', file=sys.stderr) + return 0 + + if len(sys.argv) > 1: +@@ -114,10 +114,10 @@ + + old_head = RevParse(BORINGSSL_SRC_PATH, 'HEAD') + if old_head == new_head: +- print 'BoringSSL already up to date.' ++ print('BoringSSL already up to date.') + return 0 + +- print 'Rolling BoringSSL from %s to %s...' % (old_head, new_head) ++ print('Rolling BoringSSL from %s to %s...' % (old_head, new_head)) + + # Look for commits with associated Chromium bugs. + crbugs = set() +@@ -199,8 +199,8 @@ + ['git', 'log', '--grep', '^Update-Note:', '-i', + '%s..%s' % (old_head, new_head)], cwd=BORINGSSL_SRC_PATH).strip() + if len(notes) > 0: +- print "\x1b[1mThe following changes contain updating notes\x1b[0m:\n\n" +- print notes ++ print("\x1b[1mThe following changes contain updating notes\x1b[0m:\n\n") ++ print(notes) + + return 0 + +--- a/src/3rdparty/chromium/third_party/boringssl/src/crypto/curve25519/make_curve25519_tables.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/boringssl/src/crypto/curve25519/make_curve25519_tables.py 2025-01-16 02:26:08.564263337 +0800 +@@ -14,7 +14,7 @@ + # OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-import StringIO ++import io + import subprocess + + # Base field Z_p +@@ -140,7 +140,7 @@ + bi_precomp.append(to_ge_precomp(P)) + + +- buf = StringIO.StringIO() ++ buf = io.StringIO() + buf.write("""/* Copyright (c) 2020, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any +--- a/src/3rdparty/chromium/third_party/boringssl/src/third_party/googletest/scripts/fuse_gtest_files.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/boringssl/src/third_party/googletest/scripts/fuse_gtest_files.py 2025-01-16 02:26:08.564263337 +0800 +@@ -93,8 +93,8 @@ + """ + + if not os.path.isfile(os.path.join(directory, relative_path)): +- print('ERROR: Cannot find %s in directory %s.' % (relative_path, +- directory)) ++ print(('ERROR: Cannot find %s in directory %s.' % (relative_path, ++ directory))) + print('Please either specify a valid project root directory ' + 'or omit it on the command line.') + sys.exit(1) +@@ -122,8 +122,8 @@ + # TODO(wan@google.com): The following user-interaction doesn't + # work with automated processes. 
We should provide a way for the
+ # Makefile to force overwriting the files.
+- print('%s already exists in directory %s - overwrite it? (y/N) ' %
+- (relative_path, output_dir))
++ print(('%s already exists in directory %s - overwrite it? (y/N) ' %
++ (relative_path, output_dir)))
+ answer = sys.stdin.readline().strip()
+ if answer not in ['y', 'Y']:
+ print('ABORTED.')
+--- a/src/3rdparty/chromium/third_party/boringssl/src/third_party/googletest/scripts/gen_gtest_pred_impl.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/boringssl/src/third_party/googletest/scripts/gen_gtest_pred_impl.py 2025-01-16 02:26:08.564263337 +0800
+@@ -182,7 +182,7 @@
+ def OneTo(n):
+ """Returns the list [1, 2, 3, ..., n].""" 
+ 
+- return range(1, n + 1)
++ return list(range(1, n + 1))
+ 
+ 
+ def Iter(n, format, sep=''):
+@@ -304,12 +304,12 @@
+ """Given a file path and a content string
+ overwrites it with the given content.
+ """
+- print 'Updating file %s . . .' % path
+- f = file(path, 'w+')
+- print >>f, content,
++ print('Updating file %s . . .' % path)
++ f = open(path, 'w+')
++ print(content, end=' ', file=f)
+ f.close()
+ 
+- print 'File %s has been updated.' % path
++ print('File %s has been updated.' % path)
+ 
+ 
+ def GenerateHeader(n):
+@@ -717,8 +717,8 @@
+ unit test."""
+ 
+ if len(sys.argv) != 2:
+- print __doc__
+- print 'Author: ' + __author__
++ print(__doc__)
++ print('Author: ' + __author__)
+ sys.exit(1)
+ 
+ n = int(sys.argv[1])
+--- a/src/3rdparty/chromium/third_party/boringssl/src/third_party/googletest/scripts/pump.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/boringssl/src/third_party/googletest/scripts/pump.py 2025-01-16 02:26:08.564263337 +0800
+@@ -246,7 +246,7 @@
+ if m and not m.start():
+ return MakeToken(lines, pos, pos + m.end(), token_type)
+ else:
+- print 'ERROR: %s expected at %s.' % (token_type, pos)
++ print('ERROR: %s expected at %s.' % (token_type, pos))
+ sys.exit(1)
+ 
+ 
+@@ -273,8 +273,8 @@
+ if m:
+ return pos + m.start()
+ else:
+- print ('ERROR: %s expected on line %s after column %s.' %
+- (token_type, pos.line + 1, pos.column))
++ print(('ERROR: %s expected on line %s after column %s.' %
++ (token_type, pos.line + 1, pos.column)))
+ sys.exit(1)
+ 
+ 
+@@ -453,8 +453,8 @@
+ def PopToken(a_list, token_type=None):
+ token = PopFront(a_list)
+ if token_type is not None and token.token_type != token_type:
+- print 'ERROR: %s expected at %s' % (token_type, token.start)
+- print 'ERROR: %s found instead' % (token,)
++ print('ERROR: %s expected at %s' % (token_type, token.start))
++ print('ERROR: %s found instead' % (token,))
+ sys.exit(1)
+ 
+ return token
+@@ -616,16 +616,16 @@
+ if identifier == var:
+ return value
+ 
+- print 'ERROR: meta variable %s is undefined.' % (identifier,)
++ print('ERROR: meta variable %s is undefined.' % (identifier,))
+ sys.exit(1)
+ 
+ def EvalExp(self, exp):
+ try:
+ result = eval(exp.python_exp)
+- except Exception, e:
+- print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
+- print ('ERROR: failed to evaluate meta expression %s at %s' %
+- (exp.python_exp, exp.token.start))
++ except Exception as e:
++ print('ERROR: caught exception %s: %s' % (e.__class__.__name__, e))
++ print(('ERROR: failed to evaluate meta expression %s at %s' %
++ (exp.python_exp, exp.token.start)))
+ sys.exit(1)
+ return result
+ 
+@@ -634,7 +634,7 @@
+ if identifier == var:
+ return (lower, upper)
+ 
+- print 'ERROR: range %s is undefined.' % (identifier,)
++ print('ERROR: range %s is undefined.' 
% (identifier,))
+ sys.exit(1)
+ 
+ 
+@@ -694,8 +694,8 @@
+ elif isinstance(node, CodeNode):
+ RunCode(env.Clone(), node, output)
+ else:
+- print 'BAD'
+- print node
++ print('BAD')
++ print(node)
+ sys.exit(1)
+ 
+ 
+@@ -830,7 +830,7 @@
+ 
+ def main(argv):
+ if len(argv) == 1:
+- print __doc__
++ print(__doc__)
+ sys.exit(1)
+ 
+ file_path = argv[-1]
+@@ -840,7 +840,7 @@
+ else:
+ output_file_path = '-'
+ if output_file_path == '-':
+- print output_str,
++ print(output_str, end=' ')
+ else:
+- output_file = file(output_file_path, 'w')
++ output_file = open(output_file_path, 'w')
+ output_file.write('// This file was GENERATED by command:\n')
+--- a/src/3rdparty/chromium/third_party/boringssl/src/third_party/googletest/scripts/release_docs.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/boringssl/src/third_party/googletest/scripts/release_docs.py 2025-01-16 02:26:08.564263337 +0800
+@@ -127,11 +127,11 @@
+ def BranchFiles(self):
+ """Branches the .wiki files needed to be branched."""
+ 
+- print 'Branching %d .wiki files:' % (len(self.files_to_branch),)
++ print('Branching %d .wiki files:' % (len(self.files_to_branch),))
+ os.chdir(self.wiki_dir)
+ for f in self.files_to_branch:
+ command = 'svn cp %s %s%s' % (f, self.version_prefix, f)
+- print command
++ print(command)
+ os.system(command)
+ 
+ def UpdateLinksInBranchedFiles(self):
+@@ -139,7 +139,7 @@
+ for f in self.files_to_branch:
+ source_file = os.path.join(self.wiki_dir, f)
+ versioned_file = os.path.join(self.wiki_dir, self.version_prefix + f)
+- print 'Updating links in %s.' % (versioned_file,)
+- text = file(source_file, 'r').read()
++ print('Updating links in %s.' % (versioned_file,))
++ text = open(source_file, 'r').read()
+ new_text = self.search_for_re.sub(self.replace_with, text)
+- file(versioned_file, 'w').write(new_text)
++ open(versioned_file, 'w').write(new_text)
+--- a/src/3rdparty/chromium/third_party/boringssl/src/third_party/googletest/scripts/upload.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/boringssl/src/third_party/googletest/scripts/upload.py 2025-01-16 02:26:08.564263337 +0800
+@@ -31,7 +31,7 @@
+ # This code is derived from appcfg.py in the App Engine SDK (open source),
+ # and from ASPN recipe #146306.
+ 
+-import cookielib
++import http.cookiejar
+ import getpass
+ import logging
+-import md5
++import hashlib
+@@ -42,9 +42,9 @@
+ import socket
+ import subprocess
+ import sys
+-import urllib
+-import urllib2
+-import urlparse
++import urllib.request, urllib.parse, urllib.error
++import urllib.request, urllib.error, urllib.parse
++import urllib.parse
+ 
+ try:
+ import readline
+@@ -79,15 +79,15 @@
+ last_email = last_email_file.readline().strip("\n")
+ last_email_file.close()
+ prompt += " [%s]" % last_email
+- except IOError, e:
++ except IOError as e:
+ pass
+- email = raw_input(prompt + ": ").strip()
++ email = input(prompt + ": ").strip()
+ if email:
+ try:
+ last_email_file = open(last_email_file_name, "w")
+ last_email_file.write(email)
+ last_email_file.close()
+- except IOError, e:
++ except IOError as e:
+ pass
+ else:
+ email = last_email
+@@ -103,20 +103,20 @@
+ msg: The string to print. 
+ """ + if verbosity > 0: +- print msg ++ print(msg) + + + def ErrorExit(msg): + """Print an error message to stderr and exit.""" +- print >>sys.stderr, msg ++ print(msg, file=sys.stderr) + sys.exit(1) + + +-class ClientLoginError(urllib2.HTTPError): ++class ClientLoginError(urllib.error.HTTPError): + """Raised to indicate there was an error authenticating with ClientLogin.""" + + def __init__(self, url, code, msg, headers, args): +- urllib2.HTTPError.__init__(self, url, code, msg, headers, None) ++ urllib.error.HTTPError.__init__(self, url, code, msg, headers, None) + self.args = args + self.reason = args["Error"] + +@@ -162,10 +162,10 @@ + def _CreateRequest(self, url, data=None): + """Creates a new urllib request.""" + logging.debug("Creating request for: '%s' with payload:\n%s", url, data) +- req = urllib2.Request(url, data=data) ++ req = urllib.request.Request(url, data=data) + if self.host_override: + req.add_header("Host", self.host_override) +- for key, value in self.extra_headers.iteritems(): ++ for key, value in self.extra_headers.items(): + req.add_header(key, value) + return req + +@@ -189,7 +189,7 @@ + account_type = "HOSTED" + req = self._CreateRequest( + url="https://www.google.com/accounts/ClientLogin", +- data=urllib.urlencode({ ++ data=urllib.parse.urlencode({ + "Email": email, + "Passwd": password, + "service": "ah", +@@ -203,7 +203,7 @@ + response_dict = dict(x.split("=") + for x in response_body.split("\n") if x) + return response_dict["Auth"] +- except urllib2.HTTPError, e: ++ except urllib.error.HTTPError as e: + if e.code == 403: + body = e.read() + response_dict = dict(x.split("=", 1) for x in body.split("\n") if x) +@@ -225,14 +225,14 @@ + continue_location = "http://localhost/" + args = {"continue": continue_location, "auth": auth_token} + req = self._CreateRequest("http://%s/_ah/login?%s" % +- (self.host, urllib.urlencode(args))) ++ (self.host, urllib.parse.urlencode(args))) + try: + response = self.opener.open(req) +- except urllib2.HTTPError, e: ++ except urllib.error.HTTPError as e: + response = e + if (response.code != 302 or + response.info()["location"] != continue_location): +- raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg, ++ raise urllib.error.HTTPError(req.get_full_url(), response.code, response.msg, + response.headers, response.fp) + self.authenticated = True + +@@ -255,34 +255,34 @@ + credentials = self.auth_function() + try: + auth_token = self._GetAuthToken(credentials[0], credentials[1]) +- except ClientLoginError, e: ++ except ClientLoginError as e: + if e.reason == "BadAuthentication": +- print >>sys.stderr, "Invalid username or password." ++ print("Invalid username or password.", file=sys.stderr) + continue + if e.reason == "CaptchaRequired": +- print >>sys.stderr, ( ++ print(( + "Please go to\n" + "https://www.google.com/accounts/DisplayUnlockCaptcha\n" +- "and verify you are a human. Then try again.") ++ "and verify you are a human. Then try again."), file=sys.stderr) + break + if e.reason == "NotVerified": +- print >>sys.stderr, "Account not verified." ++ print("Account not verified.", file=sys.stderr) + break + if e.reason == "TermsNotAgreed": +- print >>sys.stderr, "User has not agreed to TOS." ++ print("User has not agreed to TOS.", file=sys.stderr) + break + if e.reason == "AccountDeleted": +- print >>sys.stderr, "The user account has been deleted." 
++ print("The user account has been deleted.", file=sys.stderr) + break + if e.reason == "AccountDisabled": +- print >>sys.stderr, "The user account has been disabled." ++ print("The user account has been disabled.", file=sys.stderr) + break + if e.reason == "ServiceDisabled": +- print >>sys.stderr, ("The user's access to the service has been " +- "disabled.") ++ print(("The user's access to the service has been " ++ "disabled."), file=sys.stderr) + break + if e.reason == "ServiceUnavailable": +- print >>sys.stderr, "The service is not available; try again later." ++ print("The service is not available; try again later.", file=sys.stderr) + break + raise + self._GetAuthCookie(auth_token) +@@ -319,7 +319,7 @@ + args = dict(kwargs) + url = "http://%s%s" % (self.host, request_path) + if args: +- url += "?" + urllib.urlencode(args) ++ url += "?" + urllib.parse.urlencode(args) + req = self._CreateRequest(url=url, data=payload) + req.add_header("Content-Type", content_type) + try: +@@ -327,7 +327,7 @@ + response = f.read() + f.close() + return response +- except urllib2.HTTPError, e: ++ except urllib.error.HTTPError as e: + if tries > 3: + raise + elif e.code == 401: +@@ -357,35 +357,35 @@ + Returns: + A urllib2.OpenerDirector object. + """ +- opener = urllib2.OpenerDirector() +- opener.add_handler(urllib2.ProxyHandler()) +- opener.add_handler(urllib2.UnknownHandler()) +- opener.add_handler(urllib2.HTTPHandler()) +- opener.add_handler(urllib2.HTTPDefaultErrorHandler()) +- opener.add_handler(urllib2.HTTPSHandler()) ++ opener = urllib.request.OpenerDirector() ++ opener.add_handler(urllib.request.ProxyHandler()) ++ opener.add_handler(urllib.request.UnknownHandler()) ++ opener.add_handler(urllib.request.HTTPHandler()) ++ opener.add_handler(urllib.request.HTTPDefaultErrorHandler()) ++ opener.add_handler(urllib.request.HTTPSHandler()) + opener.add_handler(urllib2.HTTPErrorProcessor()) + if self.save_cookies: + self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies") +- self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file) ++ self.cookie_jar = http.cookiejar.MozillaCookieJar(self.cookie_file) + if os.path.exists(self.cookie_file): + try: + self.cookie_jar.load() + self.authenticated = True + StatusUpdate("Loaded authentication cookies from %s" % + self.cookie_file) +- except (cookielib.LoadError, IOError): ++ except (http.cookiejar.LoadError, IOError): + # Failed to load cookies - just ignore them. + pass + else: + # Create an empty cookie file with mode 600 +- fd = os.open(self.cookie_file, os.O_CREAT, 0600) ++ fd = os.open(self.cookie_file, os.O_CREAT, 0o600) + os.close(fd) + # Always chmod the cookie file +- os.chmod(self.cookie_file, 0600) ++ os.chmod(self.cookie_file, 0o600) + else: + # Don't save cookies across runs of update.py. +- self.cookie_jar = cookielib.CookieJar() +- opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar)) ++ self.cookie_jar = http.cookiejar.CookieJar() ++ opener.add_handler(urllib.request.HTTPCookieProcessor(self.cookie_jar)) + return opener + + +@@ -560,7 +560,7 @@ + line = p.stdout.readline() + if not line: + break +- print line.strip("\n") ++ print(line.strip("\n")) + output_array.append(line) + output = "".join(output_array) + else: +@@ -568,7 +568,7 @@ + p.wait() + errout = p.stderr.read() + if print_output and errout: +- print >>sys.stderr, errout ++ print(errout, file=sys.stderr) + p.stdout.close() + p.stderr.close() + return output, p.returncode +@@ -614,11 +614,11 @@ + """Show an "are you sure?" 
prompt if there are unknown files."""
+ unknown_files = self.GetUnknownFiles()
+ if unknown_files:
+- print "The following files are not added to version control:"
++ print("The following files are not added to version control:")
+ for line in unknown_files:
+- print line
++ print(line)
+ prompt = "Are you sure to continue?(y/N) "
+- answer = raw_input(prompt).strip()
++ answer = input(prompt).strip()
+ if answer != "y":
+ ErrorExit("User aborted")
+ 
+@@ -670,13 +670,13 @@
+ else:
+ type = "current"
+ if len(content) > MAX_UPLOAD_SIZE:
+- print ("Not uploading the %s file for %s because it's too large." %
+- (type, filename))
++ print(("Not uploading the %s file for %s because it's too large." %
++ (type, filename)))
+ file_too_large = True
+ content = ""
+- checksum = md5.new(content).hexdigest()
++ checksum = hashlib.md5(content.encode() if isinstance(content, str) else content).hexdigest()
+ if options.verbose > 0 and not file_too_large:
+- print "Uploading %s file for %s" % (type, filename)
++ print("Uploading %s file for %s" % (type, filename))
+ url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
+ form_fields = [("filename", filename),
+ ("status", status),
+@@ -698,7 +698,7 @@
+ 
+ patches = dict()
+ [patches.setdefault(v, k) for k, v in patch_list]
+- for filename in patches.keys():
++ for filename in list(patches.keys()):
+ base_content, new_content, is_binary, status = files[filename]
+ file_id_str = patches.get(filename)
+ if file_id_str.find("nobase") != -1:
+@@ -755,8 +755,8 @@
+ words = line.split()
+ if len(words) == 2 and words[0] == "URL:":
+ url = words[1]
+- scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
+- username, netloc = urllib.splituser(netloc)
++ scheme, netloc, path, params, query, fragment = urllib.parse.urlparse(url)
++ username, netloc = urllib.parse.splituser(netloc)
+ if username:
+ logging.info("Removed username from base URL")
+ if netloc.endswith("svn.python.org"):
+@@ -774,12 +774,12 @@
+ logging.info("Guessed CollabNet base = %s", base)
+ elif netloc.endswith(".googlecode.com"):
+ path = path + "/"
+- base = urlparse.urlunparse(("http", netloc, path, params,
++ base = urllib.parse.urlunparse(("http", netloc, path, params,
+ query, fragment))
+ logging.info("Guessed Google Code base = %s", base)
+ else:
+ path = path + "/"
+- base = urlparse.urlunparse((scheme, netloc, path, params,
++ base = urllib.parse.urlunparse((scheme, netloc, path, params,
+ query, fragment))
+ logging.info("Guessed base = %s", base)
+ return base
+@@ -1187,8 +1187,8 @@
+ rv = []
+ for patch in patches:
+ if len(patch[1]) > MAX_UPLOAD_SIZE:
+- print ("Not uploading the patch for " + patch[0] +
+- " because the file is too large.")
++ print(("Not uploading the patch for " + patch[0] +
++ " because the file is too large."))
+ continue
+ form_fields = [("filename", patch[0])]
+ if not options.download_base:
+@@ -1196,7 +1196,7 @@
+ files = [("data", "data.diff", patch[1])]
+ ctype, body = EncodeMultipartFormData(form_fields, files)
+ url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
+- print "Uploading patch for " + patch[0]
++ print("Uploading patch for " + patch[0])
+ response_body = rpc_server.Send(url, body, content_type=ctype)
+ lines = response_body.splitlines()
+ if not lines or lines[0] != "OK":
+@@ -1223,7 +1223,8 @@
+ out, returncode = RunShellWithReturnCode(["hg", "root"])
+ if returncode == 0:
+ return MercurialVCS(options, out.strip())
+- except OSError, (errno, message):
++ except OSError as err:
++ (errno, message) = err.args
+ if errno != 2: # ENOENT -- they don't have hg installed. 
+ raise
+ 
+@@ -1239,7 +1240,8 @@
+ "--is-inside-work-tree"])
+ if returncode == 0:
+ return GitVCS(options)
+- except OSError, (errno, message):
++ except OSError as err:
++ (errno, message) = err.args
+ if errno != 2: # ENOENT -- they don't have git installed.
+ raise
+ 
+@@ -1286,12 +1288,12 @@
+ data = vcs.GenerateDiff(args)
+ files = vcs.GetBaseFiles(data)
+ if verbosity >= 1:
+- print "Upload server:", options.server, "(change with -s/--server)"
++ print("Upload server:", options.server, "(change with -s/--server)")
+ if options.issue:
+ prompt = "Message describing this patch set: "
+ else:
+ prompt = "New issue subject: "
+- message = options.message or raw_input(prompt).strip()
++ message = options.message or input(prompt).strip()
+ if not message:
+ ErrorExit("A non-empty message is required")
+ rpc_server = GetRpcServer(options)
+@@ -1324,7 +1326,7 @@
+ # Send a hash of all the base file so the server can determine if a copy
+ # already exists in an earlier patchset.
+ base_hashes = ""
+- for file, info in files.iteritems():
++ for file, info in files.items():
+ if not info[0] is None:
+- checksum = md5.new(info[0]).hexdigest()
++ checksum = hashlib.md5(info[0].encode() if isinstance(info[0], str) else info[0]).hexdigest()
+ if base_hashes:
+@@ -1338,7 +1340,7 @@
+ if not options.download_base:
+ form_fields.append(("content_upload", "1"))
+ if len(data) > MAX_UPLOAD_SIZE:
+- print "Patch is large, so uploading file patches separately."
++ print("Patch is large, so uploading file patches separately.")
+ uploaded_diff_file = []
+ form_fields.append(("separate_patches", "1"))
+ else:
+@@ -1378,7 +1380,7 @@
+ try:
+ RealMain(sys.argv)
+ except KeyboardInterrupt:
+- print
++ print()
+ StatusUpdate("Interrupted.")
+ sys.exit(1)
+ 
+--- a/src/3rdparty/chromium/third_party/boringssl/src/third_party/googletest/xcode/Scripts/versiongenerate.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/boringssl/src/third_party/googletest/xcode/Scripts/versiongenerate.py 2025-01-16 02:26:08.564263337 +0800
+@@ -54,7 +54,7 @@
+ 
+ # Read the command line argument (the output directory for Version.h)
+ if (len(sys.argv) < 3):
+- print "Usage: versiongenerate.py input_dir output_dir"
++ print("Usage: versiongenerate.py input_dir output_dir")
+ sys.exit(1)
+ else:
+ input_dir = sys.argv[1]
+--- a/src/3rdparty/chromium/third_party/boringssl/src/util/generate-asm-lcov.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/boringssl/src/util/generate-asm-lcov.py 2025-01-16 02:26:08.564263337 +0800
+@@ -118,7 +118,7 @@
+ """Takes a dictionary |data| of filenames and execution counts and generates
+ a LCOV coverage output."""
+ out = ''
+- for filename, counts in data.iteritems():
++ for filename, counts in data.items():
+ out += 'SF:%s\n' % (os.path.abspath(filename))
+ for line, count in enumerate(counts):
+ if count != None:
+@@ -128,7 +128,7 @@
+ 
+ if __name__ == '__main__':
+ if len(sys.argv) != 3:
+- print '%s ' % (__file__)
++ print('%s ' % (__file__))
+ sys.exit()
+ 
+ cg_folder = sys.argv[1]
+@@ -149,4 +149,4 @@
+ 
+ annotated = merge(cg_files, srcs)
+ lcov = generate(annotated)
+- print output(lcov)
++ print(output(lcov))
+--- a/src/3rdparty/chromium/third_party/boringssl/src/util/generate_build_files.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/boringssl/src/util/generate_build_files.py 2025-01-16 02:26:08.564263337 +0800
+@@ -155,7 +155,7 @@
+ Returns:
+ A copy of |asm| with files filtered according to |want_bcm|
+ """
+- return [(archinfo, filter(lambda p: ("/crypto/fipsmodule/" 
in p) == want_bcm, files))
++ return [(archinfo, [p for p in files if ("/crypto/fipsmodule/" in p) == want_bcm])
+ for (archinfo, files) in asm]
+ 
+ 
+@@ -816,10 +816,10 @@
+ perlasm['extra_args'] + extra_args)
+ asmfiles.setdefault(key, []).append(output)
+ 
+- for (key, non_perl_asm_files) in NON_PERL_FILES.iteritems():
++ for (key, non_perl_asm_files) in NON_PERL_FILES.items():
+ asmfiles.setdefault(key, []).extend(non_perl_asm_files)
+ 
+- for files in asmfiles.itervalues():
++ for files in asmfiles.values():
+ files.sort()
+ 
+ return asmfiles
+@@ -952,7 +952,7 @@
+ 'urandom_test': urandom_test_files,
+ }
+ 
+- asm_outputs = sorted(WriteAsmFiles(ReadPerlAsmOperations()).iteritems())
++ asm_outputs = sorted(WriteAsmFiles(ReadPerlAsmOperations()).items())
+ 
+ for platform in platforms:
+ platform.WriteFiles(files, asm_outputs)
+--- a/src/3rdparty/chromium/third_party/boringssl/src/util/bot/extract.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/boringssl/src/util/bot/extract.py 2025-01-16 02:26:08.564263337 +0800
+@@ -109,7 +109,7 @@
+ if os.path.exists(stamp_path):
+ with open(stamp_path) as f:
+ if f.read().strip() == digest:
+- print "Already up-to-date."
++ print("Already up-to-date.")
+ return 0
+ 
+ if archive.endswith('.zip'):
+@@ -123,10 +123,10 @@
+ 
+ try:
+ if os.path.exists(output):
+- print "Removing %s" % (output, )
++ print("Removing %s" % (output, ))
+ shutil.rmtree(output)
+ 
+- print "Extracting %s to %s" % (archive, output)
++ print("Extracting %s to %s" % (archive, output))
+ prefix = None
+ num_extracted = 0
+ for entry in entries:
+@@ -166,14 +166,14 @@
+ # Print every 100 files, so bots do not time out on large archives.
+ num_extracted += 1
+ if num_extracted % 100 == 0:
+- print "Extracted %d files..." % (num_extracted,)
++ print("Extracted %d files..." % (num_extracted,))
+ finally:
+ entries.close()
+ 
+ with open(stamp_path, 'w') as f:
+ f.write(digest)
+ 
+- print "Done. Extracted %d files." % (num_extracted,)
++ print("Done. Extracted %d files." % (num_extracted,))
+ return 0
+ 
+ 
+--- a/src/3rdparty/chromium/third_party/boringssl/src/util/bot/update_clang.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/boringssl/src/util/bot/update_clang.py 2025-01-16 02:26:08.564263337 +0800
+@@ -13,7 +13,7 @@
+ import tarfile
+ import tempfile
+ import time
+-import urllib2
++import urllib.request, urllib.error, urllib.parse
+ 
+ 
+ # CLANG_REVISION and CLANG_SUB_REVISION determine the build of clang
+@@ -54,7 +54,7 @@
+ try:
+ sys.stdout.write('Downloading %s ' % url)
+ sys.stdout.flush()
+- response = urllib2.urlopen(url)
+- total_size = int(response.info().getheader('Content-Length').strip())
++ response = urllib.request.urlopen(url)
++ total_size = int(response.info().get('Content-Length').strip())
+ bytes_done = 0
+ dots_printed = 0
+@@ -69,24 +69,24 @@
+ sys.stdout.flush()
+ dots_printed = num_dots
+ if bytes_done != total_size:
+- raise urllib2.URLError("only got %d of %d bytes" %
++ raise urllib.error.URLError("only got %d of %d bytes" %
+ (bytes_done, total_size))
+- print ' Done.'
++ print(' Done.')
+ return
+- except urllib2.URLError as e:
++ except urllib.error.URLError as e:
+ sys.stdout.write('\n')
+- print e
+- if num_retries == 0 or isinstance(e, urllib2.HTTPError) and e.code == 404:
++ print(e)
++ if num_retries == 0 or isinstance(e, urllib.error.HTTPError) and e.code == 404:
+ raise e
+ num_retries -= 1
+- print 'Retrying in %d s ...' % retry_wait_s
++ print('Retrying in %d s ...' 
% retry_wait_s) + time.sleep(retry_wait_s) + retry_wait_s *= 2 + + + def EnsureDirExists(path): + if not os.path.exists(path): +- print "Creating directory %s" % path ++ print("Creating directory %s" % path) + os.makedirs(path) + + +@@ -129,7 +129,7 @@ + + def CopyFile(src, dst): + """Copy a file from src to dst.""" +- print "Copying %s to %s" % (src, dst) ++ print("Copying %s to %s" % (src, dst)) + shutil.copy(src, dst) + + +@@ -170,28 +170,28 @@ + else: + return 0 + +- print 'Updating Clang to %s...' % PACKAGE_VERSION ++ print('Updating Clang to %s...' % PACKAGE_VERSION) + + if ReadStampFile() == PACKAGE_VERSION: +- print 'Clang is already up to date.' ++ print('Clang is already up to date.') + return 0 + + # Reset the stamp file in case the build is unsuccessful. + WriteStampFile('') + +- print 'Downloading prebuilt clang' ++ print('Downloading prebuilt clang') + if os.path.exists(LLVM_BUILD_DIR): + RmTree(LLVM_BUILD_DIR) + try: + DownloadAndUnpack(cds_full_url, LLVM_BUILD_DIR) +- print 'clang %s unpacked' % PACKAGE_VERSION ++ print('clang %s unpacked' % PACKAGE_VERSION) + if sys.platform == 'win32': + CopyDiaDllTo(os.path.join(LLVM_BUILD_DIR, 'bin')) + WriteStampFile(PACKAGE_VERSION) + return 0 +- except urllib2.URLError: +- print 'Failed to download prebuilt clang %s' % cds_file +- print 'Exiting.' ++ except urllib.error.URLError: ++ print('Failed to download prebuilt clang %s' % cds_file) ++ print('Exiting.') + return 1 + + +--- a/src/3rdparty/chromium/third_party/boringssl/src/util/bot/vs_env.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/boringssl/src/util/bot/vs_env.py 2025-01-16 02:26:08.564263337 +0800 +@@ -20,7 +20,7 @@ + import gyp.MSVSVersion + + if len(sys.argv) < 2: +- print >>sys.stderr, "Usage: vs_env.py TARGET_ARCH CMD..." ++ print("Usage: vs_env.py TARGET_ARCH CMD...", file=sys.stderr) + sys.exit(1) + + target_arch = sys.argv[1] +--- a/src/3rdparty/chromium/third_party/boringssl/src/util/bot/vs_toolchain.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/boringssl/src/util/bot/vs_toolchain.py 2025-01-16 02:26:08.564263337 +0800 +@@ -56,7 +56,7 @@ + gyp_defines_dict = gyp.NameValueListToDict(gyp.ShlexEnv('GYP_DEFINES')) + gyp_defines_dict['windows_sdk_path'] = win_sdk + os.environ['GYP_DEFINES'] = ' '.join('%s=%s' % (k, pipes.quote(str(v))) +- for k, v in gyp_defines_dict.iteritems()) ++ for k, v in gyp_defines_dict.items()) + os.environ['WINDOWSSDKDIR'] = win_sdk + os.environ['WDK_DIR'] = wdk + # Include the VS runtime in the PATH in case it's not machine-installed. +@@ -125,7 +125,7 @@ + 'update': Update, + } + if len(sys.argv) < 2 or sys.argv[1] not in commands: +- print >>sys.stderr, 'Expected one of: %s' % ', '.join(commands) ++ print('Expected one of: %s' % ', '.join(commands), file=sys.stderr) + return 1 + return commands[sys.argv[1]](*sys.argv[2:]) + +--- a/src/3rdparty/chromium/third_party/boringssl/src/util/bot/go/bootstrap.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/boringssl/src/util/bot/go/bootstrap.py 2025-01-16 02:26:08.564263337 +0800 +@@ -23,7 +23,7 @@ + import sys + import tarfile + import tempfile +-import urllib ++import urllib.request, urllib.parse, urllib.error + import zipfile + + # TODO(vadimsh): Migrate to new golang.org/x/ paths once Golang moves to +@@ -147,10 +147,10 @@ + def report(a, b, c): + progress = int(a * b * 100.0 / c) + if progress != last_progress[0]: +- print >> sys.stderr, 'Downloading... %d%%' % progress ++ print('Downloading... 
%d%%' % progress, file=sys.stderr) + last_progress[0] = progress + # TODO(vadimsh): Use something less crippled, something that validates SSL. +- urllib.urlretrieve(url, path, reporthook=report) ++ urllib.request.urlretrieve(url, path, reporthook=report) + + + @contextlib.contextmanager +@@ -286,7 +286,7 @@ + + def main(args): + if args: +- print >> sys.stderr, sys.modules[__name__].__doc__, ++ print(sys.modules[__name__].__doc__, end=' ', file=sys.stderr) + return 2 + bootstrap(logging.DEBUG) + return 0 +--- a/src/3rdparty/chromium/third_party/boringssl/src/util/bot/go/env.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/boringssl/src/util/bot/go/env.py 2025-01-16 02:26:08.564263337 +0800 +@@ -34,9 +34,9 @@ + new = bootstrap.prepare_go_environ() + + if len(sys.argv) == 1: +- for key, value in sorted(new.iteritems()): ++ for key, value in sorted(new.items()): + if old.get(key) != value: +- print 'export %s="%s"' % (key, value) ++ print('export %s="%s"' % (key, value)) + else: + exe = sys.argv[1] + if exe == 'python': +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/tools/python/deps-to-manifest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/tools/python/deps-to-manifest.py 2025-01-16 02:26:08.564263337 +0800 +@@ -30,7 +30,7 @@ + + """Convert gclient's DEPS file to repo's manifest xml file.""" + +-from __future__ import print_function ++ + + import argparse + import os +@@ -77,7 +77,7 @@ + """Convert the |deps| file to the |manifest|.""" + # Load the DEPS file data. + ctx = {} +- execfile(deps, ctx) ++ exec(compile(open(deps, "rb").read(), deps, 'exec'), ctx) + + new_contents = '' + +@@ -88,7 +88,7 @@ + new_contents += MANIFEST_HEAD % data + + # Write out the sections. +- for name, fetch in REMOTES.items(): ++ for name, fetch in list(REMOTES.items()): + data = { + 'name': name, + 'fetch': fetch, +@@ -106,8 +106,8 @@ + new_contents += MANIFEST_PROJECT % data + + # Write out the sections. +- for path, url in ctx['deps'].items(): +- for name, fetch in REMOTES.items(): ++ for path, url in list(ctx['deps'].items()): ++ for name, fetch in list(REMOTES.items()): + if url.startswith(fetch): + remote = name + break +--- a/src/3rdparty/chromium/third_party/breakpad/breakpad/src/tools/python/filter_syms.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/breakpad/breakpad/src/tools/python/filter_syms.py 2025-01-16 02:26:08.564263337 +0800 +@@ -133,8 +133,8 @@ + Returns: + The actual path to use when writing the FILE record. 
+ """ +- return path[len(filter(path.startswith, +- self.ignored_prefixes + [''])[0]):] ++ return path[len(list(filter(path.startswith, ++ self.ignored_prefixes + ['']))[0]):] + + def _ParseFileRecord(self, file_record): + """Parses and corrects a FILE record.""" +@@ -194,9 +194,9 @@ + symbol_parser = SymbolFileParser(sys.stdin, sys.stdout, options.prefixes, + path_handler) + symbol_parser.Process() +- except BreakpadParseError, e: +- print >> sys.stderr, 'Got an error while processing symbol file' +- print >> sys.stderr, str(e) ++ except BreakpadParseError as e: ++ print('Got an error while processing symbol file', file=sys.stderr) ++ print(str(e), file=sys.stderr) + return 1 + return 0 + +--- a/src/3rdparty/chromium/third_party/catapult/catapult_build/appengine_deploy.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/catapult_build/appengine_deploy.py 2025-01-16 02:26:08.564263337 +0800 +@@ -21,7 +21,7 @@ + version = _VersionName() + with temp_deployment_dir.TempDeploymentDir( + paths, use_symlinks=False) as temp_dir: +- print 'Deploying from "%s".' % temp_dir ++ print('Deploying from "%s".' % temp_dir) + + # google-cloud-sdk/bin/gcloud is a shell script, which we can't subprocess + # on Windows with shell=False. So, execute the Python script directly. +@@ -30,9 +30,9 @@ + else: + script_path = _FindScriptInPath('gcloud') + if not script_path: +- print 'This script requires the Google Cloud SDK to be in PATH.' +- print 'Install at https://cloud.google.com/sdk and then run' +- print '`gcloud components install app-engine-python`' ++ print('This script requires the Google Cloud SDK to be in PATH.') ++ print('Install at https://cloud.google.com/sdk and then run') ++ print('`gcloud components install app-engine-python`') + sys.exit(1) + + subprocess.check_call([script_path, 'app', 'deploy', '--no-promote', +--- a/src/3rdparty/chromium/third_party/catapult/catapult_build/appengine_dev_server.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/catapult_build/appengine_dev_server.py 2025-01-16 02:26:08.564263337 +0800 +@@ -24,11 +24,11 @@ + """ + with temp_deployment_dir.TempDeploymentDir( + paths, reuse_path=reuse_path) as temp_dir: +- print 'Running dev server on "%s".' % temp_dir ++ print('Running dev server on "%s".' % temp_dir) + + script_path = _FindScriptInPath('dev_appserver.py') + if not script_path: +- print 'This script requires the App Engine SDK to be in PATH.' ++ print('This script requires the App Engine SDK to be in PATH.') + sys.exit(1) + + subprocess.call([sys.executable, script_path] + +--- a/src/3rdparty/chromium/third_party/catapult/catapult_build/dev_server.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/catapult_build/dev_server.py 2025-01-16 02:26:08.564263337 +0800 +@@ -2,13 +2,13 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ +-from __future__ import print_function ++ + + import argparse + import json + import os + import sys +-import urlparse ++import urllib.parse + + from hooks import install + +@@ -169,7 +169,7 @@ + class TestOverviewHandler(webapp2.RequestHandler): + def get(self, *args, **kwargs): # pylint: disable=unused-argument + test_links = [] +- for name, path in kwargs.pop('pds').iteritems(): ++ for name, path in kwargs.pop('pds').items(): + test_links.append(_LINK_ITEM % (path, name)) + quick_links = [] + for name, path in _QUICK_LINKS: +@@ -259,14 +259,14 @@ + continue + rel = os.path.relpath(filename, source_path) + unix_rel = _RelPathToUnixPath(rel) +- url = urlparse.urljoin(mapped_path, unix_rel) ++ url = urllib.parse.urljoin(mapped_path, unix_rel) + return url + + path = SourcePathsHandler.GetServingPathForAbsFilename( + self._all_source_paths, filename) + if path is None: + return None +- return urlparse.urljoin('/', path) ++ return urllib.parse.urljoin('/', path) + + + def _AddPleaseExitMixinToServer(server): +--- a/src/3rdparty/chromium/third_party/catapult/catapult_build/html_checks.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/catapult_build/html_checks.py 2025-01-16 02:26:08.564263337 +0800 +@@ -60,7 +60,7 @@ + + grouped_hrefs[','.join(link.get('rel'))].append(link.get('href')) + +- for rel, actual_hrefs in grouped_hrefs.iteritems(): ++ for rel, actual_hrefs in grouped_hrefs.items(): + expected_hrefs = list(sorted(set(actual_hrefs))) + if actual_hrefs != expected_hrefs: + error_text = ( +--- a/src/3rdparty/chromium/third_party/catapult/catapult_build/run_dev_server_tests.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/catapult_build/run_dev_server_tests.py 2025-01-16 02:26:08.564263337 +0800 +@@ -3,7 +3,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import argparse + import json +--- a/src/3rdparty/chromium/third_party/catapult/catapult_build/test_runner.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/catapult_build/test_runner.py 2025-01-16 02:26:08.564263337 +0800 +@@ -8,8 +8,8 @@ + import subprocess + import sys + +-FAIL_EMOJI = u'\U0001F631'.encode('utf-8') +-PASS_EMOJI = u'\U0001F601'.encode('utf-8') ++FAIL_EMOJI = '\U0001F631'.encode('utf-8') ++PASS_EMOJI = '\U0001F601'.encode('utf-8') + + GREEN = '\033[92m' + RED = '\033[91m' +@@ -53,9 +53,9 @@ + os.path.basename(test['path']), test['path']) + + if exit_code: +- print _Color('Oops! Some tests failed.', RED), FAIL_EMOJI ++ print(_Color('Oops! Some tests failed.', RED), FAIL_EMOJI) + sys.stderr.writelines(errors) + else: +- print _Color('Woho! All tests passed.', GREEN), PASS_EMOJI ++ print(_Color('Woho! 
All tests passed.', GREEN), PASS_EMOJI)
+ 
+ sys.exit(exit_code)
+--- a/src/3rdparty/chromium/third_party/catapult/catapult_build/perfbot_stats/chrome_perf_stats.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/catapult/catapult_build/perfbot_stats/chrome_perf_stats.py 2025-01-16 02:26:08.564263337 +0800
+@@ -18,8 +18,8 @@
+ import datetime
+ import json
+ import sys
+-import urllib
+-import urllib2
++import urllib.request, urllib.parse, urllib.error
++import urllib.request, urllib.error, urllib.parse
+ 
+ BUILDER_LIST_URL = ('https://chrome-infra-stats.appspot.com/'
+ '_ah/api/stats/v1/masters/chromium.perf')
+@@ -33,7 +33,7 @@
+ 
+ def main():
+ if len(sys.argv) == 2 and sys.argv[0] == '--help':
+- print USAGE
++ print(USAGE)
+ sys.exit(0)
+ year = None
+ month = None
+@@ -41,22 +41,22 @@
+ if len(sys.argv) == 4 or len(sys.argv) == 3:
+ year = int(sys.argv[1])
+ if year > 2016 or year < 2014:
+- print USAGE
++ print(USAGE)
+ sys.exit(0)
+ month = int(sys.argv[2])
+ if month > 12 or month <= 0:
+- print USAGE
++ print(USAGE)
+ sys.exit(0)
+ if len(sys.argv) == 3:
+- days = range(1, calendar.monthrange(year, month)[1] + 1)
++ days = list(range(1, calendar.monthrange(year, month)[1] + 1))
+ else:
+ day = int(sys.argv[3])
+ if day > 31 or day <= 0:
+- print USAGE
++ print(USAGE)
+ sys.exit(0)
+ days = [day]
+ elif len(sys.argv) != 1:
+- print USAGE
++ print(USAGE)
+ sys.exit(0)
+ else:
+ yesterday = datetime.date.today() - datetime.timedelta(days=1)
+@@ -64,7 +64,7 @@
+ month = yesterday.month
+ days = [yesterday.day]
+ 
+- response = urllib2.urlopen(BUILDER_LIST_URL)
++ response = urllib.request.urlopen(BUILDER_LIST_URL)
+ builders = [builder['name'] for builder in json.load(response)['builders']]
+ success_rates = CalculateSuccessRates(year, month, days, builders)
+ UploadToPerfDashboard(success_rates)
+@@ -87,10 +87,10 @@
+ 
+ def _SummarizeSuccessRates(success_rates):
+ overall_success_rates = []
+- for day, results in success_rates.iteritems():
++ for day, results in success_rates.items():
+ success_rate_sum = 0
+ success_rate_count = 0
+- for rates in results.values():
++ for rates in list(results.values()):
+ if rates['count'] == 0:
+ continue
+ success_rate_sum += (
+@@ -131,8 +131,8 @@
+ }
+ }
+ url = 'https://chromeperf.appspot.com/add_point'
+- data = urllib.urlencode({'data': json.dumps(dashboard_data)})
+- urllib2.urlopen(url=url, data=data).read()
++ data = urllib.parse.urlencode({'data': json.dumps(dashboard_data)}).encode('utf-8')
++ urllib.request.urlopen(url=url, data=data).read()
+ 
+ 
+ def CalculateSuccessRates(year, month, days, builders):
+@@ -143,8 +143,8 @@
+ date_dict_str = '%d%02d%02d' % (year, month, day)
+ for builder in builders:
+ url = BUILDER_STATS_URL % (
+- urllib.quote(builder), urllib.quote(date_str))
+- response = urllib2.urlopen(url)
++ urllib.parse.quote(builder), urllib.parse.quote(date_str))
++ response = urllib.request.urlopen(url)
+ results = json.load(response)
+ _UpdateSuccessRatesWithResult(
+ success_rates, results, date_dict_str, builder)
+--- a/src/3rdparty/chromium/third_party/catapult/catapult_build/perfbot_stats/chrome_perf_step_timings.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/catapult/catapult_build/perfbot_stats/chrome_perf_step_timings.py 2025-01-16 02:26:08.564263337 +0800
+@@ -17,8 +17,8 @@
+ import datetime
+ import json
+ import sys
+-import urllib
+-import urllib2
++import urllib.request, urllib.parse, urllib.error
++import urllib.request, urllib.error, urllib.parse
+ 
+ 
BUILDER_STEPS_URL = ('https://chrome-infra-stats.appspot.com/_ah/api/stats/v1/'
+@@ -115,7 +115,7 @@
+ 
+ def main():
+ if len(sys.argv) != 2:
+- print USAGE
++ print(USAGE)
+ sys.exit(0)
+ outfilename = sys.argv[1]
+ 
+@@ -129,18 +129,18 @@
+ 
+ for builder in KNOWN_TESTERS_LIST:
+ step_timings = []
+- url = BUILDER_STEPS_URL % urllib.quote(builder)
+- response = urllib2.urlopen(url)
++ url = BUILDER_STEPS_URL % urllib.parse.quote(builder)
++ response = urllib.request.urlopen(url)
+ results = json.load(response)
+ steps = results['steps']
+ steps.sort() # to group tests and their references together.
+ for step in steps:
+ if step in IGNORED_STEPS:
+ continue
+- url = STEP_ACTIVE_URL % (urllib.quote(builder), urllib.quote(step))
+- response = urllib2.urlopen(url)
++ url = STEP_ACTIVE_URL % (urllib.parse.quote(builder), urllib.parse.quote(step))
++ response = urllib.request.urlopen(url)
+ results = json.load(response)
+- if ('step_records' not in results.keys() or
++ if ('step_records' not in results or
+ len(results['step_records']) == 0):
+ continue
+ first_record = results['step_records'][0]
+@@ -149,8 +149,8 @@
+ # ignore steps that did not run for more than 2 days
+ if last_step_time < threshold_time:
+ continue
+- url = STEP_STATS_URL % (urllib.quote(builder), urllib.quote(step))
+- response = urllib2.urlopen(url)
++ url = STEP_STATS_URL % (urllib.parse.quote(builder), urllib.parse.quote(step))
++ response = urllib.request.urlopen(url)
+ results = json.load(response)
+ step_timings.append(
+ [builder, step, results['count'], results['stddev'],
+--- a/src/3rdparty/chromium/third_party/catapult/common/bin/update_chrome_reference_binaries.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/catapult/common/bin/update_chrome_reference_binaries.py 2025-01-16 02:26:08.564263337 +0800
+@@ -20,7 +20,7 @@
+ import subprocess
+ import sys
+ import tempfile
+-import urllib2
++import urllib.request, urllib.error, urllib.parse
+ import zipfile
+ 
+ sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'py_utils'))
+@@ -170,7 +170,7 @@
+ 
+ def _OmahaReportVersionInfo(channel):
+ url ='https://omahaproxy.appspot.com/all?channel=%s' % channel
+- lines = urllib2.urlopen(url).readlines()
++ lines = urllib.request.urlopen(url).read().decode('utf-8').splitlines()
+ return [l.split(',') for l in lines]
+ 
+ 
+@@ -263,10 +263,10 @@
+ closest_snapshot = _FindClosestChromiumSnapshot(
+ branch_base_position, build_dir)
+ if closest_snapshot != branch_base_position:
+- print ('Channel %s corresponds to commit position ' % channel +
++ print(('Channel %s corresponds to commit position ' % channel +
+ '%d on %s, ' % (branch_base_position, platform) +
+ 'but closest chromium snapshot available on ' +
+- '%s is %d' % (_CHROMIUM_GS_BUCKET, closest_snapshot))
++ '%s is %d' % (_CHROMIUM_GS_BUCKET, closest_snapshot)))
+ return RemotePath(bucket=_CHROMIUM_GS_BUCKET,
+ path = ('%s/%s/%s' % (build_dir, closest_snapshot,
+ platform_info.chromium_info.zip_name)))
+@@ -348,11 +348,11 @@
+ 
+ def _NeedsUpdate(config, binary, channel, platform, version_info):
+ channel_version = version_info.version
+- print 'Checking %s (%s channel) on %s' % (binary, channel, platform)
++ print('Checking %s (%s channel) on %s' % (binary, channel, platform))
+ current_version = config.GetVersion('%s_%s' % (binary, channel), platform)
+- print 'current: %s, channel: %s' % (current_version, channel_version)
++ print('current: %s, channel: %s' % (current_version, channel_version))
+ if current_version and current_version == 
channel_version:
+- print 'Already up to date.'
++ print('Already up to date.')
+ return False
+ return True
+
+@@ -372,7 +372,7 @@
+ _QueuePlatformUpdate('chromium', platform, version_info,
+ config, channel)
+
+- print 'Updating builds with downloaded binaries'
++ print('Updating builds with downloaded binaries')
+ config.ExecuteUpdateJobs(force=True)
+
+
+--- a/src/3rdparty/chromium/third_party/catapult/common/lab/commits.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/catapult/common/lab/commits.py 2025-01-16 02:26:08.564263337 +0800
+@@ -9,8 +9,8 @@
+ import itertools
+ import json
+ import math
+-import urllib
+-import urllib2
++import urllib.request, urllib.parse, urllib.error
++import urllib.request, urllib.error, urllib.parse
+
+
+ _BASE_URL = 'https://chromium.googlesource.com'
+@@ -29,7 +29,7 @@
+ """s -> (s0,s1), (s1,s2), (s2, s3), ..."""
+ a, b = itertools.tee(iterable)
+ next(b, None)
+- return itertools.izip(a, b)
++ return zip(a, b)
+
+
+ def Percentile(data, percentile):
+@@ -58,9 +58,10 @@
+
+
+ def CommitTimes(repository, revision_count):
+- parameters = urllib.urlencode((('n', revision_count), ('format', 'JSON')))
+- url = '%s/%s/+log?%s' % (_BASE_URL, urllib.quote(repository), parameters)
+- data = json.loads(''.join(urllib2.urlopen(url).read().splitlines()[1:]))
++ parameters = urllib.parse.urlencode((('n', revision_count), ('format', 'JSON')))
++ url = '%s/%s/+log?%s' % (_BASE_URL, urllib.parse.quote(repository), parameters)
++ # read() returns bytes under Python 3; decode before joining with a str.
++ data = json.loads(''.join(urllib.request.urlopen(url).read().decode('utf-8').splitlines()[1:]))
+
+ commit_times = []
+ for revision in data['log']:
+@@ -87,18 +87,18 @@
+ commit_durations.append((time1 - time2).total_seconds() / 60.)
+ commit_durations.sort()
+
+- print 'REPOSITORY:', repository
+- print 'Start Date:', min(commit_times), 'PDT'
+- print ' End Date:', max(commit_times), 'PDT'
+- print ' Duration:', max(commit_times) - min(commit_times)
+- print ' n:', len(commit_times)
++ print('REPOSITORY:', repository)
++ print('Start Date:', min(commit_times), 'PDT')
++ print(' End Date:', max(commit_times), 'PDT')
++ print(' Duration:', max(commit_times) - min(commit_times))
++ print(' n:', len(commit_times))
+
+ for p in (0.25, 0.50, 0.90):
+ percentile = Percentile(commit_durations, p)
+- print '%3d%% commit duration:' % (p * 100), '%6.1fm' % percentile
++ print('%3d%% commit duration:' % (p * 100), '%6.1fm' % percentile)
+ mean = math.fsum(commit_durations) / len(commit_durations)
+- print 'Mean commit duration:', '%6.1fm' % mean
+- print
++ print('Mean commit duration:', '%6.1fm' % mean)
++ print()
+
+
+ if __name__ == '__main__':
+--- a/src/3rdparty/chromium/third_party/catapult/common/lab/hardware.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/catapult/common/lab/hardware.py 2025-01-16 02:26:08.564263337 +0800
+@@ -9,7 +9,7 @@
+ import json
+ import logging
+ import sys
+-import urllib2
++import urllib.request, urllib.error, urllib.parse
+
+
+ _MASTERS = [
+@@ -50,11 +50,11 @@
+ writer.writeheader()
+
+ for master_name in _MASTERS:
+- master_data = json.load(urllib2.urlopen(
++ master_data = json.load(urllib.request.urlopen(
+ 'http://build.chromium.org/p/%s/json/slaves' % master_name))
+
+- slaves = sorted(master_data.iteritems(),
+- key=lambda x: (x[1]['builders'].keys(), x[0]))
++ slaves = sorted(master_data.items(),
++ key=lambda x: (list(x[1]['builders'].keys()), x[0]))
+ for slave_name, slave_data in slaves:
+ for builder_name in slave_data['builders']:
+ row = {
+@@ -76,7 
+76,7 @@ + row[key] = value + + # Munge keys. +- row = {key.replace('_', ' '): value for key, value in row.iteritems()} ++ row = {key.replace('_', ' '): value for key, value in row.items()} + if 'osfamily' in row: + row['os family'] = row.pop('osfamily') + if 'product name' not in row and slave_name.startswith('slave'): +--- a/src/3rdparty/chromium/third_party/catapult/common/node_runner/node_runner/node_util.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/node_runner/node_runner/node_util.py 2025-01-16 02:26:08.564263337 +0800 +@@ -56,5 +56,5 @@ + 'node_modules')) + if sys.platform.startswith('win'): + # Escape path on Windows because it's very long and must be passed to NTFS. +- path = u'\\\\?\\' + path ++ path = '\\\\?\\' + path + return path +--- a/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event.py 2025-01-16 02:26:08.564263337 +0800 +@@ -48,7 +48,7 @@ + """ + + try: +- import trace_event_impl ++ from . import trace_event_impl + except ImportError: + trace_event_impl = None + +@@ -88,7 +88,7 @@ + trace_event_impl.trace_flush() + + def trace_begin(name, **kwargs): +- args_to_log = {key: repr(value) for key, value in kwargs.iteritems()} ++ args_to_log = {key: repr(value) for key, value in kwargs.items()} + trace_event_impl.add_trace_event("B", trace_time.Now(), "python", name, + args_to_log) + +--- a/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_unittest.py 2025-01-16 02:26:08.564263337 +0800 +@@ -54,13 +54,13 @@ + with self._test_trace(): + with open(self._log_path, 'r') as f: + log_output = json.loads(f.read() + ']') +- self.assertEquals(len(log_output), 1) ++ self.assertEqual(len(log_output), 1) + self.assertTrue(trace_event.trace_is_enabled()) + log_output = log_output.pop() +- self.assertEquals(log_output['category'], 'process_argv') +- self.assertEquals(log_output['name'], 'process_argv') ++ self.assertEqual(log_output['category'], 'process_argv') ++ self.assertEqual(log_output['name'], 'process_argv') + self.assertTrue(log_output['args']['argv']) +- self.assertEquals(log_output['ph'], 'M') ++ self.assertEqual(log_output['ph'], 'M') + + def testDoubleEnable(self): + try: +@@ -81,7 +81,7 @@ + trace_event.trace_disable() + self.assertEqual( + multiprocessing.Process, _old_multiprocessing_process) +- self.assertEquals(len(json.loads(f.read() + ']')), 1) ++ self.assertEqual(len(json.loads(f.read() + ']')), 1) + self.assertFalse(trace_event.trace_is_enabled()) + + def testDoubleDisable(self): +@@ -93,28 +93,28 @@ + with self._test_trace(): + with open(self._log_path, 'r') as f: + trace_event.clock_sync('1') +- self.assertEquals(len(json.loads(f.read() + ']')), 1) ++ self.assertEqual(len(json.loads(f.read() + ']')), 1) + f.seek(0) + trace_event.trace_flush() +- self.assertEquals(len(json.loads(f.read() + ']')), 2) ++ self.assertEqual(len(json.loads(f.read() + ']')), 2) + + def testFlushNoChanges(self): + with self._test_trace(): + with open(self._log_path, 'r') as f: +- self.assertEquals(len(json.loads(f.read() + ']')),1) ++ self.assertEqual(len(json.loads(f.read() + ']')),1) + f.seek(0) + trace_event.trace_flush() +- self.assertEquals(len(json.loads(f.read() + ']')), 1) 
++ self.assertEqual(len(json.loads(f.read() + ']')), 1) + + def testDoubleFlush(self): + with self._test_trace(): + with open(self._log_path, 'r') as f: + trace_event.clock_sync('1') +- self.assertEquals(len(json.loads(f.read() + ']')), 1) ++ self.assertEqual(len(json.loads(f.read() + ']')), 1) + f.seek(0) + trace_event.trace_flush() + trace_event.trace_flush() +- self.assertEquals(len(json.loads(f.read() + ']')), 2) ++ self.assertEqual(len(json.loads(f.read() + ']')), 2) + + def testTraceBegin(self): + with self._test_trace(): +@@ -122,17 +122,17 @@ + trace_event.trace_begin('test_event', this='that') + trace_event.trace_flush() + log_output = json.loads(f.read() + ']') +- self.assertEquals(len(log_output), 2) ++ self.assertEqual(len(log_output), 2) + current_entry = log_output.pop(0) +- self.assertEquals(current_entry['category'], 'process_argv') +- self.assertEquals(current_entry['name'], 'process_argv') ++ self.assertEqual(current_entry['category'], 'process_argv') ++ self.assertEqual(current_entry['name'], 'process_argv') + self.assertTrue( current_entry['args']['argv']) +- self.assertEquals( current_entry['ph'], 'M') ++ self.assertEqual( current_entry['ph'], 'M') + current_entry = log_output.pop(0) +- self.assertEquals(current_entry['category'], 'python') +- self.assertEquals(current_entry['name'], 'test_event') +- self.assertEquals(current_entry['args']['this'], '\'that\'') +- self.assertEquals(current_entry['ph'], 'B') ++ self.assertEqual(current_entry['category'], 'python') ++ self.assertEqual(current_entry['name'], 'test_event') ++ self.assertEqual(current_entry['args']['this'], '\'that\'') ++ self.assertEqual(current_entry['ph'], 'B') + + def testTraceEnd(self): + with self._test_trace(): +@@ -140,17 +140,17 @@ + trace_event.trace_end('test_event') + trace_event.trace_flush() + log_output = json.loads(f.read() + ']') +- self.assertEquals(len(log_output), 2) ++ self.assertEqual(len(log_output), 2) + current_entry = log_output.pop(0) +- self.assertEquals(current_entry['category'], 'process_argv') +- self.assertEquals(current_entry['name'], 'process_argv') ++ self.assertEqual(current_entry['category'], 'process_argv') ++ self.assertEqual(current_entry['name'], 'process_argv') + self.assertTrue(current_entry['args']['argv']) +- self.assertEquals(current_entry['ph'], 'M') ++ self.assertEqual(current_entry['ph'], 'M') + current_entry = log_output.pop(0) +- self.assertEquals(current_entry['category'], 'python') +- self.assertEquals(current_entry['name'], 'test_event') +- self.assertEquals(current_entry['args'], {}) +- self.assertEquals(current_entry['ph'], 'E') ++ self.assertEqual(current_entry['category'], 'python') ++ self.assertEqual(current_entry['name'], 'test_event') ++ self.assertEqual(current_entry['args'], {}) ++ self.assertEqual(current_entry['ph'], 'E') + + def testTrace(self): + with self._test_trace(): +@@ -159,22 +159,22 @@ + trace_event.trace_flush() + with open(self._log_path, 'r') as f: + log_output = json.loads(f.read() + ']') +- self.assertEquals(len(log_output), 3) ++ self.assertEqual(len(log_output), 3) + current_entry = log_output.pop(0) +- self.assertEquals(current_entry['category'], 'process_argv') +- self.assertEquals(current_entry['name'], 'process_argv') ++ self.assertEqual(current_entry['category'], 'process_argv') ++ self.assertEqual(current_entry['name'], 'process_argv') + self.assertTrue(current_entry['args']['argv']) +- self.assertEquals(current_entry['ph'], 'M') ++ self.assertEqual(current_entry['ph'], 'M') + current_entry = log_output.pop(0) +- 
self.assertEquals(current_entry['category'], 'python') +- self.assertEquals(current_entry['name'], 'test_event') +- self.assertEquals(current_entry['args']['this'], '\'that\'') +- self.assertEquals(current_entry['ph'], 'B') +- current_entry = log_output.pop(0) +- self.assertEquals(current_entry['category'], 'python') +- self.assertEquals(current_entry['name'], 'test_event') +- self.assertEquals(current_entry['args'], {}) +- self.assertEquals(current_entry['ph'], 'E') ++ self.assertEqual(current_entry['category'], 'python') ++ self.assertEqual(current_entry['name'], 'test_event') ++ self.assertEqual(current_entry['args']['this'], '\'that\'') ++ self.assertEqual(current_entry['ph'], 'B') ++ current_entry = log_output.pop(0) ++ self.assertEqual(current_entry['category'], 'python') ++ self.assertEqual(current_entry['name'], 'test_event') ++ self.assertEqual(current_entry['args'], {}) ++ self.assertEqual(current_entry['ph'], 'E') + + def testTracedDecorator(self): + @trace_event.traced("this") +@@ -186,23 +186,23 @@ + trace_event.trace_flush() + with open(self._log_path, 'r') as f: + log_output = json.loads(f.read() + ']') +- self.assertEquals(len(log_output), 3) ++ self.assertEqual(len(log_output), 3) + expected_name = __name__ + '.test_decorator' + current_entry = log_output.pop(0) +- self.assertEquals(current_entry['category'], 'process_argv') +- self.assertEquals(current_entry['name'], 'process_argv') ++ self.assertEqual(current_entry['category'], 'process_argv') ++ self.assertEqual(current_entry['name'], 'process_argv') + self.assertTrue(current_entry['args']['argv']) +- self.assertEquals(current_entry['ph'], 'M') ++ self.assertEqual(current_entry['ph'], 'M') + current_entry = log_output.pop(0) +- self.assertEquals(current_entry['category'], 'python') +- self.assertEquals(current_entry['name'], expected_name) +- self.assertEquals(current_entry['args']['this'], '\'that\'') +- self.assertEquals(current_entry['ph'], 'B') +- current_entry = log_output.pop(0) +- self.assertEquals(current_entry['category'], 'python') +- self.assertEquals(current_entry['name'], expected_name) +- self.assertEquals(current_entry['args'], {}) +- self.assertEquals(current_entry['ph'], 'E') ++ self.assertEqual(current_entry['category'], 'python') ++ self.assertEqual(current_entry['name'], expected_name) ++ self.assertEqual(current_entry['args']['this'], '\'that\'') ++ self.assertEqual(current_entry['ph'], 'B') ++ current_entry = log_output.pop(0) ++ self.assertEqual(current_entry['category'], 'python') ++ self.assertEqual(current_entry['name'], expected_name) ++ self.assertEqual(current_entry['args'], {}) ++ self.assertEqual(current_entry['ph'], 'E') + + def testClockSyncWithTs(self): + with self._test_trace(): +@@ -210,17 +210,17 @@ + trace_event.clock_sync('id', issue_ts=trace_time.Now()) + trace_event.trace_flush() + log_output = json.loads(f.read() + ']') +- self.assertEquals(len(log_output), 2) ++ self.assertEqual(len(log_output), 2) + current_entry = log_output.pop(0) +- self.assertEquals(current_entry['category'], 'process_argv') +- self.assertEquals(current_entry['name'], 'process_argv') ++ self.assertEqual(current_entry['category'], 'process_argv') ++ self.assertEqual(current_entry['name'], 'process_argv') + self.assertTrue(current_entry['args']['argv']) +- self.assertEquals(current_entry['ph'], 'M') ++ self.assertEqual(current_entry['ph'], 'M') + current_entry = log_output.pop(0) +- self.assertEquals(current_entry['category'], 'python') +- self.assertEquals(current_entry['name'], 'clock_sync') ++ 
self.assertEqual(current_entry['category'], 'python') ++ self.assertEqual(current_entry['name'], 'clock_sync') + self.assertTrue(current_entry['args']['issue_ts']) +- self.assertEquals(current_entry['ph'], 'c') ++ self.assertEqual(current_entry['ph'], 'c') + + def testClockSyncWithoutTs(self): + with self._test_trace(): +@@ -228,17 +228,17 @@ + trace_event.clock_sync('id') + trace_event.trace_flush() + log_output = json.loads(f.read() + ']') +- self.assertEquals(len(log_output), 2) ++ self.assertEqual(len(log_output), 2) + current_entry = log_output.pop(0) +- self.assertEquals(current_entry['category'], 'process_argv') +- self.assertEquals(current_entry['name'], 'process_argv') ++ self.assertEqual(current_entry['category'], 'process_argv') ++ self.assertEqual(current_entry['name'], 'process_argv') + self.assertTrue(current_entry['args']['argv']) +- self.assertEquals(current_entry['ph'], 'M') ++ self.assertEqual(current_entry['ph'], 'M') + current_entry = log_output.pop(0) +- self.assertEquals(current_entry['category'], 'python') +- self.assertEquals(current_entry['name'], 'clock_sync') ++ self.assertEqual(current_entry['category'], 'python') ++ self.assertEqual(current_entry['name'], 'clock_sync') + self.assertFalse(current_entry['args'].get('issue_ts')) +- self.assertEquals(current_entry['ph'], 'c') ++ self.assertEqual(current_entry['ph'], 'c') + + def testTime(self): + actual_diff = [] +@@ -258,20 +258,20 @@ + trace_event.trace_flush() + with open(self._log_path, 'r') as f: + log_output = json.loads(f.read() + ']') +- self.assertEquals(len(log_output), 3) ++ self.assertEqual(len(log_output), 3) + meta_data = log_output[0] + open_data = log_output[1] + close_data = log_output[2] +- self.assertEquals(meta_data['category'], 'process_argv') +- self.assertEquals(meta_data['name'], 'process_argv') ++ self.assertEqual(meta_data['category'], 'process_argv') ++ self.assertEqual(meta_data['name'], 'process_argv') + self.assertTrue(meta_data['args']['argv']) +- self.assertEquals(meta_data['ph'], 'M') +- self.assertEquals(open_data['category'], 'python') +- self.assertEquals(open_data['name'], 'test') +- self.assertEquals(open_data['ph'], 'B') +- self.assertEquals(close_data['category'], 'python') +- self.assertEquals(close_data['name'], 'test') +- self.assertEquals(close_data['ph'], 'E') ++ self.assertEqual(meta_data['ph'], 'M') ++ self.assertEqual(open_data['category'], 'python') ++ self.assertEqual(open_data['name'], 'test') ++ self.assertEqual(open_data['ph'], 'B') ++ self.assertEqual(close_data['category'], 'python') ++ self.assertEqual(close_data['name'], 'test') ++ self.assertEqual(close_data['ph'], 'E') + event_time_diff = close_data['ts'] - open_data['ts'] + recorded_time_diff = (end_ts - start_ts) * 1000000 + self.assertLess(math.fabs(event_time_diff - recorded_time_diff), 1000) +@@ -285,30 +285,30 @@ + trace_event.trace_flush() + with open(self._log_path, 'r') as f: + log_output = json.loads(f.read() + ']') +- self.assertEquals(len(log_output), 5) ++ self.assertEqual(len(log_output), 5) + meta_data = log_output[0] + one_open = log_output[1] + two_open = log_output[2] + two_close = log_output[3] + one_close = log_output[4] +- self.assertEquals(meta_data['category'], 'process_argv') +- self.assertEquals(meta_data['name'], 'process_argv') ++ self.assertEqual(meta_data['category'], 'process_argv') ++ self.assertEqual(meta_data['name'], 'process_argv') + self.assertTrue(meta_data['args']['argv']) +- self.assertEquals(meta_data['ph'], 'M') ++ self.assertEqual(meta_data['ph'], 'M') + +- 
self.assertEquals(one_open['category'], 'python') +- self.assertEquals(one_open['name'], 'one') +- self.assertEquals(one_open['ph'], 'B') +- self.assertEquals(one_close['category'], 'python') +- self.assertEquals(one_close['name'], 'one') +- self.assertEquals(one_close['ph'], 'E') +- +- self.assertEquals(two_open['category'], 'python') +- self.assertEquals(two_open['name'], 'two') +- self.assertEquals(two_open['ph'], 'B') +- self.assertEquals(two_close['category'], 'python') +- self.assertEquals(two_close['name'], 'two') +- self.assertEquals(two_close['ph'], 'E') ++ self.assertEqual(one_open['category'], 'python') ++ self.assertEqual(one_open['name'], 'one') ++ self.assertEqual(one_open['ph'], 'B') ++ self.assertEqual(one_close['category'], 'python') ++ self.assertEqual(one_close['name'], 'one') ++ self.assertEqual(one_close['ph'], 'E') ++ ++ self.assertEqual(two_open['category'], 'python') ++ self.assertEqual(two_open['name'], 'two') ++ self.assertEqual(two_open['ph'], 'B') ++ self.assertEqual(two_close['category'], 'python') ++ self.assertEqual(two_close['name'], 'two') ++ self.assertEqual(two_close['ph'], 'E') + + self.assertLessEqual(one_open['ts'], two_open['ts']) + self.assertGreaterEqual(one_close['ts'], two_close['ts']) +@@ -322,30 +322,30 @@ + trace_event.trace_flush() + with open(self._log_path, 'r') as f: + log_output = json.loads(f.read() + ']') +- self.assertEquals(len(log_output), 5) ++ self.assertEqual(len(log_output), 5) + meta_data = log_output[0] + one_open = log_output[1] + two_open = log_output[2] + two_close = log_output[4] + one_close = log_output[3] +- self.assertEquals(meta_data['category'], 'process_argv') +- self.assertEquals(meta_data['name'], 'process_argv') ++ self.assertEqual(meta_data['category'], 'process_argv') ++ self.assertEqual(meta_data['name'], 'process_argv') + self.assertTrue(meta_data['args']['argv']) +- self.assertEquals(meta_data['ph'], 'M') ++ self.assertEqual(meta_data['ph'], 'M') + +- self.assertEquals(one_open['category'], 'python') +- self.assertEquals(one_open['name'], 'one') +- self.assertEquals(one_open['ph'], 'B') +- self.assertEquals(one_close['category'], 'python') +- self.assertEquals(one_close['name'], 'one') +- self.assertEquals(one_close['ph'], 'E') +- +- self.assertEquals(two_open['category'], 'python') +- self.assertEquals(two_open['name'], 'two') +- self.assertEquals(two_open['ph'], 'B') +- self.assertEquals(two_close['category'], 'python') +- self.assertEquals(two_close['name'], 'two') +- self.assertEquals(two_close['ph'], 'E') ++ self.assertEqual(one_open['category'], 'python') ++ self.assertEqual(one_open['name'], 'one') ++ self.assertEqual(one_open['ph'], 'B') ++ self.assertEqual(one_close['category'], 'python') ++ self.assertEqual(one_close['name'], 'one') ++ self.assertEqual(one_close['ph'], 'E') ++ ++ self.assertEqual(two_open['category'], 'python') ++ self.assertEqual(two_open['name'], 'two') ++ self.assertEqual(two_open['ph'], 'B') ++ self.assertEqual(two_close['category'], 'python') ++ self.assertEqual(two_close['name'], 'two') ++ self.assertEqual(two_close['ph'], 'E') + + self.assertLessEqual(one_open['ts'], two_open['ts']) + self.assertLessEqual(one_close['ts'], two_close['ts']) +@@ -367,32 +367,32 @@ + trace_event.trace_flush() + with open(self._log_path, 'r') as f: + log_output = json.loads(f.read() + ']') +- self.assertEquals(len(log_output), 5) ++ self.assertEqual(len(log_output), 5) + meta_data = log_output[0] + parent_open = log_output[1] + child_open = log_output[2] + child_close = log_output[3] + parent_close 
= log_output[4] +- self.assertEquals(meta_data['category'], 'process_argv') +- self.assertEquals(meta_data['name'], 'process_argv') ++ self.assertEqual(meta_data['category'], 'process_argv') ++ self.assertEqual(meta_data['name'], 'process_argv') + self.assertTrue(meta_data['args']['argv']) +- self.assertEquals(meta_data['ph'], 'M') ++ self.assertEqual(meta_data['ph'], 'M') + +- self.assertEquals(parent_open['category'], 'python') +- self.assertEquals(parent_open['name'], 'parent_event') +- self.assertEquals(parent_open['ph'], 'B') +- +- self.assertEquals(child_open['category'], 'python') +- self.assertEquals(child_open['name'], 'child_event') +- self.assertEquals(child_open['ph'], 'B') +- +- self.assertEquals(child_close['category'], 'python') +- self.assertEquals(child_close['name'], 'child_event') +- self.assertEquals(child_close['ph'], 'E') +- +- self.assertEquals(parent_close['category'], 'python') +- self.assertEquals(parent_close['name'], 'parent_event') +- self.assertEquals(parent_close['ph'], 'E') ++ self.assertEqual(parent_open['category'], 'python') ++ self.assertEqual(parent_open['name'], 'parent_event') ++ self.assertEqual(parent_open['ph'], 'B') ++ ++ self.assertEqual(child_open['category'], 'python') ++ self.assertEqual(child_open['name'], 'child_event') ++ self.assertEqual(child_open['ph'], 'B') ++ ++ self.assertEqual(child_close['category'], 'python') ++ self.assertEqual(child_close['name'], 'child_event') ++ self.assertEqual(child_close['ph'], 'E') ++ ++ self.assertEqual(parent_close['category'], 'python') ++ self.assertEqual(parent_close['name'], 'parent_event') ++ self.assertEqual(parent_close['ph'], 'E') + + @unittest.skipIf(sys.platform == 'win32', 'crbug.com/945819') + def testTracingControlDisabledInChildButNotInParent(self): +@@ -424,34 +424,34 @@ + trace_event.trace_flush() + with open(self._log_path, 'r') as f: + log_output = json.loads(f.read() + ']') +- self.assertEquals(len(log_output), 3) ++ self.assertEqual(len(log_output), 3) + meta_data = log_output[0] + parent_open = log_output[1] + parent_close = log_output[2] +- self.assertEquals(parent_open['category'], 'python') +- self.assertEquals(parent_open['name'], 'parent') +- self.assertEquals(parent_open['ph'], 'B') +- self.assertEquals(parent_close['category'], 'python') +- self.assertEquals(parent_close['name'], 'parent') +- self.assertEquals(parent_close['ph'], 'E') ++ self.assertEqual(parent_open['category'], 'python') ++ self.assertEqual(parent_open['name'], 'parent') ++ self.assertEqual(parent_open['ph'], 'B') ++ self.assertEqual(parent_close['category'], 'python') ++ self.assertEqual(parent_close['name'], 'parent') ++ self.assertEqual(parent_close['ph'], 'E') + + def testFormatJson(self): + with self._test_trace(format=trace_event.JSON): + trace_event.trace_flush() + with open(self._log_path, 'r') as f: + log_output = json.loads(f.read() + ']') +- self.assertEquals(len(log_output), 1) +- self.assertEquals(log_output[0]['ph'], 'M') ++ self.assertEqual(len(log_output), 1) ++ self.assertEqual(log_output[0]['ph'], 'M') + + def testFormatJsonWithMetadata(self): + with self._test_trace(format=trace_event.JSON_WITH_METADATA): + trace_event.trace_disable() + with open(self._log_path, 'r') as f: + log_output = json.load(f) +- self.assertEquals(len(log_output), 2) ++ self.assertEqual(len(log_output), 2) + events = log_output['traceEvents'] +- self.assertEquals(len(events), 1) +- self.assertEquals(events[0]['ph'], 'M') ++ self.assertEqual(len(events), 1) ++ self.assertEqual(events[0]['ph'], 'M') + + def 
testFormatProtobuf(self): + with self._test_trace(format=trace_event.PROTOBUF): +@@ -473,16 +473,16 @@ + trace_event.trace_disable() + with open(self._log_path, 'r') as f: + log_output = json.load(f) +- self.assertEquals(len(log_output), 2) ++ self.assertEqual(len(log_output), 2) + telemetry_metadata = log_output['metadata']['telemetry'] +- self.assertEquals(len(telemetry_metadata), 7) +- self.assertEquals(telemetry_metadata['benchmarkStart'], 1) +- self.assertEquals(telemetry_metadata['traceStart'], 2) +- self.assertEquals(telemetry_metadata['benchmarks'], ['benchmark']) +- self.assertEquals(telemetry_metadata['benchmarkDescriptions'], ['desc']) +- self.assertEquals(telemetry_metadata['stories'], ['story']) +- self.assertEquals(telemetry_metadata['storyTags'], ['tag1', 'tag2']) +- self.assertEquals(telemetry_metadata['storysetRepeats'], [0]) ++ self.assertEqual(len(telemetry_metadata), 7) ++ self.assertEqual(telemetry_metadata['benchmarkStart'], 1) ++ self.assertEqual(telemetry_metadata['traceStart'], 2) ++ self.assertEqual(telemetry_metadata['benchmarks'], ['benchmark']) ++ self.assertEqual(telemetry_metadata['benchmarkDescriptions'], ['desc']) ++ self.assertEqual(telemetry_metadata['stories'], ['story']) ++ self.assertEqual(telemetry_metadata['storyTags'], ['tag1', 'tag2']) ++ self.assertEqual(telemetry_metadata['storysetRepeats'], [0]) + + def testAddMetadataProtobuf(self): + with self._test_trace(format=trace_event.PROTOBUF): +--- a/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_time_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_time_unittest.py 2025-01-16 02:26:08.564263337 +0800 +@@ -98,21 +98,21 @@ + + # Linux tests. + def testGetClockGetTimeClockNumber_linux(self): +- self.assertEquals(trace_time.GetClockGetTimeClockNumber('linux'), 1) ++ self.assertEqual(trace_time.GetClockGetTimeClockNumber('linux'), 1) + + def testGetClockGetTimeClockNumber_freebsd(self): +- self.assertEquals(trace_time.GetClockGetTimeClockNumber('freebsd'), 4) ++ self.assertEqual(trace_time.GetClockGetTimeClockNumber('freebsd'), 4) + + def testGetClockGetTimeClockNumber_bsd(self): +- self.assertEquals(trace_time.GetClockGetTimeClockNumber('bsd'), 3) ++ self.assertEqual(trace_time.GetClockGetTimeClockNumber('bsd'), 3) + + def testGetClockGetTimeClockNumber_sunos(self): +- self.assertEquals(trace_time.GetClockGetTimeClockNumber('sunos5'), 4) ++ self.assertEqual(trace_time.GetClockGetTimeClockNumber('sunos5'), 4) + + # Smoke Test. + def testMonotonic(self): + time_one = trace_time.Now() +- for _ in xrange(1000): ++ for _ in range(1000): + time_two = trace_time.Now() + self.assertLessEqual(time_one, time_two) + time_one = time_two +--- a/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_impl/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_impl/__init__.py 2025-01-16 02:26:08.564263337 +0800 +@@ -1,7 +1,7 @@ + # Copyright 2016 The Chromium Authors. All rights reserved. + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. +-from log import * +-from decorators import * +-from meta_class import * +-import multiprocessing_shim ++from .log import * ++from .decorators import * ++from .meta_class import * ++from . 
import multiprocessing_shim +--- a/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_impl/decorators.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_impl/decorators.py 2025-01-16 02:26:08.564263337 +0800 +@@ -6,7 +6,7 @@ + import time + import functools + +-import log ++from . import log + from py_trace_event import trace_time + + +@@ -14,7 +14,7 @@ + def trace(name, **kwargs): + category = "python" + start = trace_time.Now() +- args_to_log = {key: repr(value) for key, value in kwargs.iteritems()} ++ args_to_log = {key: repr(value) for key, value in kwargs.items()} + log.add_trace_event("B", start, category, name, args_to_log) + try: + yield +@@ -42,7 +42,7 @@ + default = None + return (name, arg_index, default) + +- args_to_log = map(arg_spec_tuple, arg_names) ++ args_to_log = list(map(arg_spec_tuple, arg_names)) + + @functools.wraps(func) + def traced_function(*args, **kwargs): +--- a/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_impl/decorators_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_impl/decorators_test.py 2025-01-16 02:26:08.564263337 +0800 +@@ -2,11 +2,11 @@ + # Copyright 2016 The Chromium Authors. All rights reserved. + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. +-import decorators ++from . import decorators + import logging + import unittest + +-from trace_test import TraceTest ++from .trace_test import TraceTest + #from .trace_test import TraceTest + + def generator(): +@@ -41,21 +41,21 @@ + events = res.findEventsOnThread(res.findThreadIds()[0]) + + # Sanity checks. +- self.assertEquals(2, len(events)) +- self.assertEquals(events[0]["name"], events[1]["name"]) ++ self.assertEqual(2, len(events)) ++ self.assertEqual(events[0]["name"], events[1]["name"]) + return events[1]["name"] + + + def test_func_names_work(self): + expected_method_name = __name__ + '.traced_func' +- self.assertEquals(expected_method_name, ++ self.assertEqual(expected_method_name, + self._get_decorated_method_name(traced_func)) + + def test_method_names_work(self): + ctt = ClassToTest() +- self.assertEquals('ClassToTest.method1', ++ self.assertEqual('ClassToTest.method1', + self._get_decorated_method_name(ctt.method1)) +- self.assertEquals('ClassToTest.method2', ++ self.assertEqual('ClassToTest.method2', + self._get_decorated_method_name(ctt.method2)) + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_impl/log.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_impl/log.py 2025-01-16 02:26:08.564263337 +0800 +@@ -8,7 +8,7 @@ + import time + import threading + import multiprocessing +-import multiprocessing_shim ++from . 
import multiprocessing_shim
+
+ from py_trace_event.trace_event_impl import perfetto_trace_writer
+ from py_trace_event import trace_time
+@@ -170,7 +170,7 @@
+ log_file = open("%s.pb" % n, "ab", False)
+ else:
+ log_file = open("%s.json" % n, "ab", False)
+- elif isinstance(log_file, basestring):
++ elif isinstance(log_file, str):
+ log_file = open("%s" % log_file, "ab", False)
+ elif not hasattr(log_file, 'fileno'):
+ raise TraceException(
+--- a/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_impl/log_io_test.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_impl/log_io_test.py 2025-01-16 02:26:08.564263337 +0800
+@@ -7,8 +7,8 @@
+ import sys
+ import unittest
+
+-from log import *
+-from parsed_trace_events import *
++from .log import *
++from .parsed_trace_events import *
+ from py_utils import tempfile_ext
+
+
+--- a/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_impl/meta_class.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_impl/meta_class.py 2025-01-16 02:26:08.564263337 +0800
+@@ -9,7 +9,7 @@
+
+ class TracedMetaClass(type):
+ def __new__(cls, name, bases, attrs):
+- for attr_name, attr_value in attrs.iteritems():
++ for attr_name, attr_value in attrs.items():
+ if (not attr_name.startswith('_') and
+ isinstance(attr_value, types.FunctionType)):
+ attrs[attr_name] = decorators.traced(attr_value)
+--- a/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_impl/multiprocessing_shim.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_impl/multiprocessing_shim.py 2025-01-16 02:26:08.564263337 +0800
+@@ -2,7 +2,7 @@
+ # Use of this source code is governed by a BSD-style license that can be
+ # found in the LICENSE file.
+ import multiprocessing
+-import log
++from . import log
+ import time
+
+
+--- a/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_impl/parsed_trace_events.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_impl/parsed_trace_events.py 2025-01-16 02:26:08.564263337 +0800
+@@ -47,7 +47,7 @@
+ events = events['traceEvents']
+
+ if not hasattr(events, '__iter__'):
+- raise Exception, 'events must be iteraable.'
++ raise Exception('events must be iterable.')
+ self.events = events
+ self.pids = None
+ self.tids = None
+--- a/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_impl/perfetto_trace_writer.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_impl/perfetto_trace_writer.py 2025-01-16 02:26:08.564263337 +0800
+@@ -7,7 +7,7 @@
+
+ import collections
+
+-import perfetto_proto_classes as proto
++from . 
import perfetto_proto_classes as proto + + CLOCK_BOOTTIME = 6 + CLOCK_TELEMETRY = 64 +@@ -128,7 +128,7 @@ + legacy_event.name_iid = _intern_event_name(name, packet, tid) + packet.track_event.legacy_event = legacy_event + +- for name, value in args.iteritems(): ++ for name, value in args.items(): + debug_annotation = proto.DebugAnnotation() + debug_annotation.name = name + if isinstance(value, int): +--- a/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_impl/perfetto_trace_writer_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_impl/perfetto_trace_writer_unittest.py 2025-01-16 02:26:08.564263337 +0800 +@@ -4,7 +4,7 @@ + # found in the LICENSE file. + + import unittest +-import StringIO ++import io + + from py_trace_event.trace_event_impl import perfetto_trace_writer + +@@ -19,7 +19,7 @@ + perfetto_trace_writer.reset_global_state() + + def testWriteThreadDescriptorEvent(self): +- result = StringIO.StringIO() ++ result = io.StringIO() + perfetto_trace_writer.write_thread_descriptor_event( + output=result, + pid=1, +@@ -33,7 +33,7 @@ + self.assertEqual(expected_output, result.getvalue()) + + def testWriteTwoEvents(self): +- result = StringIO.StringIO() ++ result = io.StringIO() + perfetto_trace_writer.write_thread_descriptor_event( + output=result, + pid=1, +@@ -59,7 +59,7 @@ + self.assertEqual(expected_output, result.getvalue()) + + def testWriteMetadata(self): +- result = StringIO.StringIO() ++ result = io.StringIO() + perfetto_trace_writer.write_metadata( + output=result, + benchmark_start_time_us=1556716807306000, +@@ -79,7 +79,7 @@ + self.assertEqual(expected_output, result.getvalue()) + + def testWriteArgs(self): +- result = StringIO.StringIO() ++ result = io.StringIO() + perfetto_trace_writer.write_thread_descriptor_event( + output=result, + pid=1, +@@ -105,7 +105,7 @@ + self.assertEqual(expected_output, result.getvalue()) + + def testWriteChromeMetadata(self): +- result = StringIO.StringIO() ++ result = io.StringIO() + perfetto_trace_writer.write_chrome_metadata( + output=result, + clock_domain='FOO', +@@ -116,7 +116,7 @@ + self.assertEqual(expected_output, result.getvalue()) + + def testWriteClockSnapshot(self): +- result = StringIO.StringIO() ++ result = io.StringIO() + perfetto_trace_writer.write_clock_snapshot( + output=result, + tid=1, +--- a/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_impl/trace_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_trace_event/py_trace_event/trace_event_impl/trace_test.py 2025-01-16 02:26:08.564263337 +0800 +@@ -6,8 +6,8 @@ + #from .log import * + #from .parsed_trace_events import * + +-from log import * +-from parsed_trace_events import * ++from .log import * ++from .parsed_trace_events import * + from py_utils import tempfile_ext + + class TraceTest(unittest.TestCase): +--- a/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/__init__.py 2025-01-16 02:26:08.564263337 +0800 +@@ -4,7 +4,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ +-from __future__ import print_function ++ + + import functools + import inspect +--- a/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/camel_case.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/camel_case.py 2025-01-16 02:26:08.564263337 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + import re + import six + +--- a/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/discover_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/discover_unittest.py 2025-01-16 02:26:08.564263337 +0800 +@@ -1,9 +1,9 @@ + # Copyright 2013 The Chromium Authors. All rights reserved. + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import os + import unittest +--- a/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/exc_util_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/exc_util_unittest.py 2025-01-16 02:26:08.564263337 +0800 +@@ -55,7 +55,7 @@ + + class ReraiseTests(unittest.TestCase): + def assertLogMatches(self, pattern): +- self.assertRegexpMatches( ++ self.assertRegex( + sys.stderr.getvalue(), pattern) # pylint: disable=no-member + + def assertLogNotMatches(self, pattern): +--- a/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/expectations_parser.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/expectations_parser.py 2025-01-16 02:26:08.564263337 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + import re + import six + +--- a/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/expectations_parser_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/expectations_parser_unittest.py 2025-01-16 02:26:08.564263337 +0800 +@@ -3,9 +3,9 @@ + # found in the LICENSE file. + + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import unittest + +--- a/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/lock_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/lock_unittest.py 2025-01-16 02:26:08.564263337 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import multiprocessing + import os +@@ -102,7 +102,7 @@ + + # temp_write_file should contains 10 copy of temp_file_path's content. 
+ with open(temp_write_file, 'r') as f: +- self.assertEquals('0123456789'*10, f.read()) ++ self.assertEqual('0123456789'*10, f.read()) + finally: + os.remove(temp_write_file) + +@@ -119,7 +119,7 @@ + p.start() + p.join() + with open(temp_status_file, 'r') as f: +- self.assertEquals('LockException raised', f.read()) ++ self.assertEqual('LockException raised', f.read()) + finally: + os.remove(temp_status_file) + +@@ -137,7 +137,7 @@ + p.start() + p.join() + with open(temp_status_file, 'r') as f: +- self.assertEquals('LockException was not raised', f.read()) ++ self.assertEqual('LockException was not raised', f.read()) + finally: + os.remove(temp_status_file) + +@@ -156,7 +156,7 @@ + p.start() + p.join() + with open(temp_status_file, 'r') as f: +- self.assertEquals('LockException raised', f.read()) ++ self.assertEqual('LockException raised', f.read()) + + # Accessing self.temp_file_path here should not raise exception. + p = multiprocessing.Process( +@@ -165,6 +165,6 @@ + p.start() + p.join() + with open(temp_status_file, 'r') as f: +- self.assertEquals('LockException was not raised', f.read()) ++ self.assertEqual('LockException was not raised', f.read()) + finally: + os.remove(temp_status_file) +--- a/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/retry_util.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/retry_util.py 2025-01-16 02:26:08.564263337 +0800 +@@ -1,9 +1,9 @@ + # Copyright 2018 The Chromium Authors. All rights reserved. + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + import functools + import logging + import time +--- a/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/shell_util.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/shell_util.py 2025-01-16 02:26:08.564263337 +0800 +@@ -4,7 +4,7 @@ + # + # Shell scripting helpers (created for Telemetry dependency roll scripts). + +-from __future__ import print_function ++ + + import os as _os + import shutil as _shutil +--- a/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/slots_metaclass_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/slots_metaclass_unittest.py 2025-01-16 02:26:08.564263337 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+
+-from __future__ import absolute_import
+-from __future__ import division
+-from __future__ import print_function
++
++
++
+
+ import unittest
+
+--- a/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/tempfile_ext_unittest.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/tempfile_ext_unittest.py 2025-01-16 02:26:08.564263337 +0800
+@@ -37,7 +37,7 @@
+ test_dir = '/baz'
+ self.fs.CreateDirectory(test_dir)
+ with tempfile_ext.NamedTemporaryDirectory(dir=test_dir) as d:
+- self.assertEquals(test_dir, os.path.dirname(d))
++ self.assertEqual(test_dir, os.path.dirname(d))
+
+
+ class TemporaryFilesTest(fake_filesystem_unittest.TestCase):
+--- a/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/ts_proxy_server_unittest.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/ts_proxy_server_unittest.py 2025-01-16 02:26:08.564263337 +0800
+@@ -8,15 +8,15 @@
+
+ class TsProxyServerTest(unittest.TestCase):
+ def testParseTsProxyPort(self):
+- self.assertEquals(
++ self.assertEqual(
+ ts_proxy_server.ParseTsProxyPortFromOutput(
+ 'Started Socks5 proxy server on 127.0.0.1:54430 \n'),
+ 54430)
+- self.assertEquals(
++ self.assertEqual(
+ ts_proxy_server.ParseTsProxyPortFromOutput(
+ 'Started Socks5 proxy server on foo.bar.com:430 \n'),
+ 430)
+- self.assertEquals(
++ self.assertEqual(
+ ts_proxy_server.ParseTsProxyPortFromOutput(
+ 'Failed to start sock5 proxy.'),
+ None)
+@@ -44,13 +44,13 @@
+ server.UpdateTrafficSettings(download_bandwidth_kbps=5000)
+ server.UpdateTrafficSettings(upload_bandwidth_kbps=2000)
+
+- self.assertEquals(server._rtt, 100)
+- self.assertEquals(server._inbkps, 5000)
+- self.assertEquals(server._outkbps, 2000)
++ self.assertEqual(server._rtt, 100)
++ self.assertEqual(server._inbkps, 5000)
++ self.assertEqual(server._outkbps, 2000)
+
+ server.UpdateTrafficSettings(
+ round_trip_latency_ms=200, download_bandwidth_kbps=500,
+ upload_bandwidth_kbps=2000)
+- self.assertEquals(server._rtt, 200)
+- self.assertEquals(server._inbkps, 500)
+- self.assertEquals(server._outkbps, 2000)
++ self.assertEqual(server._rtt, 200)
++ self.assertEqual(server._inbkps, 500)
++ self.assertEqual(server._outkbps, 2000)
+--- a/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/webpagereplay_go_server.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/webpagereplay_go_server.py 2025-01-16 02:26:08.564263337 +0800
+@@ -11,7 +11,7 @@
+ import subprocess
+ import sys
+ import tempfile
+-import urllib
++import urllib.request, urllib.parse, urllib.error
+
+ import py_utils
+ from py_utils import atexit_with_log
+@@ -136,7 +136,7 @@
+ cur_cwd = os.getcwd()
+ os.chdir(go_folder)
+ try:
+- print subprocess.check_output(['go', 'build', os.path.join(go_folder, 'wpr.go')])
++ print(subprocess.check_output(['go', 'build', os.path.join(go_folder, 'wpr.go')]))
+ except subprocess.CalledProcessError:
+ exit(1)
+ os.chdir(cur_cwd)
+@@ -392,7 +392,7 @@
+ if logging.getLogger('').isEnabledFor(log_level):
+ logging.log(log_level, output)
+ else:
+- print output
++ print(output)
+
+ os.remove(self._temp_log_file_path)
+ self._temp_log_file_path = None
+@@ -420,7 +420,9 @@
+ """
+ url = '%s://%s:%s/%s' % (
+ protocol, self._replay_host, self._started_ports[protocol], url_path)
+- return urllib.urlopen(url, proxies={})
++ # Python 3's urlopen() has no 'proxies' argument; an empty ProxyHandler keeps bypassing proxies.
++ opener = urllib.request.build_opener(urllib.request.ProxyHandler({}))
++ return opener.open(url)
+
+ def 
_ResetInterruptHandler(): + """Reset the interrupt handler back to the default. +--- a/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/refactor/offset_token.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/refactor/offset_token.py 2025-01-16 02:26:08.564263337 +0800 +@@ -3,9 +3,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + import collections + import itertools + import token +@@ -17,7 +17,7 @@ + """s -> (None, s0), (s0, s1), (s1, s2), (s2, s3), ...""" + a, b = itertools.tee(iterable) + a = itertools.chain((None,), a) +- return zip(a, b) ++ return list(zip(a, b)) + + + class OffsetToken(object): +--- a/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/refactor/snippet.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/refactor/snippet.py 2025-01-16 02:26:08.564263337 +0800 +@@ -2,7 +2,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import parser + import symbol +--- a/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/refactor/annotated_symbol/base_symbol.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/refactor/annotated_symbol/base_symbol.py 2025-01-16 02:26:08.564263337 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + from six.moves import range # pylint: disable=redefined-builtin + +--- a/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/refactor/annotated_symbol/import_statement.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/refactor/annotated_symbol/import_statement.py 2025-01-16 02:26:08.564263337 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import keyword + import symbol +--- a/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/refactor/annotated_symbol/reference.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/refactor/annotated_symbol/reference.py 2025-01-16 02:26:08.564263337 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import symbol + import token +--- a/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/refactor_util/move.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_utils/py_utils/refactor_util/move.py 2025-01-16 02:26:08.564263337 +0800 +@@ -2,7 +2,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ +-from __future__ import print_function ++ + + import functools + import os +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/fake_fs.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/fake_fs.py 2025-01-16 02:26:08.564263337 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import codecs + import collections +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/fake_fs_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/fake_fs_unittest.py 2025-01-16 02:26:08.564263337 +0800 +@@ -15,7 +15,7 @@ + fs.AddFile('/blah/x', 'foobar') + with fs: + assert os.path.exists(os.path.normpath('/blah/x')) +- self.assertEquals( ++ self.assertEqual( + 'foobar', + open(os.path.normpath('/blah/x'), 'r').read()) + +@@ -24,7 +24,7 @@ + fs.AddFile('/blah/x', 'foobar') + with fs: + with open(os.path.normpath('/blah/x'), 'r') as f: +- self.assertEquals('foobar', f.read()) ++ self.assertEqual('foobar', f.read()) + + def testWalk(self): + fs = fake_fs.FakeFS() +@@ -35,18 +35,18 @@ + with fs: + gen = os.walk(os.path.normpath('/')) + r = next(gen) +- self.assertEquals((os.path.normpath('/'), ['x'], ['a.txt']), r) ++ self.assertEqual((os.path.normpath('/'), ['x'], ['a.txt']), r) + + r = next(gen) +- self.assertEquals((os.path.normpath('/x'), ['w', 'w2'], ['y.txt']), r) ++ self.assertEqual((os.path.normpath('/x'), ['w', 'w2'], ['y.txt']), r) + + r = next(gen) +- self.assertEquals((os.path.normpath('/x/w'), [], ['z.txt']), r) ++ self.assertEqual((os.path.normpath('/x/w'), [], ['z.txt']), r) + + r = next(gen) +- self.assertEquals((os.path.normpath('/x/w2'), ['w3'], []), r) ++ self.assertEqual((os.path.normpath('/x/w2'), ['w3'], []), r) + + r = next(gen) +- self.assertEquals((os.path.normpath('/x/w2/w3'), [], ['z3.txt']), r) ++ self.assertEqual((os.path.normpath('/x/w2/w3'), [], ['z3.txt']), r) + +- self.assertRaises(StopIteration, gen.next) ++ self.assertRaises(StopIteration, gen.__next__) +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/generate.py 2025-01-15 07:12:14.981258977 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/generate.py 2025-01-16 02:36:17.788059915 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+
+-from __future__ import absolute_import
+-from __future__ import division
+-from __future__ import print_function
++
++
++
+
+ import os
+ import subprocess
+@@ -83,7 +83,7 @@
+
+ with tempfile.NamedTemporaryFile() as _:
+ args = [
+- 'python2',
++ 'python3',
+ rjsmin_path
+ ]
+ p = subprocess.Popen(args,
+@@ -203,12 +203,13 @@
+ os.path.join(py_vulcanize_path, 'third_party', 'rcssmin', 'rcssmin.py'))
+
+ with tempfile.NamedTemporaryFile() as _:
+- rcssmin_args = ['python2', rcssmin_path]
++ rcssmin_args = ['python3', rcssmin_path]
+ p = subprocess.Popen(rcssmin_args,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+- res = p.communicate(input=css_text)
++ # Popen pipes expect bytes under Python 3, so encode the CSS text first.
++ res = p.communicate(input=css_text.encode('utf-8'))
+ errorcode = p.wait()
+ if errorcode != 0:
+ sys.stderr.write('rCSSmin exited with error code %d' % errorcode)
+--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/html_module_unittest.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/html_module_unittest.py 2025-01-16 02:26:08.564263337 +0800
+@@ -2,9 +2,9 @@
+ # Use of this source code is governed by a BSD-style license that can be
+ # found in the LICENSE file.
+
+-from __future__ import absolute_import
+-from __future__ import division
+-from __future__ import print_function
++
++
++
+
+ import os
+ import unittest
+@@ -91,8 +91,8 @@
+ '/tmp/a/b/',
+ is_component=False,
+ parser_results=parse_results)
+- self.assertEquals([], metadata.dependent_module_names)
+- self.assertEquals(
++ self.assertEqual([], metadata.dependent_module_names)
++ self.assertEqual(
+ ['a/foo.js'], metadata.dependent_raw_script_relative_paths)
+
+ def testExternalScriptReferenceToModuleOutsideScriptPath(self):
+@@ -141,7 +141,7 @@
+ '/tmp/a/b/',
+ is_component=False,
+ parser_results=parse_results)
+- self.assertEquals(['a.foo'], metadata.dependent_module_names)
++ self.assertEqual(['a.foo'], metadata.dependent_module_names)
+
+ def testStyleSheetImport(self):
+ parse_results = parse_html_deps.HTMLModuleParserResults("""
+@@ -157,8 +157,8 @@
+ '/tmp/a/b/',
+ is_component=False,
+ parser_results=parse_results)
+- self.assertEquals([], metadata.dependent_module_names)
+- self.assertEquals(['a.foo'], metadata.style_sheet_names)
++ self.assertEqual([], metadata.dependent_module_names)
++ self.assertEqual(['a.foo'], metadata.style_sheet_names)
+
+ def testUsingAbsoluteHref(self):
+ parse_results = parse_html_deps.HTMLModuleParserResults("""
+@@ -175,7 +175,7 @@
+ "/tmp/a/b/",
+ is_component=False,
+ parser_results=parse_results)
+- self.assertEquals(['foo.js'], metadata.dependent_raw_script_relative_paths)
++ self.assertEqual(['foo.js'], metadata.dependent_raw_script_relative_paths)
+
+
+ class HTMLModuleTests(unittest.TestCase):
+@@ -231,7 +231,7 @@
+
+ # Check load sequence names. 
+ load_sequence_names = [x.name for x in load_sequence] +- self.assertEquals(['py_vulcanize', ++ self.assertEqual(['py_vulcanize', + 'widget', + 'a.b.start'], load_sequence_names) + +@@ -288,7 +288,7 @@ + is: "my-component" + }); + """.rstrip() +- self.assertEquals(expected_js, js) ++ self.assertEqual(expected_js, js) + + def testInlineStylesheetURLs(self): + file_contents = {} +@@ -309,7 +309,7 @@ + + computed_deps = [] + my_component.AppendDirectlyDependentFilenamesTo(computed_deps) +- self.assertEquals(set(computed_deps), ++ self.assertEqual(set(computed_deps), + set([os.path.normpath('/tmp/a/b/my_component.html'), + os.path.normpath('/tmp/a/something.jpg')])) + +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/module.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/module.py 2025-01-16 02:26:08.569679911 +0800 +@@ -11,9 +11,9 @@ + Other resources include HTML templates, raw JavaScript files, and stylesheets. + """ + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import codecs + import inspect +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/module_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/module_unittest.py 2025-01-16 02:26:08.569679911 +0800 +@@ -39,7 +39,7 @@ + loader = resource_loader.ResourceLoader(project) + x_module = loader.LoadModule('x') + +- self.assertEquals([loader.loaded_modules['y'], ++ self.assertEqual([loader.loaded_modules['y'], + loader.loaded_modules['z']], + x_module.dependent_modules) + +@@ -47,7 +47,7 @@ + load_sequence = [] + x_module.ComputeLoadSequenceRecursive(load_sequence, already_loaded_set) + +- self.assertEquals([loader.loaded_modules['z'], ++ self.assertEqual([loader.loaded_modules['z'], + loader.loaded_modules['y'], + x_module], + load_sequence) +@@ -68,7 +68,7 @@ + with fs: + my_module = loader.LoadModule(module_name='src.my_module') + dep_names = [x.name for x in my_module.dependent_modules] +- self.assertEquals(['py_vulcanize.foo'], dep_names) ++ self.assertEqual(['py_vulcanize.foo'], dep_names) + + def testDepsExceptionContext(self): + fs = fake_fs.FakeFS() +@@ -89,7 +89,7 @@ + assert False, 'Expected an exception' + except module.DepsException as e: + exc = e +- self.assertEquals( ++ self.assertEqual( + ['src.my_module', 'py_vulcanize.foo'], + exc.context) + +@@ -116,10 +116,10 @@ + loader = resource_loader.ResourceLoader(project) + with fs: + my_module = loader.LoadModule(module_name='z.foo') +- self.assertEquals(1, len(my_module.dependent_raw_scripts)) ++ self.assertEqual(1, len(my_module.dependent_raw_scripts)) + + dependent_filenames = my_module.GetAllDependentFilenamesRecursive() +- self.assertEquals( ++ self.assertEqual( + [ + os.path.normpath('/x/y/z/foo.html'), + os.path.normpath('/x/raw/bar.js'), +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/parse_html_deps.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/parse_html_deps.py 2025-01-16 02:26:08.569679911 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import os + import sys +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/parse_html_deps_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/parse_html_deps_unittest.py 2025-01-16 02:26:08.569679911 +0800 +@@ -15,18 +15,18 @@ + def test_parse_empty(self): + parser = parse_html_deps.HTMLModuleParser() + module = parser.Parse('') +- self.assertEquals([], module.scripts_external) +- self.assertEquals([], module.inline_scripts) +- self.assertEquals([], module.stylesheets) +- self.assertEquals([], module.imports) ++ self.assertEqual([], module.scripts_external) ++ self.assertEqual([], module.inline_scripts) ++ self.assertEqual([], module.stylesheets) ++ self.assertEqual([], module.imports) + + def test_parse_none(self): + parser = parse_html_deps.HTMLModuleParser() + module = parser.Parse(None) +- self.assertEquals([], module.scripts_external) +- self.assertEquals([], module.inline_scripts) +- self.assertEquals([], module.stylesheets) +- self.assertEquals([], module.imports) ++ self.assertEqual([], module.scripts_external) ++ self.assertEqual([], module.inline_scripts) ++ self.assertEqual([], module.stylesheets) ++ self.assertEqual([], module.imports) + + def test_parse_script_src_basic(self): + html = """ +@@ -40,10 +40,10 @@ + """ + parser = parse_html_deps.HTMLModuleParser() + module = parser.Parse(html) +- self.assertEquals(['polymer.min.js', 'foo.js'], module.scripts_external) +- self.assertEquals([], module.inline_scripts) +- self.assertEquals([], module.stylesheets) +- self.assertEquals([], module.imports) ++ self.assertEqual(['polymer.min.js', 'foo.js'], module.scripts_external) ++ self.assertEqual([], module.inline_scripts) ++ self.assertEqual([], module.stylesheets) ++ self.assertEqual([], module.imports) + self.assertNotIn( + 'DOCTYPE html', + module.html_contents_without_links_and_script) +@@ -59,10 +59,10 @@ + """ + parser = parse_html_deps.HTMLModuleParser() + module = parser.Parse(html) +- self.assertEquals([], module.scripts_external) +- self.assertEquals([], module.inline_scripts) +- self.assertEquals([], module.stylesheets) +- self.assertEquals(['x-foo.html'], module.imports) ++ self.assertEqual([], module.scripts_external) ++ self.assertEqual([], module.inline_scripts) ++ self.assertEqual([], module.stylesheets) ++ self.assertEqual(['x-foo.html'], module.imports) + + def test_parse_script_inline(self): + html = """ +@@ -76,18 +76,18 @@ + + parser = parse_html_deps.HTMLModuleParser() + module = parser.Parse(html) +- self.assertEquals([], module.scripts_external) +- self.assertEquals(1, len(module.inline_scripts)) +- self.assertEquals([], module.stylesheets) +- self.assertEquals([], module.imports) ++ self.assertEqual([], module.scripts_external) ++ self.assertEqual(1, len(module.inline_scripts)) ++ self.assertEqual([], module.stylesheets) ++ self.assertEqual([], module.imports) + + script0 = module.inline_scripts[0] + val = re.sub(r'\s+', '', script0.contents) + inner_script = """py_vulcanize.require("foo");py_vulcanize.require('bar');""" +- self.assertEquals(inner_script, val) ++ self.assertEqual(inner_script, val) + +- self.assertEquals(3, len(script0.open_tags)) +- self.assertEquals('polymer-element', script0.open_tags[2].tag) ++ self.assertEqual(3, len(script0.open_tags)) ++ self.assertEqual('polymer-element', script0.open_tags[2].tag) 
+ + self.assertNotIn( + 'py_vulcanize.require("foo");', +@@ -104,19 +104,19 @@ + + parser = parse_html_deps.HTMLModuleParser() + module = parser.Parse(html) +- self.assertEquals(3, len(module.scripts)) +- self.assertEquals('window = {}', module.scripts[0].contents) +- self.assertEquals("foo.js",module.scripts[1].src) ++ self.assertEqual(3, len(module.scripts)) ++ self.assertEqual('window = {}', module.scripts[0].contents) ++ self.assertEqual("foo.js",module.scripts[1].src) + self.assertTrue(module.scripts[1].is_external) +- self.assertEquals('window = undefined', module.scripts[2].contents) +- self.assertEquals([], module.imports) ++ self.assertEqual('window = undefined', module.scripts[2].contents) ++ self.assertEqual([], module.imports) + + def test_parse_script_src_sripping(self): + html = """ + + """ + module = parse_html_deps.HTMLModuleParser().Parse(html) +- self.assertEquals('', ++ self.assertEqual('', + module.html_contents_without_links_and_script) + + def test_parse_link_rel_stylesheet(self): +@@ -127,10 +127,10 @@ + """ + parser = parse_html_deps.HTMLModuleParser() + module = parser.Parse(html) +- self.assertEquals([], module.scripts_external) +- self.assertEquals([], module.inline_scripts) +- self.assertEquals(['frameworkstyles.css'], module.stylesheets) +- self.assertEquals([], module.imports) ++ self.assertEqual([], module.scripts_external) ++ self.assertEqual([], module.inline_scripts) ++ self.assertEqual(['frameworkstyles.css'], module.stylesheets) ++ self.assertEqual([], module.imports) + + class Ctl(html_generation_controller.HTMLGenerationController): + +@@ -145,14 +145,14 @@ + + + """ +- self.assertEquals(ghtm, gen_html) ++ self.assertEqual(ghtm, gen_html) + + def test_parse_inline_style(self): + html = """""" + module = parse_html_deps.HTMLModuleParser().Parse(html) +- self.assertEquals(html, module.html_contents_without_links_and_script) ++ self.assertEqual(html, module.html_contents_without_links_and_script) + + class Ctl(html_generation_controller.HTMLGenerationController): + +@@ -165,7 +165,7 @@ + ghtm = """""" +- self.assertEquals(ghtm, gen_html) ++ self.assertEqual(ghtm, gen_html) + + def test_parse_style_import(self): + html = """ +@@ -187,40 +187,40 @@ + parser = parse_html_deps.HTMLModuleParser() + res = parser.Parse(orig_html) + html = res.html_contents_without_links_and_script +- self.assertEquals(html, orig_html) ++ self.assertEqual(html, orig_html) + + def test_html_contents_basic(self): + html = """
d""" + parser = parse_html_deps.HTMLModuleParser() + module = parser.Parse(html) +- self.assertEquals(html, module.html_contents_without_links_and_script) ++ self.assertEqual(html, module.html_contents_without_links_and_script) + + def test_html_contents_with_entity(self): + html = """""" + parser = parse_html_deps.HTMLModuleParser() + module = parser.Parse(html) +- self.assertEquals(u'\u2192', ++ self.assertEqual('\u2192', + module.html_contents_without_links_and_script) + + def test_html_content_with_charref(self): + html = """>""" + parser = parse_html_deps.HTMLModuleParser() + module = parser.Parse(html) +- self.assertEquals('>', ++ self.assertEqual('>', + module.html_contents_without_links_and_script) + + def test_html_content_start_end_br(self): + html = """
""" + parser = parse_html_deps.HTMLModuleParser() + module = parser.Parse(html) +- self.assertEquals('
', ++ self.assertEqual('
', + module.html_contents_without_links_and_script) + + def test_html_content_start_end_img(self): + html = """""" + parser = parse_html_deps.HTMLModuleParser() + module = parser.Parse(html) +- self.assertEquals('', ++ self.assertEqual('', + module.html_contents_without_links_and_script) + + def test_html_contents_with_link_stripping(self): +@@ -228,7 +228,7 @@ + """ + parser = parse_html_deps.HTMLModuleParser() + module = parser.Parse(html) +- self.assertEquals("""d""", ++ self.assertEqual("""d""", + module.html_contents_without_links_and_script.strip()) + + def test_html_contents_with_style_link_stripping(self): +@@ -236,7 +236,7 @@ + """ + parser = parse_html_deps.HTMLModuleParser() + module = parser.Parse(html) +- self.assertEquals("""d""", ++ self.assertEqual("""d""", + module.html_contents_without_links_and_script.strip()) + + def test_br_does_not_raise(self): +@@ -288,5 +288,5 @@ + html = """""" + parser = parse_html_deps.HTMLModuleParser() + module = parser.Parse(html) +- self.assertEquals(1, len(module.inline_scripts)) +- self.assertEquals('', module.inline_scripts[0].contents) ++ self.assertEqual(1, len(module.inline_scripts)) ++ self.assertEqual('', module.inline_scripts[0].contents) +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/project.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/project.py 2025-01-16 02:26:08.569679911 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + import collections + import os + +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/resource_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/resource_unittest.py 2025-01-16 02:26:08.569679911 +0800 +@@ -12,6 +12,6 @@ + + def testBasic(self): + r = resource.Resource('/a', '/a/b/c.js') +- self.assertEquals('b.c', r.name) +- self.assertEquals(os.path.join('b', 'c.js'), r.relative_path) +- self.assertEquals('b/c.js', r.unix_style_relative_path) ++ self.assertEqual('b.c', r.name) ++ self.assertEqual(os.path.join('b', 'c.js'), r.relative_path) ++ self.assertEqual('b/c.js', r.unix_style_relative_path) +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/strip_js_comments_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/strip_js_comments_unittest.py 2025-01-16 02:26:08.569679911 +0800 +@@ -16,38 +16,38 @@ + """Test case for _strip_js_comments and _TokenizeJS.""" + + def test_strip_comments(self): +- self.assertEquals( ++ self.assertEqual( + 'A ', strip_js_comments.StripJSComments('A // foo')) +- self.assertEquals( ++ self.assertEqual( + 'A bar', strip_js_comments.StripJSComments('A // foo\nbar')) +- self.assertEquals( ++ self.assertEqual( + 'A b', strip_js_comments.StripJSComments('A /* foo */ b')) +- self.assertEquals( ++ self.assertEqual( + 'A b', strip_js_comments.StripJSComments('A /* foo\n */ b')) + + def test_tokenize_empty(self): + tokens = list(strip_js_comments._TokenizeJS('')) +- self.assertEquals([], tokens) ++ self.assertEqual([], tokens) + + def test_tokenize_nl(self): + tokens = list(strip_js_comments._TokenizeJS('\n')) +- self.assertEquals(['\n'], tokens) ++ 
self.assertEqual(['\n'], tokens) + + def test_tokenize_slashslash_comment(self): + tokens = list(strip_js_comments._TokenizeJS('A // foo')) +- self.assertEquals(['A ', '//', ' foo'], tokens) ++ self.assertEqual(['A ', '//', ' foo'], tokens) + + def test_tokenize_slashslash_comment_then_newline(self): + tokens = list(strip_js_comments._TokenizeJS('A // foo\nbar')) +- self.assertEquals(['A ', '//', ' foo', '\n', 'bar'], tokens) ++ self.assertEqual(['A ', '//', ' foo', '\n', 'bar'], tokens) + + def test_tokenize_cstyle_comment_one_line(self): + tokens = list(strip_js_comments._TokenizeJS('A /* foo */')) +- self.assertEquals(['A ', '/*', ' foo ', '*/'], tokens) ++ self.assertEqual(['A ', '/*', ' foo ', '*/'], tokens) + + def test_tokenize_cstyle_comment_multi_line(self): + tokens = list(strip_js_comments._TokenizeJS('A /* foo\n*bar\n*/')) +- self.assertEquals(['A ', '/*', ' foo', '\n', '*bar', '\n', '*/'], tokens) ++ self.assertEqual(['A ', '/*', ' foo', '\n', '*bar', '\n', '*/'], tokens) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/style_sheet_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/style_sheet_unittest.py 2025-01-16 02:26:08.569679911 +0800 +@@ -27,14 +27,14 @@ + loader = resource_loader.ResourceLoader(project) + + foo_x = loader.LoadStyleSheet('foo.x') +- self.assertEquals(1, len(foo_x.images)) ++ self.assertEqual(1, len(foo_x.images)) + + r0 = foo_x.images[0] +- self.assertEquals(os.path.normpath('/src/images/bar.jpeg'), ++ self.assertEqual(os.path.normpath('/src/images/bar.jpeg'), + r0.absolute_path) + + inlined = foo_x.contents_with_inlined_images +- self.assertEquals(""" ++ self.assertEqual(""" + .x .y { + background-image: url(data:image/jpeg;base64,%s); + } +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/resource_loader.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/py_vulcanize/resource_loader.py 2025-01-16 02:34:58.256497019 +0800 +@@ -59,7 +59,8 @@ + return None + + # Sort by length. Longest match wins. +- candidate_paths.sort(lambda x, y: len(x) - len(y)) ++ #candidate_paths.sort(lambda x, y: len(x) - len(y)) ++ candidate_paths.sort(key=lambda x: len(x)) + longest_candidate = candidate_paths[-1] + return resource_module.Resource(longest_candidate, absolute_path, binary) + +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/_setup/py2/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/_setup/py2/__init__.py 2025-01-16 02:26:08.569679911 +0800 +@@ -21,7 +21,7 @@ + + This package provides tools for main package setup. + """ +-__author__ = u"Andr\xe9 Malo" ++__author__ = "Andr\xe9 Malo" + __docformat__ = "restructuredtext en" + + from _setup.setup import run # pylint: disable = W0611 +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/_setup/py2/commands.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/_setup/py2/commands.py 2025-01-16 02:26:08.569679911 +0800 +@@ -21,7 +21,7 @@ + + Command extenders. 
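The resource_loader.py hunk above is the classic cmp-to-key migration: Python 3's list.sort() no longer accepts a comparison function. A minimal sketch of the two idiomatic replacements, illustrative only and not part of the patch:

    import functools

    candidate_paths = ['/a/b/c', '/a', '/a/b']

    # Sort by a key (what the patch does); shortest first, longest last.
    candidate_paths.sort(key=len)
    longest_candidate = candidate_paths[-1]      # '/a/b/c'

    # If an existing cmp-style function must be kept, adapt it instead:
    candidate_paths.sort(key=functools.cmp_to_key(lambda x, y: len(x) - len(y)))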
+ """ +-__author__ = u"Andr\xe9 Malo" ++__author__ = "Andr\xe9 Malo" + __docformat__ = "restructuredtext en" + __test__ = False + +@@ -65,17 +65,17 @@ + attr_name = _fancy_getopt.translate_longopt(long_name) + else: + attr_name = _fancy_getopt.translate_longopt(long_name[:-1]) +- if not _option_defaults.has_key(command): ++ if command not in _option_defaults: + _option_defaults[command] = [] + if inherit is not None: +- if isinstance(inherit, (str, unicode)): ++ if isinstance(inherit, str): + inherit = [inherit] + for i_inherit in inherit: + add_option( + i_inherit, long_name, help_text, short_name, default + ) + default = None +- if not _option_inherits.has_key(command): ++ if command not in _option_inherits: + _option_inherits[command] = [] + for i_inherit in inherit: + for i_command, opt_name in _option_inherits[command]: +@@ -88,9 +88,9 @@ + + def add_finalizer(command, key, func): + """ Add finalizer """ +- if not _option_finalizers.has_key(command): ++ if command not in _option_finalizers: + _option_finalizers[command] = {} +- if not _option_finalizers[command].has_key(key): ++ if key not in _option_finalizers[command]: + _option_finalizers[command][key] = func + + +@@ -108,18 +108,18 @@ + """ Prepare for new options """ + _install.install.initialize_options(self) + self.single_version_externally_managed = None +- if _option_defaults.has_key('install'): ++ if 'install' in _option_defaults: + for opt_name, default in _option_defaults['install']: + setattr(self, opt_name, default) + + def finalize_options(self): + """ Finalize options """ + _install.install.finalize_options(self) +- if _option_inherits.has_key('install'): ++ if 'install' in _option_inherits: + for parent, opt_name in _option_inherits['install']: + self.set_undefined_options(parent, (opt_name, opt_name)) +- if _option_finalizers.has_key('install'): +- for func in _option_finalizers['install'].values(): ++ if 'install' in _option_finalizers: ++ for func in list(_option_finalizers['install'].values()): + func(self) + + +@@ -131,18 +131,18 @@ + def initialize_options(self): + """ Prepare for new options """ + _install_data.install_data.initialize_options(self) +- if _option_defaults.has_key('install_data'): ++ if 'install_data' in _option_defaults: + for opt_name, default in _option_defaults['install_data']: + setattr(self, opt_name, default) + + def finalize_options(self): + """ Finalize options """ + _install_data.install_data.finalize_options(self) +- if _option_inherits.has_key('install_data'): ++ if 'install_data' in _option_inherits: + for parent, opt_name in _option_inherits['install_data']: + self.set_undefined_options(parent, (opt_name, opt_name)) +- if _option_finalizers.has_key('install_data'): +- for func in _option_finalizers['install_data'].values(): ++ if 'install_data' in _option_finalizers: ++ for func in list(_option_finalizers['install_data'].values()): + func(self) + + +@@ -154,18 +154,18 @@ + def initialize_options(self): + """ Prepare for new options """ + _install_lib.install_lib.initialize_options(self) +- if _option_defaults.has_key('install_lib'): ++ if 'install_lib' in _option_defaults: + for opt_name, default in _option_defaults['install_lib']: + setattr(self, opt_name, default) + + def finalize_options(self): + """ Finalize options """ + _install_lib.install_lib.finalize_options(self) +- if _option_inherits.has_key('install_lib'): ++ if 'install_lib' in _option_inherits: + for parent, opt_name in _option_inherits['install_lib']: + self.set_undefined_options(parent, (opt_name, opt_name)) +- if 
_option_finalizers.has_key('install_lib'): +- for func in _option_finalizers['install_lib'].values(): ++ if 'install_lib' in _option_finalizers: ++ for func in list(_option_finalizers['install_lib'].values()): + func(self) + + +@@ -182,18 +182,18 @@ + def initialize_options(self): + """ Prepare for new options """ + _build_ext.build_ext.initialize_options(self) +- if _option_defaults.has_key('build_ext'): ++ if 'build_ext' in _option_defaults: + for opt_name, default in _option_defaults['build_ext']: + setattr(self, opt_name, default) + + def finalize_options(self): + """ Finalize options """ + _build_ext.build_ext.finalize_options(self) +- if _option_inherits.has_key('build_ext'): ++ if 'build_ext' in _option_inherits: + for parent, opt_name in _option_inherits['build_ext']: + self.set_undefined_options(parent, (opt_name, opt_name)) +- if _option_finalizers.has_key('build_ext'): +- for func in _option_finalizers['build_ext'].values(): ++ if 'build_ext' in _option_finalizers: ++ for func in list(_option_finalizers['build_ext'].values()): + func(self) + + def build_extension(self, ext): +@@ -252,16 +252,16 @@ + def initialize_options(self): + """ Prepare for new options """ + _build.build.initialize_options(self) +- if _option_defaults.has_key('build'): ++ if 'build' in _option_defaults: + for opt_name, default in _option_defaults['build']: + setattr(self, opt_name, default) + + def finalize_options(self): + """ Finalize options """ + _build.build.finalize_options(self) +- if _option_inherits.has_key('build'): ++ if 'build' in _option_inherits: + for parent, opt_name in _option_inherits['build']: + self.set_undefined_options(parent, (opt_name, opt_name)) +- if _option_finalizers.has_key('build'): +- for func in _option_finalizers['build'].values(): ++ if 'build' in _option_finalizers: ++ for func in list(_option_finalizers['build'].values()): + func(self) +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/_setup/py2/data.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/_setup/py2/data.py 2025-01-16 02:26:08.569679911 +0800 +@@ -21,7 +21,7 @@ + + This module provides tools to simplify data distribution. + """ +-__author__ = u"Andr\xe9 Malo" ++__author__ = "Andr\xe9 Malo" + __docformat__ = "restructuredtext en" + + from distutils import filelist as _filelist +@@ -121,7 +121,7 @@ + if len(name) > 1: + target = telems + name[:-1] + tmap.setdefault(_posixpath.join(*target), []).append(fname) +- return tmap.items() ++ return list(tmap.items()) + + + class Documentation(Data): +@@ -155,7 +155,7 @@ + mpmap.setdefault(ext, []).append(manpage) + return [cls(manpages, prefix=_posixpath.join( + 'share', 'man', 'man%s' % section, +- )) for section, manpages in mpmap.items()] ++ )) for section, manpages in list(mpmap.items())] + dispatch = classmethod(dispatch) + + def flatten(self, installer): +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/_setup/py2/dist.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/_setup/py2/dist.py 2025-01-16 02:26:08.571846541 +0800 +@@ -21,7 +21,7 @@ + + dist utilities. 
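The commands.py hunks above replace the removed dict.has_key() with the in operator and wrap .values() in list(), since Python 3 dict methods return live views. A minimal sketch of both patterns, illustrative only and not part of the patch:

    _option_defaults = {'install': [('prefix', None)]}

    # Python 2: if _option_defaults.has_key('install'): ...
    if 'install' in _option_defaults:            # works on both 2 and 3
        pass

    # .keys()/.values()/.items() are views on Python 3; materialize them
    # when the dict may change underneath, or when a real list is needed:
    for defaults in list(_option_defaults.values()):
        pass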
+ """ +-__author__ = u"Andr\xe9 Malo" ++__author__ = "Andr\xe9 Malo" + __docformat__ = "restructuredtext en" + + import sys as _sys +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/_setup/py2/ext.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/_setup/py2/ext.py 2025-01-16 02:26:08.571846541 +0800 +@@ -21,7 +21,7 @@ + + C extension tools. + """ +-__author__ = u"Andr\xe9 Malo" ++__author__ = "Andr\xe9 Malo" + __docformat__ = "restructuredtext en" + __test__ = False + +@@ -60,7 +60,7 @@ + + def __init__(self, *args, **kwargs): + """ Initialization """ +- if kwargs.has_key('depends'): ++ if 'depends' in kwargs: + self.depends = kwargs['depends'] or [] + else: + self.depends = [] +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/_setup/py2/setup.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/_setup/py2/setup.py 2025-01-16 02:26:08.571846541 +0800 +@@ -21,10 +21,10 @@ + + This module provides a wrapper around the distutils core setup. + """ +-__author__ = u"Andr\xe9 Malo" ++__author__ = "Andr\xe9 Malo" + __docformat__ = "restructuredtext en" + +-import ConfigParser as _config_parser ++import configparser as _config_parser + from distutils import core as _core + import os as _os + import posixpath as _posixpath +@@ -52,16 +52,16 @@ + else: + raise AssertionError("impl not in ('python', 'pypy', 'jython')") + +- pyversion = map(int, version_info[:3]) ++ pyversion = list(map(int, version_info[:3])) + if version_min: + min_required = \ +- map(int, '.'.join((version_min, '0.0.0')).split('.')[:3]) ++ list(map(int, '.'.join((version_min, '0.0.0')).split('.')[:3])) + if pyversion < min_required: + raise EnvironmentError("Need at least %s %s (vs. %s)" % ( + impl, version_min, '.'.join(map(str, pyversion)) + )) + if version_max: +- max_required = map(int, version_max.split('.')) ++ max_required = list(map(int, version_max.split('.'))) + max_required[-1] += 1 + if pyversion >= max_required: + raise EnvironmentError("Need at max %s %s (vs. %s)" % ( +@@ -188,7 +188,7 @@ + packages[ + _os.path.normpath(dirpath).replace(sep, '.') + ] = None +- packages = packages.keys() ++ packages = list(packages.keys()) + packages.sort() + return packages + +@@ -311,9 +311,9 @@ + cmd.ensure_finalized() + #from pprint import pprint; pprint(("install_data", cmd.get_inputs())) + try: +- strings = basestring ++ strings = str + except NameError: +- strings = (str, unicode) ++ strings = (str, str) + + for item in cmd.get_inputs(): + if isinstance(item, strings): +@@ -327,7 +327,7 @@ + for filename in _shell.files(item): + result.append(filename) + +- result = dict([(item, None) for item in result]).keys() ++ result = list(dict([(item, None) for item in result]).keys()) + result.sort() + return result + +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/_setup/py2/shell.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/_setup/py2/shell.py 2025-01-16 02:26:08.571846541 +0800 +@@ -21,9 +21,9 @@ + + Shell utilities. 
+ """ +-from __future__ import generators + +-__author__ = u"Andr\xe9 Malo" ++ ++__author__ = "Andr\xe9 Malo" + __docformat__ = "restructuredtext en" + + import errno as _errno +@@ -49,7 +49,7 @@ + ExitError.__init__(self, code) + import signal as _signal + self.signal = signal +- for key, val in vars(_signal).iteritems(): ++ for key, val in vars(_signal).items(): + if key.startswith('SIG') and not key.startswith('SIG_'): + if val == signal: + self.signalstr = key[3:] +@@ -78,7 +78,7 @@ + """ Remove a file """ + try: + _os.unlink(native(dest)) +- except OSError, e: ++ except OSError as e: + if _errno.ENOENT != e.errno: + raise + +@@ -87,7 +87,7 @@ + dest = native(dest) + if _os.path.exists(dest): + for path in files(dest, '*'): +- _os.chmod(native(path), 0644) ++ _os.chmod(native(path), 0o644) + _shutil.rmtree(dest) + + +@@ -136,15 +136,15 @@ + j = _tempfile._counter.get_next() # pylint: disable = E1101, W0212 + fname = _os.path.join(dir, prefix + str(j) + suffix) + try: +- fd = _os.open(fname, flags, 0600) +- except OSError, e: ++ fd = _os.open(fname, flags, 0o600) ++ except OSError as e: + if e.errno == _errno.EEXIST: + count -= 1 + continue + raise + _set_cloexec(fd) + return fd, _os.path.abspath(fname) +- raise IOError, (_errno.EEXIST, "No usable temporary file name found") ++ raise IOError(_errno.EEXIST, "No usable temporary file name found") + + + def _pipespawn(argv, env): +@@ -234,7 +234,7 @@ + res = proc.wait() + if res != 0: + if res == 2: +- signal, code = map(int, result.splitlines()[-1].split()) ++ signal, code = list(map(int, result.splitlines()[-1].split())) + raise SignalError(code, signal) + elif res == 3: + code = int(result.splitlines()[-1].strip()) +@@ -346,7 +346,7 @@ + + echo = kwargs.get('echo') + if echo: +- print ' '.join(argv) ++ print(' '.join(argv)) + filepipe = kwargs.get('filepipe') + if filepipe: + return _filepipespawn( +@@ -376,7 +376,7 @@ + + try: + names = listdir(top) +- except error, err: ++ except error as err: + if onerror is not None: + onerror(err) + return +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/_setup/py2/util.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/_setup/py2/util.py 2025-01-16 02:26:08.571846541 +0800 +@@ -21,7 +21,7 @@ + + Setup utilities. + """ +-__author__ = u"Andr\xe9 Malo" ++__author__ = "Andr\xe9 Malo" + __docformat__ = "restructuredtext en" + + try: +@@ -29,16 +29,16 @@ + except ImportError: + class log(object): + def info(self, value): +- print value ++ print(value) + def debug(self, value): + pass + log = log() + + from distutils import util as _util + try: +- from ConfigParser import SafeConfigParser ++ from configparser import SafeConfigParser + except ImportError: +- import ConfigParser as _config_parser ++ import configparser as _config_parser + class SafeConfigParser(_config_parser.ConfigParser): + """ Safe config parser """ + def _interpolate(self, section, option, rawval, vars): +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/_setup/py2/term/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/_setup/py2/term/__init__.py 2025-01-16 02:26:08.571846541 +0800 +@@ -21,7 +21,7 @@ + + Terminal tools, not distributed. 
+ """ +-__author__ = u"Andr\xe9 Malo" ++__author__ = "Andr\xe9 Malo" + __docformat__ = "restructuredtext en" + + # pylint: disable = W0611 +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/_setup/py2/term/_term.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/_setup/py2/term/_term.py 2025-01-16 02:26:08.571846541 +0800 +@@ -19,7 +19,7 @@ + Terminal writer + ================= + """ +-__author__ = u"Andr\xe9 Malo" ++__author__ = "Andr\xe9 Malo" + __docformat__ = "restructuredtext en" + + import sys as _sys +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/_setup/py3/shell.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/_setup/py3/shell.py 2025-01-16 02:26:08.571846541 +0800 +@@ -48,7 +48,7 @@ + ExitError.__init__(self, code) + import signal as _signal + self.signal = signal +- for key, val in vars(_signal).items(): ++ for key, val in list(vars(_signal).items()): + if key.startswith('SIG') and not key.startswith('SIG_'): + if val == signal: + self.signalstr = key[3:] +@@ -255,7 +255,7 @@ + + echo = kwargs.get('echo') + if echo: +- print(' '.join(argv)) ++ print((' '.join(argv))) + filepipe = kwargs.get('filepipe') + if filepipe: + return _filepipespawn( +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/bench/cssmin.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/bench/cssmin.py 2025-01-16 02:26:08.571846541 +0800 +@@ -25,7 +25,7 @@ + """ + + try: +- from StringIO import StringIO # The pure-Python StringIO supports unicode. ++ from io import StringIO # The pure-Python StringIO supports unicode. 
+ except ImportError: + from io import StringIO + import re +@@ -129,7 +129,7 @@ + regex = re.compile(r"rgb\s*\(\s*([0-9,\s]+)\s*\)") + match = regex.search(css) + while match: +- colors = map(lambda s: s.strip(), match.group(1).split(",")) ++ colors = [s.strip() for s in match.group(1).split(",")] + hexcolor = '#%.2x%.2x%.2x' % tuple(map(int, colors)) + css = css.replace(match.group(), hexcolor) + match = regex.search(css) +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/bench/main.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/bench/main.py 2025-01-16 02:26:08.571846541 +0800 +@@ -64,9 +64,9 @@ + p_03__rcssmin = _p_03__rcssmin() + except ImportError: + import_notes.append("_rcssmin (C-Port) not available") +- print(import_notes[-1]) ++ print((import_notes[-1])) + +-print("Python Release: %s" % ".".join(map(str, _sys.version_info[:3]))) ++print(("Python Release: %s" % ".".join(map(str, _sys.version_info[:3])))) + print("") + + +@@ -113,7 +113,7 @@ + + ports = [item for item in dir(cssmins) if item.startswith('p_')] + ports.sort() +- space = max(map(len, ports)) - 4 ++ space = max(list(map(len, ports))) - 4 + ports = [(item[5:], getattr(cssmins, item).cssmin) for item in ports] + flush = _sys.stdout.flush + +@@ -159,7 +159,7 @@ + + xcount = count + while True: +- counted = [None for _ in xrange(xcount)] ++ counted = [None for _ in range(xcount)] + start = _time.time() + for _ in counted: + cssmin(style) +@@ -199,20 +199,20 @@ + opts, args = _getopt.getopt(argv, "hc:p:", ["help"]) + except getopt.GetoptError: + e = _sys.exc_info()[0](_sys.exc_info()[1]) +- print >> _sys.stderr, "%s\nTry %s -mbench.main --help" % ( ++ print("%s\nTry %s -mbench.main --help" % ( + e, + _os.path.basename(_sys.executable), +- ) ++ ), file=_sys.stderr) + _sys.exit(2) + + count, pickle = 10, None + for key, value in opts: + if key in ("-h", "--help"): +- print >> _sys.stderr, ( ++ print(( + "%s -mbench.main [-c count] [-p file] cssfile ..." % ( + _os.path.basename(_sys.executable), + ) +- ) ++ ), file=_sys.stderr) + _sys.exit(0) + elif key == '-c': + count = int(value) +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/bench/write.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rcssmin/bench/write.py 2025-01-16 02:26:08.571846541 +0800 +@@ -47,7 +47,7 @@ + + + try: +- unicode ++ str + except NameError: + def uni(v): + if hasattr(v, 'decode'): +@@ -55,7 +55,7 @@ + return str(v) + else: + def uni(v): +- if isinstance(v, unicode): ++ if isinstance(v, str): + return v.encode('utf-8') + return str(v) + +@@ -140,10 +140,10 @@ + + # calculate column widths (global for all tables) + for idx, row in enumerate(rows): +- widths[idx] = max(widths[idx], max(map(len, row))) ++ widths[idx] = max(widths[idx], max(list(map(len, row)))) + + # ... and transpose it back. 
+- tables.append(zip(*rows)) ++ tables.append(list(zip(*rows))) + pythons.append((version, tables)) + + if last_version.startswith('2.'): +@@ -302,20 +302,20 @@ + opts, args = _getopt.getopt(argv, "hp:t:", ["help"]) + except getopt.GetoptError: + e = _sys.exc_info()[0](_sys.exc_info()[1]) +- print >> _sys.stderr, "%s\nTry %s -mbench.write --help" % ( ++ print("%s\nTry %s -mbench.write --help" % ( + e, + _os.path.basename(_sys.executable), +- ) ++ ), file=_sys.stderr) + _sys.exit(2) + + plain, table = None, None + for key, value in opts: + if key in ("-h", "--help"): +- print >> _sys.stderr, ( ++ print(( + "%s -mbench.write [-p plain] [-t table] 1: + target = telems + name[:-1] + tmap.setdefault(_posixpath.join(*target), []).append(fname) +- return tmap.items() ++ return list(tmap.items()) + + + class Documentation(Data): +@@ -155,7 +155,7 @@ + mpmap.setdefault(ext, []).append(manpage) + return [cls(manpages, prefix=_posixpath.join( + 'share', 'man', 'man%s' % section, +- )) for section, manpages in mpmap.items()] ++ )) for section, manpages in list(mpmap.items())] + dispatch = classmethod(dispatch) + + def flatten(self, installer): +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rjsmin/_setup/py2/dist.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rjsmin/_setup/py2/dist.py 2025-01-16 02:26:08.571846541 +0800 +@@ -21,7 +21,7 @@ + + dist utilities. + """ +-__author__ = u"Andr\xe9 Malo" ++__author__ = "Andr\xe9 Malo" + __docformat__ = "restructuredtext en" + + import sys as _sys +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rjsmin/_setup/py2/ext.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rjsmin/_setup/py2/ext.py 2025-01-16 02:26:08.571846541 +0800 +@@ -21,7 +21,7 @@ + + C extension tools. + """ +-__author__ = u"Andr\xe9 Malo" ++__author__ = "Andr\xe9 Malo" + __docformat__ = "restructuredtext en" + __test__ = False + +@@ -60,7 +60,7 @@ + + def __init__(self, *args, **kwargs): + """ Initialization """ +- if kwargs.has_key('depends'): ++ if 'depends' in kwargs: + self.depends = kwargs['depends'] or [] + else: + self.depends = [] +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rjsmin/_setup/py2/setup.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rjsmin/_setup/py2/setup.py 2025-01-16 02:26:08.571846541 +0800 +@@ -21,10 +21,10 @@ + + This module provides a wrapper around the distutils core setup. + """ +-__author__ = u"Andr\xe9 Malo" ++__author__ = "Andr\xe9 Malo" + __docformat__ = "restructuredtext en" + +-import ConfigParser as _config_parser ++import configparser as _config_parser + from distutils import core as _core + import os as _os + import posixpath as _posixpath +@@ -52,16 +52,16 @@ + else: + raise AssertionError("impl not in ('python', 'pypy', 'jython')") + +- pyversion = map(int, version_info[:3]) ++ pyversion = list(map(int, version_info[:3])) + if version_min: + min_required = \ +- map(int, '.'.join((version_min, '0.0.0')).split('.')[:3]) ++ list(map(int, '.'.join((version_min, '0.0.0')).split('.')[:3])) + if pyversion < min_required: + raise EnvironmentError("Need at least %s %s (vs. 
%s)" % ( + impl, version_min, '.'.join(map(str, pyversion)) + )) + if version_max: +- max_required = map(int, version_max.split('.')) ++ max_required = list(map(int, version_max.split('.'))) + max_required[-1] += 1 + if pyversion >= max_required: + raise EnvironmentError("Need at max %s %s (vs. %s)" % ( +@@ -188,7 +188,7 @@ + packages[ + _os.path.normpath(dirpath).replace(sep, '.') + ] = None +- packages = packages.keys() ++ packages = list(packages.keys()) + packages.sort() + return packages + +@@ -311,9 +311,9 @@ + cmd.ensure_finalized() + #from pprint import pprint; pprint(("install_data", cmd.get_inputs())) + try: +- strings = basestring ++ strings = str + except NameError: +- strings = (str, unicode) ++ strings = (str, str) + + for item in cmd.get_inputs(): + if isinstance(item, strings): +@@ -327,7 +327,7 @@ + for filename in _shell.files(item): + result.append(filename) + +- result = dict([(item, None) for item in result]).keys() ++ result = list(dict([(item, None) for item in result]).keys()) + result.sort() + return result + +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rjsmin/_setup/py2/shell.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rjsmin/_setup/py2/shell.py 2025-01-16 02:26:08.571846541 +0800 +@@ -21,9 +21,9 @@ + + Shell utilities. + """ +-from __future__ import generators + +-__author__ = u"Andr\xe9 Malo" ++ ++__author__ = "Andr\xe9 Malo" + __docformat__ = "restructuredtext en" + + import errno as _errno +@@ -49,7 +49,7 @@ + ExitError.__init__(self, code) + import signal as _signal + self.signal = signal +- for key, val in vars(_signal).iteritems(): ++ for key, val in vars(_signal).items(): + if key.startswith('SIG') and not key.startswith('SIG_'): + if val == signal: + self.signalstr = key[3:] +@@ -78,7 +78,7 @@ + """ Remove a file """ + try: + _os.unlink(native(dest)) +- except OSError, e: ++ except OSError as e: + if _errno.ENOENT != e.errno: + raise + +@@ -87,7 +87,7 @@ + dest = native(dest) + if _os.path.exists(dest): + for path in files(dest, '*'): +- _os.chmod(native(path), 0644) ++ _os.chmod(native(path), 0o644) + _shutil.rmtree(dest) + + +@@ -136,15 +136,15 @@ + j = _tempfile._counter.get_next() # pylint: disable = E1101, W0212 + fname = _os.path.join(dir, prefix + str(j) + suffix) + try: +- fd = _os.open(fname, flags, 0600) +- except OSError, e: ++ fd = _os.open(fname, flags, 0o600) ++ except OSError as e: + if e.errno == _errno.EEXIST: + count -= 1 + continue + raise + _set_cloexec(fd) + return fd, _os.path.abspath(fname) +- raise IOError, (_errno.EEXIST, "No usable temporary file name found") ++ raise IOError(_errno.EEXIST, "No usable temporary file name found") + + + def _pipespawn(argv, env): +@@ -234,7 +234,7 @@ + res = proc.wait() + if res != 0: + if res == 2: +- signal, code = map(int, result.splitlines()[-1].split()) ++ signal, code = list(map(int, result.splitlines()[-1].split())) + raise SignalError(code, signal) + elif res == 3: + code = int(result.splitlines()[-1].strip()) +@@ -346,7 +346,7 @@ + + echo = kwargs.get('echo') + if echo: +- print ' '.join(argv) ++ print(' '.join(argv)) + filepipe = kwargs.get('filepipe') + if filepipe: + return _filepipespawn( +@@ -376,7 +376,7 @@ + + try: + names = listdir(top) +- except error, err: ++ except error as err: + if onerror is not None: + onerror(err) + return +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rjsmin/_setup/py2/util.py 2023-07-18 22:12:18.000000000 
+0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rjsmin/_setup/py2/util.py 2025-01-16 02:26:08.571846541 +0800 +@@ -21,7 +21,7 @@ + + Setup utilities. + """ +-__author__ = u"Andr\xe9 Malo" ++__author__ = "Andr\xe9 Malo" + __docformat__ = "restructuredtext en" + + try: +@@ -29,16 +29,16 @@ + except ImportError: + class log(object): + def info(self, value): +- print value ++ print(value) + def debug(self, value): + pass + log = log() + + from distutils import util as _util + try: +- from ConfigParser import SafeConfigParser ++ from configparser import SafeConfigParser + except ImportError: +- import ConfigParser as _config_parser ++ import configparser as _config_parser + class SafeConfigParser(_config_parser.ConfigParser): + """ Safe config parser """ + def _interpolate(self, section, option, rawval, vars): +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rjsmin/_setup/py3/shell.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rjsmin/_setup/py3/shell.py 2025-01-16 02:26:08.571846541 +0800 +@@ -48,7 +48,7 @@ + ExitError.__init__(self, code) + import signal as _signal + self.signal = signal +- for key, val in vars(_signal).items(): ++ for key, val in list(vars(_signal).items()): + if key.startswith('SIG') and not key.startswith('SIG_'): + if val == signal: + self.signalstr = key[3:] +@@ -255,7 +255,7 @@ + + echo = kwargs.get('echo') + if echo: +- print(' '.join(argv)) ++ print((' '.join(argv))) + filepipe = kwargs.get('filepipe') + if filepipe: + return _filepipespawn( +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rjsmin/bench/jsmin.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rjsmin/bench/jsmin.py 2025-01-16 02:26:08.571846541 +0800 +@@ -32,10 +32,10 @@ + + # imports adjusted for speed (cStringIO) and python 3 (io) -- nd + try: +- from cStringIO import StringIO ++ from io import StringIO + except ImportError: + try: +- from StringIO import StringIO ++ from io import StringIO + except ImportError: + from io import StringIO + +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rjsmin/bench/jsmin_2_0_9.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rjsmin/bench/jsmin_2_0_9.py 2025-01-16 02:26:08.571846541 +0800 +@@ -29,9 +29,9 @@ + if is_3: + import io + else: +- import StringIO ++ import io + try: +- import cStringIO ++ import io + except ImportError: + cStringIO = None + +@@ -45,12 +45,12 @@ + returns a minified version of the javascript string + """ + if not is_3: +- if cStringIO and not isinstance(js, unicode): ++ if cStringIO and not isinstance(js, str): + # strings can use cStringIO for a 3x performance + # improvement, but unicode (in python2) cannot +- klass = cStringIO.StringIO ++ klass = io.StringIO + else: +- klass = StringIO.StringIO ++ klass = io.StringIO + else: + klass = io.StringIO + ins = klass(js) +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rjsmin/bench/main.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rjsmin/bench/main.py 2025-01-16 02:26:08.571846541 +0800 +@@ -53,18 +53,18 @@ + import_notes.append( + "jsmin_2_0_9 available for python 2.4 and later..." 
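The jsmin.py import shuffle above reflects the Python 3 split of StringIO/cStringIO into io.StringIO (text) and io.BytesIO (bytes); note that after the patch both fallback branches import the same io.StringIO. A minimal sketch of the distinction, not part of the patch:

    from io import BytesIO, StringIO

    buf = StringIO()
    buf.write('var x = 1;')           # text only; writing bytes raises TypeError
    assert buf.getvalue() == 'var x = 1;'

    raw = BytesIO(b'\x00\x01')        # use BytesIO for byte payloads
    assert raw.read() == b'\x00\x01'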
+ ) +- print(import_notes[-1]) ++ print((import_notes[-1])) + + import rjsmin as p_05_rjsmin + try: + import _rjsmin as p_06__rjsmin + except ImportError: + import_notes.append("_rjsmin (C-Port) not available") +- print(import_notes[-1]) ++ print((import_notes[-1])) + jsmins.p_05_rjsmin.jsmin = jsmins.p_05_rjsmin._make_jsmin( + python_only=True + ) +-print("Python Release: %s" % ".".join(map(str, _sys.version_info[:3]))) ++print(("Python Release: %s" % ".".join(map(str, _sys.version_info[:3])))) + print("") + + +@@ -111,7 +111,7 @@ + + ports = [item for item in dir(jsmins) if item.startswith('p_')] + ports.sort() +- space = max(map(len, ports)) - 4 ++ space = max(list(map(len, ports))) - 4 + ports = [(item[5:], getattr(jsmins, item).jsmin) for item in ports] + flush = _sys.stdout.flush + +@@ -157,7 +157,7 @@ + + xcount = count + while True: +- counted = [None for _ in xrange(xcount)] ++ counted = [None for _ in range(xcount)] + start = _time.time() + for _ in counted: + jsmin(script) +@@ -197,20 +197,20 @@ + opts, args = _getopt.getopt(argv, "hc:p:", ["help"]) + except getopt.GetoptError: + e = _sys.exc_info()[0](_sys.exc_info()[1]) +- print >> _sys.stderr, "%s\nTry %s -mbench.main --help" % ( ++ print("%s\nTry %s -mbench.main --help" % ( + e, + _os.path.basename(_sys.executable), +- ) ++ ), file=_sys.stderr) + _sys.exit(2) + + count, pickle = 10, None + for key, value in opts: + if key in ("-h", "--help"): +- print >> _sys.stderr, ( ++ print(( + "%s -mbench.main [-c count] [-p file] cssfile ..." % ( + _os.path.basename(_sys.executable), + ) +- ) ++ ), file=_sys.stderr) + _sys.exit(0) + elif key == '-c': + count = int(value) +--- a/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rjsmin/bench/write.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/common/py_vulcanize/third_party/rjsmin/bench/write.py 2025-01-16 02:26:08.571846541 +0800 +@@ -47,7 +47,7 @@ + + + try: +- unicode ++ str + except NameError: + def uni(v): + if hasattr(v, 'decode'): +@@ -55,7 +55,7 @@ + return str(v) + else: + def uni(v): +- if isinstance(v, unicode): ++ if isinstance(v, str): + return v.encode('utf-8') + return str(v) + +@@ -141,10 +141,10 @@ + + # calculate column widths (global for all tables) + for idx, row in enumerate(rows): +- widths[idx] = max(widths[idx], max(map(len, row))) ++ widths[idx] = max(widths[idx], max(list(map(len, row)))) + + # ... and transpose it back. +- tables.append(zip(*rows)) ++ tables.append(list(zip(*rows))) + pythons.append((version, tables)) + + if last_version.startswith('2.'): +@@ -303,20 +303,20 @@ + opts, args = _getopt.getopt(argv, "hp:t:", ["help"]) + except getopt.GetoptError: + e = _sys.exc_info()[0](_sys.exc_info()[1]) +- print >> _sys.stderr, "%s\nTry %s -mbench.write --help" % ( ++ print("%s\nTry %s -mbench.write --help" % ( + e, + _os.path.basename(_sys.executable), +- ) ++ ), file=_sys.stderr) + _sys.exit(2) + + plain, table = None, None + for key, value in opts: + if key in ("-h", "--help"): +- print >> _sys.stderr, ( ++ print(( + "%s -mbench.write [-p plain] [-t table] tag), call handle_starttag and then + handle_endtag. + """ +- ROOT_TAG_NAME = u'[document]' ++ ROOT_TAG_NAME = '[document]' + + # If the end-user gives no indication which tree builder they + # want, look for one with these features. 
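The bench/main.py and bench/write.py hunks above convert "print >> _sys.stderr, ..." to the print function, whose destination is the file= keyword. A one-line sketch, not part of the patch:

    import sys

    # Python 2: print >> _sys.stderr, "message"
    print('rJSmin exited with error code %d' % 2, file=sys.stderr)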
+@@ -135,12 +135,12 @@ + "fromEncoding", "from_encoding") + + if len(kwargs) > 0: +- arg = kwargs.keys().pop() ++ arg = list(kwargs.keys()).pop() + raise TypeError( + "__init__() got an unexpected keyword argument '%s'" % arg) + + if builder is None: +- if isinstance(features, basestring): ++ if isinstance(features, str): + features = [features] + if features is None or len(features) == 0: + features = self.DEFAULT_BUILDER_FEATURES +@@ -164,7 +164,7 @@ + # involving passing non-markup to Beautiful Soup. + # Beautiful Soup will still parse the input as markup, + # just in case that's what the user really wants. +- if (isinstance(markup, unicode) ++ if (isinstance(markup, str) + and not os.path.supports_unicode_filenames): + possible_filename = markup.encode("utf8") + else: +@@ -172,7 +172,7 @@ + is_file = False + try: + is_file = os.path.exists(possible_filename) +- except Exception, e: ++ except Exception as e: + # This is almost certainly a problem involving + # characters not valid in filenames on this + # system. Just let it go. +@@ -184,7 +184,7 @@ + # TODO: This is ugly but I couldn't get it to work in + # Python 3 otherwise. + if ((isinstance(markup, bytes) and not b' ' in markup) +- or (isinstance(markup, unicode) and not u' ' in markup)): ++ or (isinstance(markup, str) and not ' ' in markup)): + warnings.warn( + '"%s" looks like a URL. Beautiful Soup is not an HTTP client. You should probably use an HTTP client to get the document behind the URL, and feed that document to Beautiful Soup.' % markup) + +@@ -259,7 +259,7 @@ + + def endData(self, containerClass=NavigableString): + if self.current_data: +- current_data = u''.join(self.current_data) ++ current_data = ''.join(self.current_data) + # If whitespace is not preserved, and this string contains + # nothing but ASCII spaces, replace it with a single space + # or newline. +@@ -367,9 +367,9 @@ + encoding_part = '' + if eventual_encoding != None: + encoding_part = ' encoding="%s"' % eventual_encoding +- prefix = u'\n' % encoding_part ++ prefix = '\n' % encoding_part + else: +- prefix = u'' ++ prefix = '' + if not pretty_print: + indent_level = None + else: +@@ -403,4 +403,4 @@ + if __name__ == '__main__': + import sys + soup = BeautifulSoup(sys.stdin) +- print soup.prettify() ++ print(soup.prettify()) +--- a/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/dammit.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/dammit.py 2025-01-16 02:26:08.571846541 +0800 +@@ -8,7 +8,7 @@ + """ + + import codecs +-from htmlentitydefs import codepoint2name ++from html.entities import codepoint2name + import re + import logging + import string +@@ -56,7 +56,7 @@ + reverse_lookup = {} + characters_for_re = [] + for codepoint, name in list(codepoint2name.items()): +- character = unichr(codepoint) ++ character = chr(codepoint) + if codepoint != 34: + # There's no point in turning the quotation mark into + # ", unless it happens within an attribute value, which +@@ -340,9 +340,9 @@ + self.detector = EncodingDetector(markup, override_encodings, is_html) + + # Short-circuit if the data is in Unicode to begin with. 
+- if isinstance(markup, unicode) or markup == '': ++ if isinstance(markup, str) or markup == '': + self.markup = markup +- self.unicode_markup = unicode(markup) ++ self.unicode_markup = str(markup) + self.original_encoding = None + return + +@@ -425,7 +425,7 @@ + def _to_unicode(self, data, encoding, errors="strict"): + '''Given a string and its encoding, decodes the string into Unicode. + %encoding is a string recognized by encodings.aliases''' +- return unicode(data, encoding, errors) ++ return str(data, encoding, errors) + + @property + def declared_html_encoding(self): +--- a/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/diagnose.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/diagnose.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,7 +1,7 @@ + """Diagnostic functions, mainly for use when doing tech support.""" + import cProfile +-from StringIO import StringIO +-from HTMLParser import HTMLParser ++from io import StringIO ++from html.parser import HTMLParser + import bs4 + from bs4 import BeautifulSoup, __version__ + from bs4.builder import builder_registry +@@ -17,8 +17,8 @@ + + def diagnose(data): + """Diagnostic suite for isolating common problems.""" +- print "Diagnostic running on Beautiful Soup %s" % __version__ +- print "Python version %s" % sys.version ++ print("Diagnostic running on Beautiful Soup %s" % __version__) ++ print("Python version %s" % sys.version) + + basic_parsers = ["html.parser", "html5lib", "lxml"] + for name in basic_parsers: +@@ -27,44 +27,44 @@ + break + else: + basic_parsers.remove(name) +- print ( ++ print(( + "I noticed that %s is not installed. Installing it may help." % +- name) ++ name)) + + if 'lxml' in basic_parsers: + basic_parsers.append(["lxml", "xml"]) + from lxml import etree +- print "Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION)) ++ print("Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION))) + + if 'html5lib' in basic_parsers: + import html5lib +- print "Found html5lib version %s" % html5lib.__version__ ++ print("Found html5lib version %s" % html5lib.__version__) + + if hasattr(data, 'read'): + data = data.read() + elif os.path.exists(data): +- print '"%s" looks like a filename. Reading data from the file.' % data ++ print('"%s" looks like a filename. Reading data from the file.' % data) + data = open(data).read() + elif data.startswith("http:") or data.startswith("https:"): +- print '"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data +- print "You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup." ++ print('"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data) ++ print("You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup.") + return +- print ++ print() + + for parser in basic_parsers: +- print "Trying to parse your markup with %s" % parser ++ print("Trying to parse your markup with %s" % parser) + success = False + try: + soup = BeautifulSoup(data, parser) + success = True +- except Exception, e: +- print "%s could not parse the markup." % parser ++ except Exception as e: ++ print("%s could not parse the markup." 
% parser) + traceback.print_exc() + if success: +- print "Here's what %s did with the markup:" % parser +- print soup.prettify() ++ print("Here's what %s did with the markup:" % parser) ++ print(soup.prettify()) + +- print "-" * 80 ++ print("-" * 80) + + def lxml_trace(data, html=True, **kwargs): + """Print out the lxml events that occur during parsing. +@@ -74,7 +74,7 @@ + """ + from lxml import etree + for event, element in etree.iterparse(StringIO(data), html=html, **kwargs): +- print("%s, %4s, %s" % (event, element.tag, element.text)) ++ print(("%s, %4s, %s" % (event, element.tag, element.text))) + + class AnnouncingParser(HTMLParser): + """Announces HTMLParser parse events, without doing anything else.""" +@@ -156,9 +156,9 @@ + + def benchmark_parsers(num_elements=100000): + """Very basic head-to-head performance benchmark.""" +- print "Comparative parser benchmark on Beautiful Soup %s" % __version__ ++ print("Comparative parser benchmark on Beautiful Soup %s" % __version__) + data = rdoc(num_elements) +- print "Generated a large invalid HTML document (%d bytes)." % len(data) ++ print("Generated a large invalid HTML document (%d bytes)." % len(data)) + + for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]: + success = False +@@ -167,24 +167,24 @@ + soup = BeautifulSoup(data, parser) + b = time.time() + success = True +- except Exception, e: +- print "%s could not parse the markup." % parser ++ except Exception as e: ++ print("%s could not parse the markup." % parser) + traceback.print_exc() + if success: +- print "BS4+%s parsed the markup in %.2fs." % (parser, b-a) ++ print("BS4+%s parsed the markup in %.2fs." % (parser, b-a)) + + from lxml import etree + a = time.time() + etree.HTML(data) + b = time.time() +- print "Raw lxml parsed the markup in %.2fs." % (b-a) ++ print("Raw lxml parsed the markup in %.2fs." % (b-a)) + + import html5lib + parser = html5lib.HTMLParser() + a = time.time() + parser.parse(data) + b = time.time() +- print "Raw html5lib parsed the markup in %.2fs." % (b-a) ++ print("Raw html5lib parsed the markup in %.2fs." % (b-a)) + + def profile(num_elements=100000, parser="lxml"): + +--- a/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/element.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/element.py 2025-01-16 02:29:53.222105922 +0800 +@@ -1,4 +1,5 @@ + import collections ++import collections.abc + import re + import sys + import warnings +@@ -21,22 +22,22 @@ + return alias + + +-class NamespacedAttribute(unicode): ++class NamespacedAttribute(str): + + def __new__(cls, prefix, name, namespace=None): + if name is None: +- obj = unicode.__new__(cls, prefix) ++ obj = str.__new__(cls, prefix) + elif prefix is None: + # Not really namespaced. 
+- obj = unicode.__new__(cls, name) ++ obj = str.__new__(cls, name) + else: +- obj = unicode.__new__(cls, prefix + ":" + name) ++ obj = str.__new__(cls, prefix + ":" + name) + obj.prefix = prefix + obj.name = name + obj.namespace = namespace + return obj + +-class AttributeValueWithCharsetSubstitution(unicode): ++class AttributeValueWithCharsetSubstitution(str): + """A stand-in object for a character encoding specified in HTML.""" + + class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution): +@@ -47,7 +48,7 @@ + """ + + def __new__(cls, original_value): +- obj = unicode.__new__(cls, original_value) ++ obj = str.__new__(cls, original_value) + obj.original_value = original_value + return obj + +@@ -70,9 +71,9 @@ + match = cls.CHARSET_RE.search(original_value) + if match is None: + # No substitution necessary. +- return unicode.__new__(unicode, original_value) ++ return str.__new__(str, original_value) + +- obj = unicode.__new__(cls, original_value) ++ obj = str.__new__(cls, original_value) + obj.original_value = original_value + return obj + +@@ -272,7 +273,7 @@ + def insert(self, position, new_child): + if new_child is self: + raise ValueError("Cannot insert a tag into itself.") +- if (isinstance(new_child, basestring) ++ if (isinstance(new_child, str) + and not isinstance(new_child, NavigableString)): + new_child = NavigableString(new_child) + +@@ -489,7 +490,7 @@ + result = (element for element in generator + if isinstance(element, Tag)) + return ResultSet(strainer, result) +- elif isinstance(name, basestring): ++ elif isinstance(name, str): + # Optimization to find all tags with a given name. + result = (element for element in generator + if isinstance(element, Tag) +@@ -640,7 +641,7 @@ + return self.parents + + +-class NavigableString(unicode, PageElement): ++class NavigableString(str, PageElement): + + PREFIX = '' + SUFFIX = '' +@@ -653,15 +654,15 @@ + passed in to the superclass's __new__ or the superclass won't know + how to handle non-ASCII characters. + """ +- if isinstance(value, unicode): +- return unicode.__new__(cls, value) +- return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING) ++ if isinstance(value, str): ++ return str.__new__(cls, value) ++ return str.__new__(cls, value, DEFAULT_OUTPUT_ENCODING) + + def __copy__(self): + return self + + def __getnewargs__(self): +- return (unicode(self),) ++ return (str(self),) + + def __getattr__(self, attr): + """text.string gives you text. This is for backwards +@@ -701,23 +702,23 @@ + + class CData(PreformattedString): + +- PREFIX = u'' ++ PREFIX = '' + + class ProcessingInstruction(PreformattedString): + +- PREFIX = u'' ++ PREFIX = '' + + class Comment(PreformattedString): + +- PREFIX = u'' ++ PREFIX = '' + + + class Declaration(PreformattedString): +- PREFIX = u'' ++ PREFIX = '' + + + class Doctype(PreformattedString): +@@ -734,8 +735,8 @@ + + return Doctype(value) + +- PREFIX = u'\n' ++ PREFIX = '\n' + + + class Tag(PageElement): +@@ -843,7 +844,7 @@ + for string in self._all_strings(True): + yield string + +- def get_text(self, separator=u"", strip=False, ++ def get_text(self, separator="", strip=False, + types=(NavigableString, CData)): + """ + Get all child strings, concatenated using the given separator. +@@ -915,7 +916,7 @@ + def __contains__(self, x): + return x in self.contents + +- def __nonzero__(self): ++ def __bool__(self): + "A tag is non-None even if it has no contents." 
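The element.py hunks above port bs4's unicode subclasses to str; the __new__-plus-attributes pattern carries over unchanged. A minimal sketch of the pattern (the class name here is hypothetical, not bs4's own):

    class NamespacedName(str):
        def __new__(cls, prefix, name):
            text = name if prefix is None else prefix + ':' + name
            obj = str.__new__(cls, text)
            obj.prefix = prefix       # str subclasses accept extra attributes
            obj.name = name
            return obj

    attr = NamespacedName('xml', 'lang')
    assert attr == 'xml:lang' and attr.prefix == 'xml'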
+ return True + +@@ -1025,8 +1026,8 @@ + else: + if isinstance(val, list) or isinstance(val, tuple): + val = ' '.join(val) +- elif not isinstance(val, basestring): +- val = unicode(val) ++ elif not isinstance(val, str): ++ val = str(val) + elif ( + isinstance(val, AttributeValueWithCharsetSubstitution) + and eventual_encoding is not None): +@@ -1034,7 +1035,7 @@ + + text = self.format_string(val, formatter) + decoded = ( +- unicode(key) + '=' ++ str(key) + '=' + + EntitySubstitution.quoted_attribute_value(text)) + attrs.append(decoded) + close = '' +@@ -1210,16 +1211,16 @@ + raise ValueError( + 'Final combinator "%s" is missing an argument.' % tokens[-1]) + if self._select_debug: +- print 'Running CSS selector "%s"' % selector ++ print('Running CSS selector "%s"' % selector) + for index, token in enumerate(tokens): + if self._select_debug: +- print ' Considering token "%s"' % token ++ print(' Considering token "%s"' % token) + recursive_candidate_generator = None + tag_name = None + if tokens[index-1] in self._selector_combinators: + # This token was consumed by the previous combinator. Skip it. + if self._select_debug: +- print ' Token was consumed by the previous combinator.' ++ print(' Token was consumed by the previous combinator.') + continue + # Each operation corresponds to a checker function, a rule + # for determining whether a candidate matches the +@@ -1325,14 +1326,14 @@ + next_token = tokens[index+1] + def recursive_select(tag): + if self._select_debug: +- print ' Calling select("%s") recursively on %s %s' % (next_token, tag.name, tag.attrs) +- print '-' * 40 ++ print(' Calling select("%s") recursively on %s %s' % (next_token, tag.name, tag.attrs)) ++ print('-' * 40) + for i in tag.select(next_token, recursive_candidate_generator): + if self._select_debug: +- print '(Recursive select picked up candidate %s %s)' % (i.name, i.attrs) ++ print('(Recursive select picked up candidate %s %s)' % (i.name, i.attrs)) + yield i + if self._select_debug: +- print '-' * 40 ++ print('-' * 40) + _use_candidate_generator = recursive_select + elif _candidate_generator is None: + # By default, a tag's candidates are all of its +@@ -1343,7 +1344,7 @@ + check = "[any]" + else: + check = tag_name +- print ' Default candidate generator, tag name="%s"' % check ++ print(' Default candidate generator, tag name="%s"' % check) + if self._select_debug: + # This is redundant with later code, but it stops + # a bunch of bogus tags from cluttering up the +@@ -1365,8 +1366,8 @@ + new_context_ids = set([]) + for tag in current_context: + if self._select_debug: +- print " Running candidate generator on %s %s" % ( +- tag.name, repr(tag.attrs)) ++ print(" Running candidate generator on %s %s" % ( ++ tag.name, repr(tag.attrs))) + for candidate in _use_candidate_generator(tag): + if not isinstance(candidate, Tag): + continue +@@ -1381,21 +1382,21 @@ + break + if checker is None or result: + if self._select_debug: +- print " SUCCESS %s %s" % (candidate.name, repr(candidate.attrs)) ++ print(" SUCCESS %s %s" % (candidate.name, repr(candidate.attrs))) + if id(candidate) not in new_context_ids: + # If a tag matches a selector more than once, + # don't include it in the context more than once. 
+ new_context.append(candidate) + new_context_ids.add(id(candidate)) + elif self._select_debug: +- print " FAILURE %s %s" % (candidate.name, repr(candidate.attrs)) ++ print(" FAILURE %s %s" % (candidate.name, repr(candidate.attrs))) + + current_context = new_context + + if self._select_debug: +- print "Final verdict:" ++ print("Final verdict:") + for i in current_context: +- print " %s %s" % (i.name, i.attrs) ++ print(" %s %s" % (i.name, i.attrs)) + return current_context + + # Old names for backwards compatibility +@@ -1439,7 +1440,7 @@ + else: + attrs = kwargs + normalized_attrs = {} +- for key, value in attrs.items(): ++ for key, value in list(attrs.items()): + normalized_attrs[key] = self._normalize_search_value(value) + + self.attrs = normalized_attrs +@@ -1448,7 +1449,7 @@ + def _normalize_search_value(self, value): + # Leave it alone if it's a Unicode string, a callable, a + # regular expression, a boolean, or None. +- if (isinstance(value, unicode) or callable(value) or hasattr(value, 'match') ++ if (isinstance(value, str) or callable(value) or hasattr(value, 'match') + or isinstance(value, bool) or value is None): + return value + +@@ -1461,7 +1462,7 @@ + new_value = [] + for v in value: + if (hasattr(v, '__iter__') and not isinstance(v, bytes) +- and not isinstance(v, unicode)): ++ and not isinstance(v, str)): + # This is almost certainly the user's mistake. In the + # interests of avoiding infinite loops, we'll let + # it through as-is rather than doing a recursive call. +@@ -1473,7 +1474,7 @@ + # Otherwise, convert it into a Unicode string. + # The unicode(str()) thing is so this will do the same thing on Python 2 + # and Python 3. +- return unicode(str(value)) ++ return str(str(value)) + + def __str__(self): + if self.text: +@@ -1488,7 +1489,7 @@ + markup = markup_name + markup_attrs = markup + call_function_with_tag_data = ( +- isinstance(self.name, collections.Callable) ++ isinstance(self.name, collections.abc.Callable) + and not isinstance(markup_name, Tag)) + + if ((not self.name) +@@ -1527,7 +1528,7 @@ + found = None + # If given a list of items, scan it for a text element that + # matches. +- if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, basestring)): ++ if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, str)): + for element in markup: + if isinstance(element, NavigableString) \ + and self.search(element): +@@ -1540,7 +1541,7 @@ + found = self.search_tag(markup) + # If it's text, make sure the text matches. + elif isinstance(markup, NavigableString) or \ +- isinstance(markup, basestring): ++ isinstance(markup, str): + if not self.name and not self.attrs and self._matches(markup, self.text): + found = markup + else: +@@ -1554,7 +1555,7 @@ + if isinstance(markup, list) or isinstance(markup, tuple): + # This should only happen when searching a multi-valued attribute + # like 'class'. +- if (isinstance(match_against, unicode) ++ if (isinstance(match_against, str) + and ' ' in match_against): + # A bit of a special case. If they try to match "foo + # bar" on a multivalue attribute's value, only accept +@@ -1574,7 +1575,7 @@ + # True matches any non-None value. + return markup is not None + +- if isinstance(match_against, collections.Callable): ++ if isinstance(match_against, collections.abc.Callable): + return match_against(markup) + + # Custom callables take the tag as an argument, but all +@@ -1589,7 +1590,7 @@ + # None matches None, False, an empty string, an empty list, and so on. 
+ return not match_against + +- if isinstance(match_against, unicode): ++ if isinstance(match_against, str): + # Exact string match + return markup == match_against + +--- a/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/testing.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/testing.py 2025-01-16 02:26:08.574013171 +0800 +@@ -225,14 +225,14 @@ + self.assertSoupEquals('', '') + + def test_entities_in_attributes_converted_to_unicode(self): +- expect = u'

' ++ expect = '

' + self.assertSoupEquals('

', expect) + self.assertSoupEquals('

', expect) + self.assertSoupEquals('

', expect) + self.assertSoupEquals('

', expect) + + def test_entities_in_text_converted_to_unicode(self): +- expect = u'

pi\N{LATIN SMALL LETTER N WITH TILDE}ata

' ++ expect = '

pi\N{LATIN SMALL LETTER N WITH TILDE}ata

' + self.assertSoupEquals("

piñata

", expect) + self.assertSoupEquals("

piñata

", expect) + self.assertSoupEquals("

piñata

", expect) +@@ -243,7 +243,7 @@ + '

I said "good day!"

') + + def test_out_of_range_entity(self): +- expect = u"\N{REPLACEMENT CHARACTER}" ++ expect = "\N{REPLACEMENT CHARACTER}" + self.assertSoupEquals("�", expect) + self.assertSoupEquals("�", expect) + self.assertSoupEquals("�", expect) +@@ -285,9 +285,9 @@ + # A seemingly innocuous document... but it's in Unicode! And + # it contains characters that can't be represented in the + # encoding found in the declaration! The horror! +- markup = u'Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!' ++ markup = 'Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!' + soup = self.soup(markup) +- self.assertEqual(u'Sacr\xe9 bleu!', soup.body.string) ++ self.assertEqual('Sacr\xe9 bleu!', soup.body.string) + + def test_soupstrainer(self): + """Parsers should be able to work with SoupStrainers.""" +@@ -327,7 +327,7 @@ + # Both XML and HTML entities are converted to Unicode characters + # during parsing. + text = "

<<sacré bleu!>>

" +- expected = u"

<<sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>

" ++ expected = "

<<sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>

" + self.assertSoupEquals(text, expected) + + def test_smart_quotes_converted_on_the_way_in(self): +@@ -337,15 +337,15 @@ + soup = self.soup(quote) + self.assertEqual( + soup.p.string, +- u"\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}") ++ "\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}") + + def test_non_breaking_spaces_converted_on_the_way_in(self): + soup = self.soup("  ") +- self.assertEqual(soup.a.string, u"\N{NO-BREAK SPACE}" * 2) ++ self.assertEqual(soup.a.string, "\N{NO-BREAK SPACE}" * 2) + + def test_entities_converted_on_the_way_out(self): + text = "

<<sacré bleu!>>

" +- expected = u"

<<sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>

".encode("utf-8") ++ expected = "

<<sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>

".encode("utf-8") + soup = self.soup(text) + self.assertEqual(soup.p.encode("utf-8"), expected) + +@@ -354,7 +354,7 @@ + # easy-to-understand document. + + # Here it is in Unicode. Note that it claims to be in ISO-Latin-1. +- unicode_html = u'

Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!

' ++ unicode_html = '

Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!

' + + # That's because we're going to encode it into ISO-Latin-1, and use + # that to test. +@@ -493,15 +493,15 @@ + self.assertTrue(b"< < hey > >" in encoded) + + def test_can_parse_unicode_document(self): +- markup = u'Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!' ++ markup = 'Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!' + soup = self.soup(markup) +- self.assertEqual(u'Sacr\xe9 bleu!', soup.root.string) ++ self.assertEqual('Sacr\xe9 bleu!', soup.root.string) + + def test_popping_namespaced_tag(self): + markup = 'b2012-07-02T20:33:42Zcd' + soup = self.soup(markup) + self.assertEqual( +- unicode(soup.rss), markup) ++ str(soup.rss), markup) + + def test_docstring_includes_correct_encoding(self): + soup = self.soup("") +@@ -532,17 +532,17 @@ + def test_closing_namespaced_tag(self): + markup = '

20010504

' + soup = self.soup(markup) +- self.assertEqual(unicode(soup.p), markup) ++ self.assertEqual(str(soup.p), markup) + + def test_namespaced_attributes(self): + markup = '' + soup = self.soup(markup) +- self.assertEqual(unicode(soup.foo), markup) ++ self.assertEqual(str(soup.foo), markup) + + def test_namespaced_attributes_xml_namespace(self): + markup = 'bar' + soup = self.soup(markup) +- self.assertEqual(unicode(soup.foo), markup) ++ self.assertEqual(str(soup.foo), markup) + + class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest): + """Smoke test for a tree builder that supports HTML5.""" +--- a/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/builder/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/builder/__init__.py 2025-01-16 02:26:08.574013171 +0800 +@@ -153,13 +153,13 @@ + universal = self.cdata_list_attributes.get('*', []) + tag_specific = self.cdata_list_attributes.get( + tag_name.lower(), None) +- for attr in attrs.keys(): ++ for attr in list(attrs.keys()): + if attr in universal or (tag_specific and attr in tag_specific): + # We have a "class"-type attribute whose string + # value is a whitespace-separated list of + # values. Split it into a list. + value = attrs[attr] +- if isinstance(value, basestring): ++ if isinstance(value, str): + values = whitespace_re.split(value) + else: + # html5lib sometimes calls setAttributes twice +--- a/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/builder/_html5lib.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/builder/_html5lib.py 2025-01-16 02:26:08.574013171 +0800 +@@ -37,7 +37,7 @@ + doc = parser.parse(markup, encoding=self.user_specified_encoding) + + # Set the character encoding detected by the tokenizer. +- if isinstance(markup, unicode): ++ if isinstance(markup, str): + # We need to special-case this because html5lib sets + # charEncoding to UTF-8 if it gets Unicode input. + doc.original_encoding = None +@@ -51,7 +51,7 @@ + + def test_fragment_to_document(self, fragment): + """See `TreeBuilder`.""" +- return u'%s' % fragment ++ return '%s' % fragment + + + class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder): +@@ -124,7 +124,7 @@ + + def appendChild(self, node): + string_child = child = None +- if isinstance(node, basestring): ++ if isinstance(node, str): + # Some other piece of code decided to pass in a string + # instead of creating a TextElement object to contain the + # string. +@@ -139,7 +139,7 @@ + else: + child = node.element + +- if not isinstance(child, basestring) and child.parent is not None: ++ if not isinstance(child, str) and child.parent is not None: + node.element.extract() + + if (string_child and self.element.contents +@@ -152,7 +152,7 @@ + old_element.replace_with(new_element) + self.soup._most_recent_element = new_element + else: +- if isinstance(node, basestring): ++ if isinstance(node, str): + # Create a brand new NavigableString from this string. + child = self.soup.new_string(node) + +@@ -183,7 +183,7 @@ + + self.soup.builder._replace_cdata_list_attribute_values( + self.name, attributes) +- for name, value in attributes.items(): ++ for name, value in list(attributes.items()): + self.element[name] = value + + # The attributes may contain variables that need substitution. 
+--- a/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/builder/_htmlparser.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/builder/_htmlparser.py 2025-01-16 03:20:11.562602638 +0800 +@@ -4,10 +4,7 @@ + 'HTMLParserTreeBuilder', + ] + +-from HTMLParser import ( +- HTMLParser, +- HTMLParseError, +- ) ++from html.parser import HTMLParser + import sys + import warnings + +@@ -72,9 +69,9 @@ + real_name = int(name) + + try: +- data = unichr(real_name) +- except (ValueError, OverflowError), e: +- data = u"\N{REPLACEMENT CHARACTER}" ++ data = chr(real_name) ++ except (ValueError, OverflowError) as e: ++ data = "\N{REPLACEMENT CHARACTER}" + + self.handle_data(data) + +@@ -142,7 +139,7 @@ + declared within markup, whether any characters had to be + replaced with REPLACEMENT CHARACTER). + """ +- if isinstance(markup, unicode): ++ if isinstance(markup, str): + yield (markup, None, None, False) + return + +@@ -158,7 +155,7 @@ + parser.soup = self.soup + try: + parser.feed(markup) +- except HTMLParseError, e: ++ except HTMLParser.error as e: + warnings.warn(RuntimeWarning( + "Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help.")) + raise e +--- a/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/builder/_lxml.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/builder/_lxml.py 2025-01-16 02:30:55.741291732 +0800 +@@ -4,8 +4,9 @@ + ] + + from io import BytesIO +-from StringIO import StringIO ++from io import StringIO + import collections ++import collections.abc + from lxml import etree + from bs4.element import Comment, Doctype, NamespacedAttribute + from bs4.builder import ( +@@ -46,7 +47,7 @@ + # Use the default parser. + parser = self.default_parser(encoding) + +- if isinstance(parser, collections.Callable): ++ if isinstance(parser, collections.abc.Callable): + # Instantiate the parser with default arguments + parser = parser(target=self, strip_cdata=False, encoding=encoding) + return parser +@@ -78,12 +79,12 @@ + + Each 4-tuple represents a strategy for parsing the document. + """ +- if isinstance(markup, unicode): ++ if isinstance(markup, str): + # We were given Unicode. Maybe lxml can parse Unicode on + # this system? + yield markup, None, document_declared_encoding, False + +- if isinstance(markup, unicode): ++ if isinstance(markup, str): + # No, apparently not. Convert the Unicode to UTF-8 and + # tell lxml to parse it as UTF-8. + yield (markup.encode("utf8"), "utf8", +@@ -102,7 +103,7 @@ + def feed(self, markup): + if isinstance(markup, bytes): + markup = BytesIO(markup) +- elif isinstance(markup, unicode): ++ elif isinstance(markup, str): + markup = StringIO(markup) + + # Call feed() at least once, even if the markup is empty, +@@ -117,7 +118,7 @@ + if len(data) != 0: + self.parser.feed(data) + self.parser.close() +- except (UnicodeDecodeError, LookupError, etree.ParserError), e: ++ except (UnicodeDecodeError, LookupError, etree.ParserError) as e: + raise ParserRejectedMarkup(str(e)) + + def close(self): +@@ -135,12 +136,12 @@ + self.nsmaps.append(None) + elif len(nsmap) > 0: + # A new namespace mapping has come into play. 
+- inverted_nsmap = dict((value, key) for key, value in nsmap.items()) ++ inverted_nsmap = dict((value, key) for key, value in list(nsmap.items())) + self.nsmaps.append(inverted_nsmap) + # Also treat the namespace mapping as a set of attributes on the + # tag, so we can recreate it later. + attrs = attrs.copy() +- for prefix, namespace in nsmap.items(): ++ for prefix, namespace in list(nsmap.items()): + attribute = NamespacedAttribute( + "xmlns", prefix, "http://www.w3.org/2000/xmlns/") + attrs[attribute] = namespace +@@ -149,7 +150,7 @@ + # from lxml with namespaces attached to their names, and + # turn then into NamespacedAttribute objects. + new_attrs = {} +- for attr, value in attrs.items(): ++ for attr, value in list(attrs.items()): + namespace, attr = self._getNsTag(attr) + if namespace is None: + new_attrs[attr] = value +@@ -207,7 +208,7 @@ + + def test_fragment_to_document(self, fragment): + """See `TreeBuilder`.""" +- return u'\n%s' % fragment ++ return '\n%s' % fragment + + + class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML): +@@ -224,10 +225,10 @@ + self.parser = self.parser_for(encoding) + self.parser.feed(markup) + self.parser.close() +- except (UnicodeDecodeError, LookupError, etree.ParserError), e: ++ except (UnicodeDecodeError, LookupError, etree.ParserError) as e: + raise ParserRejectedMarkup(str(e)) + + + def test_fragment_to_document(self, fragment): + """See `TreeBuilder`.""" +- return u'%s' % fragment ++ return '%s' % fragment +--- a/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/tests/test_html5lib.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/tests/test_html5lib.py 2025-01-16 02:26:08.574013171 +0800 +@@ -5,7 +5,7 @@ + try: + from bs4.builder import HTML5TreeBuilder + HTML5LIB_PRESENT = True +-except ImportError, e: ++except ImportError as e: + HTML5LIB_PRESENT = False + from bs4.element import SoupStrainer + from bs4.testing import ( +@@ -74,12 +74,12 @@ + def test_reparented_markup(self): + markup = '

foo

\n

bar

' + soup = self.soup(markup) +- self.assertEqual(u"

foo

\n

bar

", soup.body.decode()) ++ self.assertEqual("

foo

\n

bar

", soup.body.decode()) + self.assertEqual(2, len(soup.find_all('p'))) + + + def test_reparented_markup_ends_with_whitespace(self): + markup = '

foo

\n

bar

\n' + soup = self.soup(markup) +- self.assertEqual(u"

foo

\n

bar

\n", soup.body.decode()) ++ self.assertEqual("

foo

\n

bar

\n", soup.body.decode()) + self.assertEqual(2, len(soup.find_all('p'))) +--- a/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/tests/test_lxml.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/tests/test_lxml.py 2025-01-16 02:26:08.574013171 +0800 +@@ -7,7 +7,7 @@ + import lxml.etree + LXML_PRESENT = True + LXML_VERSION = lxml.etree.LXML_VERSION +-except ImportError, e: ++except ImportError as e: + LXML_PRESENT = False + LXML_VERSION = (0,) + +@@ -62,7 +62,7 @@ + # if one is installed. + with warnings.catch_warnings(record=True) as w: + soup = BeautifulStoneSoup("") +- self.assertEqual(u"", unicode(soup.b)) ++ self.assertEqual("", str(soup.b)) + self.assertTrue("BeautifulStoneSoup class is deprecated" in str(w[0].message)) + + def test_real_xhtml_document(self): +--- a/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/tests/test_soup.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/tests/test_soup.py 2025-01-16 02:26:08.574013171 +0800 +@@ -30,7 +30,7 @@ + try: + from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML + LXML_PRESENT = True +-except ImportError, e: ++except ImportError as e: + LXML_PRESENT = False + + PYTHON_2_PRE_2_7 = (sys.version_info < (2,7)) +@@ -39,14 +39,14 @@ + class TestConstructor(SoupTest): + + def test_short_unicode_input(self): +- data = u"

éé

" ++ data = "

éé

" + soup = self.soup(data) +- self.assertEqual(u"éé", soup.h1.string) ++ self.assertEqual("éé", soup.h1.string) + + def test_embedded_null(self): +- data = u"

foo\0bar

" ++ data = "

foo\0bar

" + soup = self.soup(data) +- self.assertEqual(u"foo\0bar", soup.h1.string) ++ self.assertEqual("foo\0bar", soup.h1.string) + + + class TestDeprecatedConstructorArguments(SoupTest): +@@ -117,9 +117,9 @@ + def test_simple_html_substitution(self): + # Unicode characters corresponding to named HTML entites + # are substituted, and no others. +- s = u"foo\u2200\N{SNOWMAN}\u00f5bar" ++ s = "foo\u2200\N{SNOWMAN}\u00f5bar" + self.assertEqual(self.sub.substitute_html(s), +- u"foo∀\N{SNOWMAN}õbar") ++ "foo∀\N{SNOWMAN}õbar") + + def test_smart_quote_substitution(self): + # MS smart quotes are a common source of frustration, so we +@@ -184,7 +184,7 @@ + + def setUp(self): + super(TestEncodingConversion, self).setUp() +- self.unicode_data = u'Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!' ++ self.unicode_data = 'Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!' + self.utf8_data = self.unicode_data.encode("utf-8") + # Just so you know what it looks like. + self.assertEqual( +@@ -204,7 +204,7 @@ + ascii = b"a" + soup_from_ascii = self.soup(ascii) + unicode_output = soup_from_ascii.decode() +- self.assertTrue(isinstance(unicode_output, unicode)) ++ self.assertTrue(isinstance(unicode_output, str)) + self.assertEqual(unicode_output, self.document_for(ascii.decode())) + self.assertEqual(soup_from_ascii.original_encoding.lower(), "utf-8") + finally: +@@ -216,7 +216,7 @@ + # is not set. + soup_from_unicode = self.soup(self.unicode_data) + self.assertEqual(soup_from_unicode.decode(), self.unicode_data) +- self.assertEqual(soup_from_unicode.foo.string, u'Sacr\xe9 bleu!') ++ self.assertEqual(soup_from_unicode.foo.string, 'Sacr\xe9 bleu!') + self.assertEqual(soup_from_unicode.original_encoding, None) + + def test_utf8_in_unicode_out(self): +@@ -224,7 +224,7 @@ + # attribute is set. + soup_from_utf8 = self.soup(self.utf8_data) + self.assertEqual(soup_from_utf8.decode(), self.unicode_data) +- self.assertEqual(soup_from_utf8.foo.string, u'Sacr\xe9 bleu!') ++ self.assertEqual(soup_from_utf8.foo.string, 'Sacr\xe9 bleu!') + + def test_utf8_out(self): + # The internal data structures can be encoded as UTF-8. +@@ -235,14 +235,14 @@ + PYTHON_2_PRE_2_7 or PYTHON_3_PRE_3_2, + "Bad HTMLParser detected; skipping test of non-ASCII characters in attribute name.") + def test_attribute_name_containing_unicode_characters(self): +- markup = u'
' ++ markup = '
' + self.assertEqual(self.soup(markup).div.encode("utf8"), markup.encode("utf8")) + + class TestUnicodeDammit(unittest.TestCase): + """Standalone tests of UnicodeDammit.""" + + def test_unicode_input(self): +- markup = u"I'm already Unicode! \N{SNOWMAN}" ++ markup = "I'm already Unicode! \N{SNOWMAN}" + dammit = UnicodeDammit(markup) + self.assertEqual(dammit.unicode_markup, markup) + +@@ -250,7 +250,7 @@ + markup = b"\x91\x92\x93\x94" + dammit = UnicodeDammit(markup) + self.assertEqual( +- dammit.unicode_markup, u"\u2018\u2019\u201c\u201d") ++ dammit.unicode_markup, "\u2018\u2019\u201c\u201d") + + def test_smart_quotes_to_xml_entities(self): + markup = b"\x91\x92\x93\x94" +@@ -273,14 +273,14 @@ + def test_detect_utf8(self): + utf8 = b"\xc3\xa9" + dammit = UnicodeDammit(utf8) +- self.assertEqual(dammit.unicode_markup, u'\xe9') ++ self.assertEqual(dammit.unicode_markup, '\xe9') + self.assertEqual(dammit.original_encoding.lower(), 'utf-8') + + def test_convert_hebrew(self): + hebrew = b"\xed\xe5\xec\xf9" + dammit = UnicodeDammit(hebrew, ["iso-8859-8"]) + self.assertEqual(dammit.original_encoding.lower(), 'iso-8859-8') +- self.assertEqual(dammit.unicode_markup, u'\u05dd\u05d5\u05dc\u05e9') ++ self.assertEqual(dammit.unicode_markup, '\u05dd\u05d5\u05dc\u05e9') + + def test_dont_see_smart_quotes_where_there_are_none(self): + utf_8 = b"\343\202\261\343\203\274\343\202\277\343\202\244 Watch" +@@ -289,12 +289,12 @@ + self.assertEqual(dammit.unicode_markup.encode("utf-8"), utf_8) + + def test_ignore_inappropriate_codecs(self): +- utf8_data = u"Räksmörgås".encode("utf-8") ++ utf8_data = "Räksmörgås".encode("utf-8") + dammit = UnicodeDammit(utf8_data, ["iso-8859-8"]) + self.assertEqual(dammit.original_encoding.lower(), 'utf-8') + + def test_ignore_invalid_codecs(self): +- utf8_data = u"Räksmörgås".encode("utf-8") ++ utf8_data = "Räksmörgås".encode("utf-8") + for bad_encoding in ['.utf8', '...', 'utF---16.!']: + dammit = UnicodeDammit(utf8_data, [bad_encoding]) + self.assertEqual(dammit.original_encoding.lower(), 'utf-8') +@@ -337,7 +337,7 @@ + bs4.dammit.chardet_dammit = noop + dammit = UnicodeDammit(doc) + self.assertEqual(True, dammit.contains_replacement_characters) +- self.assertTrue(u"\ufffd" in dammit.unicode_markup) ++ self.assertTrue("\ufffd" in dammit.unicode_markup) + + soup = BeautifulSoup(doc, "html.parser") + self.assertTrue(soup.contains_replacement_characters) +@@ -349,17 +349,17 @@ + # A document written in UTF-16LE will have its byte order marker stripped. + data = b'\xff\xfe<\x00a\x00>\x00\xe1\x00\xe9\x00<\x00/\x00a\x00>\x00' + dammit = UnicodeDammit(data) +- self.assertEqual(u"áé", dammit.unicode_markup) ++ self.assertEqual("áé", dammit.unicode_markup) + self.assertEqual("utf-16le", dammit.original_encoding) + + def test_detwingle(self): + # Here's a UTF8 document. +- utf8 = (u"\N{SNOWMAN}" * 3).encode("utf8") ++ utf8 = ("\N{SNOWMAN}" * 3).encode("utf8") + + # Here's a Windows-1252 document. + windows_1252 = ( +- u"\N{LEFT DOUBLE QUOTATION MARK}Hi, I like Windows!" +- u"\N{RIGHT DOUBLE QUOTATION MARK}").encode("windows_1252") ++ "\N{LEFT DOUBLE QUOTATION MARK}Hi, I like Windows!" ++ "\N{RIGHT DOUBLE QUOTATION MARK}").encode("windows_1252") + + # Through some unholy alchemy, they've been stuck together. 
+ doc = utf8 + windows_1252 + utf8 +@@ -374,7 +374,7 @@ + + fixed = UnicodeDammit.detwingle(doc) + self.assertEqual( +- u"☃☃☃“Hi, I like Windows!”☃☃☃", fixed.decode("utf8")) ++ "☃☃☃“Hi, I like Windows!”☃☃☃", fixed.decode("utf8")) + + def test_detwingle_ignores_multibyte_characters(self): + # Each of these characters has a UTF-8 representation ending +@@ -382,9 +382,9 @@ + # Windows-1252. But our code knows to skip over multibyte + # UTF-8 characters, so they'll survive the process unscathed. + for tricky_unicode_char in ( +- u"\N{LATIN SMALL LIGATURE OE}", # 2-byte char '\xc5\x93' +- u"\N{LATIN SUBSCRIPT SMALL LETTER X}", # 3-byte char '\xe2\x82\x93' +- u"\xf0\x90\x90\x93", # This is a CJK character, not sure which one. ++ "\N{LATIN SMALL LIGATURE OE}", # 2-byte char '\xc5\x93' ++ "\N{LATIN SUBSCRIPT SMALL LETTER X}", # 3-byte char '\xe2\x82\x93' ++ "\xf0\x90\x90\x93", # This is a CJK character, not sure which one. + ): + input = tricky_unicode_char.encode("utf8") + self.assertTrue(input.endswith(b'\x93')) +--- a/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/tests/test_tree.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/bs4/tests/test_tree.py 2025-01-16 02:26:08.574013171 +0800 +@@ -67,8 +67,8 @@ + self.assertEqual(soup.find("b").string, "2") + + def test_unicode_text_find(self): +- soup = self.soup(u'

Räksmörgås

') +- self.assertEqual(soup.find(text=u'Räksmörgås'), u'Räksmörgås') ++ soup = self.soup('

Räksmörgås

') ++ self.assertEqual(soup.find(text='Räksmörgås'), 'Räksmörgås') + + def test_find_everything(self): + """Test an optimization that finds all tags.""" +@@ -87,16 +87,16 @@ + """You can search the tree for text nodes.""" + soup = self.soup("Foobar\xbb") + # Exact match. +- self.assertEqual(soup.find_all(text="bar"), [u"bar"]) ++ self.assertEqual(soup.find_all(text="bar"), ["bar"]) + # Match any of a number of strings. + self.assertEqual( +- soup.find_all(text=["Foo", "bar"]), [u"Foo", u"bar"]) ++ soup.find_all(text=["Foo", "bar"]), ["Foo", "bar"]) + # Match a regular expression. + self.assertEqual(soup.find_all(text=re.compile('.*')), +- [u"Foo", u"bar", u'\xbb']) ++ ["Foo", "bar", '\xbb']) + # Match anything. + self.assertEqual(soup.find_all(text=True), +- [u"Foo", u"bar", u'\xbb']) ++ ["Foo", "bar", '\xbb']) + + def test_find_all_limit(self): + """You can limit the number of items returned by find_all.""" +@@ -227,8 +227,8 @@ + ["Matching a.", "Matching b."]) + + def test_find_all_by_utf8_attribute_value(self): +- peace = u"םולש".encode("utf8") +- data = u''.encode("utf8") ++ peace = "םולש".encode("utf8") ++ data = ''.encode("utf8") + soup = self.soup(data) + self.assertEqual([soup.a], soup.find_all(title=peace)) + self.assertEqual([soup.a], soup.find_all(title=peace.decode("utf8"))) +@@ -1287,7 +1287,7 @@ + + def test_unicode_pickle(self): + # A tree containing Unicode characters can be pickled. +- html = u"\N{SNOWMAN}" ++ html = "\N{SNOWMAN}" + soup = self.soup(html) + dumped = pickle.dumps(soup, pickle.HIGHEST_PROTOCOL) + loaded = pickle.loads(dumped) +@@ -1297,17 +1297,17 @@ + class TestSubstitutions(SoupTest): + + def test_default_formatter_is_minimal(self): +- markup = u"<<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>" ++ markup = "<<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>" + soup = self.soup(markup) + decoded = soup.decode(formatter="minimal") + # The < is converted back into < but the e-with-acute is left alone. + self.assertEqual( + decoded, + self.document_for( +- u"<<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>")) ++ "<<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>")) + + def test_formatter_html(self): +- markup = u"<<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>" ++ markup = "<<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>" + soup = self.soup(markup) + decoded = soup.decode(formatter="html") + self.assertEqual( +@@ -1315,49 +1315,49 @@ + self.document_for("<<Sacré bleu!>>")) + + def test_formatter_minimal(self): +- markup = u"<<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>" ++ markup = "<<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>" + soup = self.soup(markup) + decoded = soup.decode(formatter="minimal") + # The < is converted back into < but the e-with-acute is left alone. + self.assertEqual( + decoded, + self.document_for( +- u"<<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>")) ++ "<<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>")) + + def test_formatter_null(self): +- markup = u"<<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>" ++ markup = "<<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>" + soup = self.soup(markup) + decoded = soup.decode(formatter=None) + # Neither the angle brackets nor the e-with-acute are converted. + # This is not valid HTML, but it's what the user wanted. 
+ self.assertEqual(decoded, +- self.document_for(u"<>")) ++ self.document_for("<>")) + + def test_formatter_custom(self): +- markup = u"<foo>bar" ++ markup = "<foo>bar" + soup = self.soup(markup) + decoded = soup.decode(formatter = lambda x: x.upper()) + # Instead of normal entity conversion code, the custom + # callable is called on every string. + self.assertEqual( + decoded, +- self.document_for(u"BAR")) ++ self.document_for("BAR")) + + def test_formatter_is_run_on_attribute_values(self): +- markup = u'e' ++ markup = 'e' + soup = self.soup(markup) + a = soup.a + +- expect_minimal = u'e' ++ expect_minimal = 'e' + + self.assertEqual(expect_minimal, a.decode()) + self.assertEqual(expect_minimal, a.decode(formatter="minimal")) + +- expect_html = u'e' ++ expect_html = 'e' + self.assertEqual(expect_html, a.decode(formatter="html")) + + self.assertEqual(markup, a.decode(formatter=None)) +- expect_upper = u'E' ++ expect_upper = 'E' + self.assertEqual(expect_upper, a.decode(formatter=lambda x: x.upper())) + + def test_formatter_skips_script_tag_for_html_documents(self): +@@ -1383,7 +1383,7 @@ + # Everything outside the
 tag is reformatted, but everything
+         # inside is left alone.
+         self.assertEqual(
+-            u'
\n foo\n
  \tbar\n  \n  
\n baz\n
', ++ '
\n foo\n
  \tbar\n  \n  
\n baz\n
', + soup.div.prettify()) + + def test_prettify_accepts_formatter(self): +@@ -1393,14 +1393,14 @@ + + def test_prettify_outputs_unicode_by_default(self): + soup = self.soup("") +- self.assertEqual(unicode, type(soup.prettify())) ++ self.assertEqual(str, type(soup.prettify())) + + def test_prettify_can_encode_data(self): + soup = self.soup("") + self.assertEqual(bytes, type(soup.prettify("utf-8"))) + + def test_html_entity_substitution_off_by_default(self): +- markup = u"Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!" ++ markup = "Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!" + soup = self.soup(markup) + encoded = soup.b.encode("utf-8") + self.assertEqual(encoded, markup.encode('utf-8')) +@@ -1444,45 +1444,45 @@ + """Test the ability to encode objects into strings.""" + + def test_unicode_string_can_be_encoded(self): +- html = u"\N{SNOWMAN}" ++ html = "\N{SNOWMAN}" + soup = self.soup(html) + self.assertEqual(soup.b.string.encode("utf-8"), +- u"\N{SNOWMAN}".encode("utf-8")) ++ "\N{SNOWMAN}".encode("utf-8")) + + def test_tag_containing_unicode_string_can_be_encoded(self): +- html = u"\N{SNOWMAN}" ++ html = "\N{SNOWMAN}" + soup = self.soup(html) + self.assertEqual( + soup.b.encode("utf-8"), html.encode("utf-8")) + + def test_encoding_substitutes_unrecognized_characters_by_default(self): +- html = u"\N{SNOWMAN}" ++ html = "\N{SNOWMAN}" + soup = self.soup(html) + self.assertEqual(soup.b.encode("ascii"), b"") + + def test_encoding_can_be_made_strict(self): +- html = u"\N{SNOWMAN}" ++ html = "\N{SNOWMAN}" + soup = self.soup(html) + self.assertRaises( + UnicodeEncodeError, soup.encode, "ascii", errors="strict") + + def test_decode_contents(self): +- html = u"\N{SNOWMAN}" ++ html = "\N{SNOWMAN}" + soup = self.soup(html) +- self.assertEqual(u"\N{SNOWMAN}", soup.b.decode_contents()) ++ self.assertEqual("\N{SNOWMAN}", soup.b.decode_contents()) + + def test_encode_contents(self): +- html = u"\N{SNOWMAN}" ++ html = "\N{SNOWMAN}" + soup = self.soup(html) + self.assertEqual( +- u"\N{SNOWMAN}".encode("utf8"), soup.b.encode_contents( ++ "\N{SNOWMAN}".encode("utf8"), soup.b.encode_contents( + encoding="utf8")) + + def test_deprecated_renderContents(self): +- html = u"\N{SNOWMAN}" ++ html = "\N{SNOWMAN}" + soup = self.soup(html) + self.assertEqual( +- u"\N{SNOWMAN}".encode("utf8"), soup.b.renderContents()) ++ "\N{SNOWMAN}".encode("utf8"), soup.b.renderContents()) + + class TestNavigableStringSubclasses(SoupTest): + +@@ -1587,7 +1587,7 @@ + els = self.soup.select('title') + self.assertEqual(len(els), 1) + self.assertEqual(els[0].name, 'title') +- self.assertEqual(els[0].contents, [u'The title']) ++ self.assertEqual(els[0].contents, ['The title']) + + def test_one_tag_many(self): + els = self.soup.select('div') +@@ -1773,12 +1773,12 @@ + # Try to select first paragraph + els = self.soup.select('div#inner p:nth-of-type(1)') + self.assertEqual(len(els), 1) +- self.assertEqual(els[0].string, u'Some text') ++ self.assertEqual(els[0].string, 'Some text') + + # Try to select third paragraph + els = self.soup.select('div#inner p:nth-of-type(3)') + self.assertEqual(len(els), 1) +- self.assertEqual(els[0].string, u'Another') ++ self.assertEqual(els[0].string, 'Another') + + # Try to select (non-existent!) 
fourth paragraph + els = self.soup.select('div#inner p:nth-of-type(4)') +@@ -1791,7 +1791,7 @@ + def test_nth_of_type_direct_descendant(self): + els = self.soup.select('div#inner > p:nth-of-type(1)') + self.assertEqual(len(els), 1) +- self.assertEqual(els[0].string, u'Some text') ++ self.assertEqual(els[0].string, 'Some text') + + def test_id_child_selector_nth_of_type(self): + self.assertSelects('#inner > p:nth-of-type(2)', ['p1']) +--- a/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/doc/source/conf.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/doc/source/conf.py 2025-01-16 02:26:08.574013171 +0800 +@@ -40,8 +40,8 @@ + master_doc = 'index' + + # General information about the project. +-project = u'Beautiful Soup' +-copyright = u'2012, Leonard Richardson' ++project = 'Beautiful Soup' ++copyright = '2012, Leonard Richardson' + + # The version info for the project you're documenting, acts as replacement for + # |version| and |release|, also used in various other places throughout the +@@ -178,8 +178,8 @@ + # Grouping the document tree into LaTeX files. List of tuples + # (source start file, target name, title, author, documentclass [howto/manual]). + latex_documents = [ +- ('index', 'BeautifulSoup.tex', u'Beautiful Soup Documentation', +- u'Leonard Richardson', 'manual'), ++ ('index', 'BeautifulSoup.tex', 'Beautiful Soup Documentation', ++ 'Leonard Richardson', 'manual'), + ] + + # The name of an image file (relative to this directory) to place at the top of +@@ -211,18 +211,18 @@ + # One entry per manual page. List of tuples + # (source start file, name, description, authors, manual section). + man_pages = [ +- ('index', 'beautifulsoup', u'Beautiful Soup Documentation', +- [u'Leonard Richardson'], 1) ++ ('index', 'beautifulsoup', 'Beautiful Soup Documentation', ++ ['Leonard Richardson'], 1) + ] + + + # -- Options for Epub output --------------------------------------------------- + + # Bibliographic Dublin Core info. +-epub_title = u'Beautiful Soup' +-epub_author = u'Leonard Richardson' +-epub_publisher = u'Leonard Richardson' +-epub_copyright = u'2012, Leonard Richardson' ++epub_title = 'Beautiful Soup' ++epub_author = 'Leonard Richardson' ++epub_publisher = 'Leonard Richardson' ++epub_copyright = '2012, Leonard Richardson' + + # The language of the text. It defaults to the language option + # or en if the language is not set. 
+--- a/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/scripts/demonstrate_parser_differences.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/beautifulsoup4/scripts/demonstrate_parser_differences.py 2025-01-16 02:26:08.574013171 +0800 +@@ -22,13 +22,13 @@ + try: + from bs4.builder import _lxml + parsers.append('lxml') +-except ImportError, e: ++except ImportError as e: + pass + + try: + from bs4.builder import _html5lib + parsers.append('html5lib') +-except ImportError, e: ++except ImportError as e: + pass + + class Demonstration(object): +@@ -47,7 +47,7 @@ + output = soup.div + else: + output = soup +- except Exception, e: ++ except Exception as e: + output = "[EXCEPTION] %s" % str(e) + self.results[parser] = output + if previous_output is None: +@@ -57,15 +57,15 @@ + return uniform_results + + def dump(self): +- print "%s: %s" % ("Markup".rjust(13), self.markup.encode("utf8")) +- for parser, output in self.results.items(): +- print "%s: %s" % (parser.rjust(13), output.encode("utf8")) ++ print("%s: %s" % ("Markup".rjust(13), self.markup.encode("utf8"))) ++ for parser, output in list(self.results.items()): ++ print("%s: %s" % (parser.rjust(13), output.encode("utf8"))) + + different_results = [] + uniform_results = [] + +-print "= Testing the following parsers: %s =" % ", ".join(parsers) +-print ++print("= Testing the following parsers: %s =" % ", ".join(parsers)) ++print() + + input_file = sys.stdin + if sys.stdin.isatty(): +@@ -83,13 +83,13 @@ + else: + different_results.append(demo) + +-print "== Markup that's handled the same in every parser ==" +-print ++print("== Markup that's handled the same in every parser ==") ++print() + for demo in uniform_results: + demo.dump() +- print +-print "== Markup that's not handled the same in every parser ==" +-print ++ print() ++print("== Markup that's not handled the same in every parser ==") ++print() + for demo in different_results: + demo.dump() +- print ++ print() +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/debug-info.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/debug-info.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import print_function, unicode_literals ++ + + import platform + import sys +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/parse.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/parse.py 2025-01-16 02:26:08.574013171 +0800 +@@ -102,7 +102,7 @@ + + def printOutput(parser, document, opts): + if opts.encoding: +- print("Encoding:", parser.tokenizer.stream.charEncoding) ++ print(("Encoding:", parser.tokenizer.stream.charEncoding)) + + for item in parser.log: + print(item) +@@ -121,7 +121,7 @@ + if not hasattr(document,'__getitem__'): + document = [document] + for fragment in document: +- print(parser.tree.testSerializer(fragment)) ++ print((parser.tree.testSerializer(fragment))) + elif opts.hilite: + sys.stdout.write(document.hilite("utf-8")) + elif opts.html: +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/setup.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/setup.py 2025-01-16 02:26:08.574013171 +0800 +@@ -34,7 +34,7 @@ + with open(os.path.join("html5lib", "__init__.py"), "rb") as init_file: + t = ast.parse(init_file.read(), 
filename="__init__.py", mode="exec") + assert isinstance(t, ast.Module) +- assignments = filter(lambda x: isinstance(x, ast.Assign), t.body) ++ assignments = [x for x in t.body if isinstance(x, ast.Assign)] + for a in assignments: + if (len(a.targets) == 1 and + isinstance(a.targets[0], ast.Name) and +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/__init__.py 2025-01-16 02:26:08.574013171 +0800 +@@ -11,7 +11,7 @@ + tree = html5lib.parse(f) + """ + +-from __future__ import absolute_import, division, unicode_literals ++ + + from .html5parser import HTMLParser, parse, parseFragment + from .treebuilders import getTreeBuilder +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/constants.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/constants.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + import string + +@@ -447,7 +447,7 @@ + } + + unadjustForeignAttributes = dict([((ns, local), qname) for qname, (prefix, local, ns) in +- adjustForeignAttributes.items()]) ++ list(adjustForeignAttributes.items())]) + + spaceCharacters = frozenset([ + "\t", +@@ -3090,7 +3090,7 @@ + tokenTypes["EmptyTag"]]) + + +-prefixes = dict([(v, k) for k, v in namespaces.items()]) ++prefixes = dict([(v, k) for k, v in list(namespaces.items())]) + prefixes["http://www.w3.org/1998/Math/MathML"] = "math" + + +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/html5parser.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/html5parser.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + from six import with_metaclass + + import types +@@ -39,7 +39,7 @@ + def method_decorator_metaclass(function): + class Decorated(type): + def __new__(meta, classname, bases, classDict): +- for attributeName, attribute in classDict.items(): ++ for attributeName, attribute in list(classDict.items()): + if isinstance(attribute, types.FunctionType): + attribute = function(attribute) + +@@ -76,7 +76,7 @@ + self.errors = [] + + self.phases = dict([(name, cls(self, self.tree)) for name, cls in +- getPhases(debug).items()]) ++ list(getPhases(debug).items())]) + + def _parse(self, stream, innerHTML=False, container="div", + encoding=None, parseMeta=True, useChardet=True, **kwargs): +@@ -269,7 +269,7 @@ + + def adjustMathMLAttributes(self, token): + replacements = {"definitionurl": "definitionURL"} +- for k, v in replacements.items(): ++ for k, v in list(replacements.items()): + if k in token["data"]: + token["data"][v] = token["data"][k] + del token["data"][k] +@@ -348,7 +348,7 @@ + def adjustForeignAttributes(self, token): + replacements = adjustForeignAttributesMap + +- for originalName in token["data"].keys(): ++ for originalName in list(token["data"].keys()): + if originalName in replacements: + foreignName = replacements[originalName] + token["data"][foreignName] = token["data"][originalName] +@@ -423,7 +423,7 @@ + def log(function): + """Logger that records which phase processes each token""" + type_names = dict((value, key) for key, value in +- constants.tokenTypes.items()) ++ 
list(constants.tokenTypes.items())) + + def wrapped(self, *args, **kwargs): + if function.__name__.startswith("process") and len(args) > 0: +@@ -484,7 +484,7 @@ + self.parser.parseError("non-html-root") + # XXX Need a check here to see if the first start tag token emitted is + # this token... If it's not, invoke self.parser.parseError(). +- for attr, value in token["data"].items(): ++ for attr, value in list(token["data"].items()): + if attr not in self.tree.openElements[0].attributes: + self.tree.openElements[0].attributes[attr] = value + self.parser.firstStartTag = False +@@ -1021,7 +1021,7 @@ + assert self.parser.innerHTML + else: + self.parser.framesetOK = False +- for attr, value in token["data"].items(): ++ for attr, value in list(token["data"].items()): + if attr not in self.tree.openElements[1].attributes: + self.tree.openElements[1].attributes[attr] = value + +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/ihatexml.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/ihatexml.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + import re + import warnings +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/inputstream.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/inputstream.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + from six import text_type + from six.moves import http_client + +@@ -587,7 +587,7 @@ + raise TypeError + return self[p:p + 1] + +- def next(self): ++ def __next__(self): + # Py2 compat + return self.__next__() + +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/sanitizer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/sanitizer.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + import re + from xml.sax.saxutils import escape, unescape +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tokenizer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tokenizer.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,7 +1,7 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + try: +- chr = unichr # flake8: noqa ++ chr = chr # flake8: noqa + except NameError: + pass + +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/utils.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from types import ModuleType + +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/filters/_base.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/filters/_base.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + + class Filter(object): +--- 
a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/filters/alphabeticalattributes.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/filters/alphabeticalattributes.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from . import _base + +@@ -13,7 +13,7 @@ + for token in _base.Filter.__iter__(self): + if token["type"] in ("StartTag", "EmptyTag"): + attrs = OrderedDict() +- for name, value in sorted(token["data"].items(), ++ for name, value in sorted(list(token["data"].items()), + key=lambda x: x[0]): + attrs[name] = value + token["data"] = attrs +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/filters/inject_meta_charset.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/filters/inject_meta_charset.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from . import _base + +@@ -23,7 +23,7 @@ + if token["name"].lower() == "meta": + # replace charset with actual encoding + has_http_equiv_content_type = False +- for (namespace, name), value in token["data"].items(): ++ for (namespace, name), value in list(token["data"].items()): + if namespace is not None: + continue + elif name.lower() == 'charset': +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/filters/lint.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/filters/lint.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from . import _base + from ..constants import cdataElements, rcdataElements, voidElements +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/filters/optionaltags.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/filters/optionaltags.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from . import _base + +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/filters/sanitizer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/filters/sanitizer.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from . 
import _base + from ..sanitizer import HTMLSanitizerMixin +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/filters/whitespace.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/filters/whitespace.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + import re + +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/serializer/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/serializer/__init__.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from .. import treewalkers + +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/serializer/htmlserializer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/serializer/htmlserializer.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + from six import text_type + + try: +@@ -229,7 +229,7 @@ + in_cdata = True + elif in_cdata: + self.serializeError("Unexpected child element of a CDATA element") +- for (attr_namespace, attr_name), attr_value in token["data"].items(): ++ for (attr_namespace, attr_name), attr_value in list(token["data"].items()): + # TODO: Add namespace support here + k = attr_name + v = attr_value +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/__init__.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1 +1 @@ +-from __future__ import absolute_import, division, unicode_literals ++ +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/mockParser.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/mockParser.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + import sys + import os +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/support.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/support.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + import os + import sys +@@ -103,7 +103,7 @@ + + def normaliseOutput(self, data): + # Remove trailing newlines +- for key, value in data.items(): ++ for key, value in list(data.items()): + if value.endswith("\n" if self.encoding else b"\n"): + data[key] = value[:-1] + return data +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/test_encoding.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/test_encoding.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + import os + import unittest +@@ -6,7 +6,7 @@ + try: + unittest.TestCase.assertEqual + except 
AttributeError: +- unittest.TestCase.assertEqual = unittest.TestCase.assertEquals ++ unittest.TestCase.assertEqual = unittest.TestCase.assertEqual + + from .support import get_data_files, TestData, test_dir, errorMessage + from html5lib import HTMLParser, inputstream +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/test_parser.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/test_parser.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + import os + import sys +@@ -90,7 +90,7 @@ + if errors: + errors = errors.split("\n") + +- for treeName, treeCls in treeTypes.items(): ++ for treeName, treeCls in list(treeTypes.items()): + for namespaceHTMLElements in (True, False): + yield (runParserTest, innerHTML, input, expected, errors, treeCls, + namespaceHTMLElements) +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/test_parser2.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/test_parser2.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + import io + +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/test_sanitizer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/test_sanitizer.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + try: + import json +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/test_serializer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/test_serializer.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + import json + import unittest +@@ -8,7 +8,7 @@ + try: + unittest.TestCase.assertEqual + except AttributeError: +- unittest.TestCase.assertEqual = unittest.TestCase.assertEquals ++ unittest.TestCase.assertEqual = unittest.TestCase.assertEqual + + import html5lib + from html5lib import constants +@@ -81,7 +81,7 @@ + + + def serialize_html(input, options): +- options = dict([(str(k), v) for k, v in options.items()]) ++ options = dict([(str(k), v) for k, v in list(options.items())]) + stream = JsonWalker(input) + serializer = HTMLSerializer(alphabetical_attributes=True, **options) + return serializer.render(stream, options.get("encoding", None)) +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/test_stream.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/test_stream.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from . 
import support # flake8: noqa + import unittest +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/test_tokenizer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/test_tokenizer.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,10 +1,10 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + import json + import warnings + import re + +-from six import unichr ++from six import chr + + from .support import get_data_files + +@@ -27,7 +27,7 @@ + tokenizer.currentToken = {"type": "startTag", + "name": self._lastStartTag} + +- types = dict((v, k) for k, v in constants.tokenTypes.items()) ++ types = dict((v, k) for k, v in list(constants.tokenTypes.items())) + for token in tokenizer: + getattr(self, 'process%s' % types[token["type"]])(token) + +@@ -143,11 +143,11 @@ + low = int(m.group(2), 16) + if 0xD800 <= high <= 0xDBFF and 0xDC00 <= low <= 0xDFFF: + cp = ((high - 0xD800) << 10) + (low - 0xDC00) + 0x10000 +- return unichr(cp) ++ return chr(cp) + else: +- return unichr(high) + unichr(low) ++ return chr(high) + chr(low) + else: +- return unichr(int(m.group(1), 16)) ++ return chr(int(m.group(1), 16)) + try: + return _surrogateRe.sub(repl, inp) + except ValueError: +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/test_treeadapters.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/test_treeadapters.py 2025-01-16 02:26:08.574013171 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from . import support # flake8: noqa + +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/test_treewalkers.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/test_treewalkers.py 2025-01-16 02:26:08.577263116 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + import os + import sys +@@ -9,7 +9,7 @@ + try: + unittest.TestCase.assertEqual + except AttributeError: +- unittest.TestCase.assertEqual = unittest.TestCase.assertEquals ++ unittest.TestCase.assertEqual = unittest.TestCase.assertEqual + + from .support import get_data_files, TestData, convertExpected + +@@ -108,7 +108,7 @@ + else: + name = token["name"] + attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value) +- for attr, value in token["data"].items()]) ++ for attr, value in list(token["data"].items())]) + yield (START, (QName(name), attrs), (None, -1, -1)) + if type == "EmptyTag": + type = "EndTag" +@@ -164,7 +164,7 @@ + {'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'body'}, + {'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'html'} + ] +- for treeName, treeCls in treeTypes.items(): ++ for treeName, treeCls in list(treeTypes.items()): + p = html5parser.HTMLParser(tree=treeCls["builder"]) + document = p.parse("a
b
c") + document = treeCls.get("adapter", lambda x: x)(document) +@@ -207,7 +207,7 @@ + def test_treewalker(): + sys.stdout.write('Testing tree walkers ' + " ".join(list(treeTypes.keys())) + "\n") + +- for treeName, treeCls in treeTypes.items(): ++ for treeName, treeCls in list(treeTypes.items()): + files = get_data_files('tree-construction') + for filename in files: + testName = os.path.basename(filename).replace(".dat", "") +@@ -271,6 +271,6 @@ + '\n href="http://example.com/cow"\n rel="alternate"\n "Example"') + ] + +- for tree in treeTypes.items(): ++ for tree in list(treeTypes.items()): + for intext, attrs, expected in sm_tests: + yield runTreewalkerEditTest, intext, expected, attrs, tree +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/test_whitespace_filter.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/test_whitespace_filter.py 2025-01-16 02:26:08.577263116 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + import unittest + +@@ -9,7 +9,7 @@ + try: + unittest.TestCase.assertEqual + except AttributeError: +- unittest.TestCase.assertEqual = unittest.TestCase.assertEquals ++ unittest.TestCase.assertEqual = unittest.TestCase.assertEqual + + + class TestCase(unittest.TestCase): +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/tokenizertotree.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/tokenizertotree.py 2025-01-16 02:26:08.577263116 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + import sys + import os +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/performance/concatenation.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/tests/performance/concatenation.py 2025-01-16 02:26:08.577263116 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + + def f1(): +@@ -33,4 +33,4 @@ + statement = "f%s" % (x + 1) + t = timeit.Timer(statement, "from __main__ import " + statement) + r = t.repeat(3, 1000000) +- print(r, min(r)) ++ print((r, min(r))) +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treeadapters/sax.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treeadapters/sax.py 2025-01-16 02:26:08.577263116 +0800 +@@ -1,11 +1,11 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from xml.sax.xmlreader import AttributesNSImpl + + from ..constants import adjustForeignAttributes, unadjustForeignAttributes + + prefix_mapping = {} +-for prefix, localName, namespace in adjustForeignAttributes.values(): ++for prefix, localName, namespace in list(adjustForeignAttributes.values()): + if prefix is not None: + prefix_mapping[prefix] = namespace + +@@ -13,7 +13,7 @@ + def to_sax(walker, handler): + """Call SAX-like content handler based on treewalker walker""" + handler.startDocument() +- for prefix, namespace in prefix_mapping.items(): ++ for prefix, namespace in list(prefix_mapping.items()): + handler.startPrefixMapping(prefix, namespace) + + for token in walker: +@@ -39,6 +39,6 @@ + else: + assert False, "Unknown token type" + +- for prefix, namespace 
in prefix_mapping.items(): ++ for prefix, namespace in list(prefix_mapping.items()): + handler.endPrefixMapping(prefix) + handler.endDocument() +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treebuilders/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treebuilders/__init__.py 2025-01-16 02:26:08.577263116 +0800 +@@ -26,7 +26,7 @@ + to the format used in the unittests + """ + +-from __future__ import absolute_import, division, unicode_literals ++ + + from ..utils import default_etree + +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treebuilders/_base.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treebuilders/_base.py 2025-01-16 02:26:08.577263116 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + from six import text_type + + from ..constants import scopingElements, tableInsertModeElements, namespaces +@@ -42,7 +42,7 @@ + def __str__(self): + attributesStr = " ".join(["%s=\"%s\"" % (name, value) + for name, value in +- self.attributes.items()]) ++ list(self.attributes.items())]) + if attributesStr: + return "<%s %s>" % (self.name, attributesStr) + else: +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treebuilders/dom.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treebuilders/dom.py 2025-01-16 02:26:08.577263116 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + + from xml.dom import minidom, Node +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treebuilders/etree.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treebuilders/etree.py 2025-01-16 02:26:08.577263116 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + from six import text_type + + import re +@@ -63,7 +63,7 @@ + # XXX - there may be a better way to do this... 
+ for key in list(self._element.attrib.keys()): + del self._element.attrib[key] +- for key, value in attributes.items(): ++ for key, value in list(attributes.items()): + if isinstance(key, tuple): + name = "{%s}%s" % (key[2], key[1]) + else: +@@ -126,7 +126,7 @@ + + def cloneNode(self): + element = type(self)(self.name, self.namespace) +- for name, value in self.attributes.items(): ++ for name, value in list(self.attributes.items()): + element.attributes[name] = value + return element + +@@ -230,7 +230,7 @@ + + if hasattr(element, "attrib"): + attributes = [] +- for name, value in element.attrib.items(): ++ for name, value in list(element.attrib.items()): + nsmatch = tag_regexp.match(name) + if nsmatch is not None: + ns, name = nsmatch.groups() +@@ -290,7 +290,7 @@ + else: + attr = " ".join(["%s=\"%s\"" % ( + filter.fromXmlName(name), value) +- for name, value in element.attrib.items()]) ++ for name, value in list(element.attrib.items())]) + rv.append("<%s %s>" % (element.tag, attr)) + if element.text: + rv.append(element.text) +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treebuilders/etree_lxml.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treebuilders/etree_lxml.py 2025-01-16 02:26:08.577263116 +0800 +@@ -9,7 +9,7 @@ + When any of these things occur, we emit a DataLossWarning + """ + +-from __future__ import absolute_import, division, unicode_literals ++ + + import warnings + import re +@@ -105,7 +105,7 @@ + + if hasattr(element, "attrib"): + attributes = [] +- for name, value in element.attrib.items(): ++ for name, value in list(element.attrib.items()): + nsmatch = tag_regexp.match(name) + if nsmatch is not None: + ns, name = nsmatch.groups() +@@ -158,7 +158,7 @@ + rv.append("<%s>" % (element.tag,)) + else: + attr = " ".join(["%s=\"%s\"" % (name, value) +- for name, value in element.attrib.items()]) ++ for name, value in list(element.attrib.items())]) + rv.append("<%s %s>" % (element.tag, attr)) + if element.text: + rv.append(element.text) +@@ -196,7 +196,7 @@ + def __init__(self, element, value={}): + self._element = element + dict.__init__(self, value) +- for key, value in self.items(): ++ for key, value in list(self.items()): + if isinstance(key, tuple): + name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1])) + else: +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treewalkers/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treewalkers/__init__.py 2025-01-16 02:26:08.577263116 +0800 +@@ -8,7 +8,7 @@ + returning an iterator generating tokens. 
+ """ + +-from __future__ import absolute_import, division, unicode_literals ++ + + __all__ = ["getTreeWalker", "pprint", "dom", "etree", "genshistream", "lxmletree", + "pulldom"] +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treewalkers/_base.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treewalkers/_base.py 2025-01-16 02:26:08.577263116 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + from six import text_type, string_types + + __all__ = ["DOCUMENT", "DOCTYPE", "TEXT", "ELEMENT", "COMMENT", "ENTITY", "UNKNOWN", +@@ -52,7 +52,7 @@ + assert all((namespace is None or isinstance(namespace, string_types)) and + isinstance(name, string_types) and + isinstance(value, string_types) +- for (namespace, name), value in attrs.items()) ++ for (namespace, name), value in list(attrs.items())) + + yield {"type": "EmptyTag", "name": to_text(name, False), + "namespace": to_text(namespace), +@@ -66,14 +66,14 @@ + assert all((namespace is None or isinstance(namespace, string_types)) and + isinstance(name, string_types) and + isinstance(value, string_types) +- for (namespace, name), value in attrs.items()) ++ for (namespace, name), value in list(attrs.items())) + + return {"type": "StartTag", + "name": text_type(name), + "namespace": to_text(namespace), + "data": dict(((to_text(namespace, False), to_text(name)), + to_text(value, False)) +- for (namespace, name), value in attrs.items())} ++ for (namespace, name), value in list(attrs.items()))} + + def endTag(self, namespace, name): + assert namespace is None or isinstance(namespace, string_types), type(namespace) +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treewalkers/dom.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treewalkers/dom.py 2025-01-16 02:26:08.577263116 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from xml.dom import Node + +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treewalkers/etree.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treewalkers/etree.py 2025-01-16 02:26:08.577263116 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + try: + from collections import OrderedDict +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treewalkers/genshistream.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treewalkers/genshistream.py 2025-01-16 02:26:08.577263116 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from genshi.core import QName + from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treewalkers/lxmletree.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treewalkers/lxmletree.py 2025-01-16 02:26:08.577263116 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + from six import text_type + + from lxml import etree +--- 
a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treewalkers/pulldom.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/treewalkers/pulldom.py 2025-01-16 02:26:08.577263116 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \ + COMMENT, IGNORABLE_WHITESPACE, CHARACTERS +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/trie/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/trie/__init__.py 2025-01-16 02:26:08.577263116 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from .py import Trie as PyTrie + +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/trie/_base.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/trie/_base.py 2025-01-16 02:26:08.577263116 +0800 +@@ -1,13 +1,13 @@ +-from __future__ import absolute_import, division, unicode_literals + +-from collections import Mapping ++ ++from collections.abc import Mapping + + + class Trie(Mapping): + """Abstract base class for tries""" + + def keys(self, prefix=None): +- keys = super().keys() ++ keys = list(super().keys()) + + if prefix is None: + return set(keys) +@@ -16,7 +16,7 @@ + return set([x for x in keys if x.startswith(prefix)]) + + def has_keys_with_prefix(self, prefix): +- for key in self.keys(): ++ for key in list(self.keys()): + if key.startswith(prefix): + return True + +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/trie/datrie.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/trie/datrie.py 2025-01-16 02:26:08.577263116 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + + from datrie import Trie as DATrie + from six import text_type +@@ -9,14 +9,14 @@ + class Trie(ABCTrie): + def __init__(self, data): + chars = set() +- for key in data.keys(): ++ for key in list(data.keys()): + if not isinstance(key, text_type): + raise TypeError("All keys must be strings") + for char in key: + chars.add(char) + + self._data = DATrie("".join(chars)) +- for key, value in data.items(): ++ for key, value in list(data.items()): + self._data[key] = value + + def __contains__(self, key): +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/trie/py.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/html5lib/trie/py.py 2025-01-16 02:26:08.577263116 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import absolute_import, division, unicode_literals ++ + from six import text_type + + from bisect import bisect_left +@@ -8,7 +8,7 @@ + + class Trie(ABCTrie): + def __init__(self, data): +- if not all(isinstance(x, text_type) for x in data.keys()): ++ if not all(isinstance(x, text_type) for x in list(data.keys())): + raise TypeError("All keys must be strings") + + self._data = data +--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/utils/entities.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/utils/entities.py 2025-01-16 
02:26:08.577263116 +0800
+@@ -20,7 +20,7 @@
+ if item)
+ 
+ def codepoint_to_character(inp):
+- return ("\U000"+inp[2:]).decode("unicode-escape")
++ return ("\\U000"+inp[2:]).encode("ascii").decode("unicode-escape")  # Py3 str has no .decode(); round-trip through bytes
+ 
+ def make_tests_json(entities):
+ test_list = make_test_list(entities)
+@@ -55,7 +55,7 @@
+ 
+ def make_test_list(entities):
+ tests = []
+- for entity_name, characters in entities.items():
++ for entity_name, characters in list(entities.items()):
+ if entity_name.endswith(";") and not subentity_exists(entity_name, entities):
+ tests.append((entity_name[:-1], "&" + entity_name[:-1], False))
+ tests.append((entity_name, characters, True))
+--- a/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/utils/spider.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/catapult/third_party/html5lib-python/utils/spider.py 2025-01-16 02:26:08.577263116 +0800
+@@ -45,7 +45,7 @@
+ except:
+ self.buggyURLs.add(self.currentURL)
+ failed = True
+- print("BUGGY:", self.currentURL)
++ print(("BUGGY:", self.currentURL))
+ self.visitedURLs.add(self.currentURL)
+ if not failed:
+ self.updateURLs(tree)
+--- a/src/3rdparty/chromium/third_party/catapult/third_party/six/setup.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/catapult/third_party/six/setup.py 2025-01-16 02:26:08.577263116 +0800
+@@ -18,7 +18,7 @@
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ # SOFTWARE.
+ 
+-from __future__ import with_statement
++
+ 
+ try:
+ from setuptools import setup
+--- a/src/3rdparty/chromium/third_party/catapult/third_party/six/six.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/catapult/third_party/six/six.py 2025-01-16 02:26:08.577263116 +0800
+@@ -20,7 +20,7 @@
+ 
+ """Utilities for writing code that runs on Python 2 and 3"""
+ 
+-from __future__ import absolute_import
++
+ 
+ import functools
+ import itertools
+@@ -50,10 +50,10 @@
+ 
+ MAXSIZE = sys.maxsize
+ else:
+- string_types = basestring,
+- integer_types = (int, long)
+- class_types = (type, types.ClassType)
+- text_type = unicode
++ string_types = str,
++ integer_types = (int, int)
++ class_types = (type, type)
++ text_type = str
+ binary_type = str
+ 
+ if sys.platform.startswith("java"):
+@@ -525,7 +525,7 @@
+ advance_iterator = next
+ except NameError:
+ def advance_iterator(it):
+- return it.next()
++ return it.__next__()
+ next = advance_iterator
+ 
+ 
+@@ -548,7 +548,7 @@
+ Iterator = object
+ else:
+ def get_unbound_function(unbound):
+- return unbound.im_func
++ return unbound.__func__
+ 
+ def create_bound_method(func, obj):
+ return types.MethodType(func, obj, obj.__class__)
+@@ -558,7 +558,7 @@
+ 
+ class Iterator(object):
+ 
+- def next(self):
++ def __next__(self):
+ return type(self).__next__(self)
+ 
+ callable = callable
+@@ -625,7 +625,7 @@
+ 
+ def u(s):
+ return s
+- unichr = chr
++ chr = unichr = chr  # keep six.unichr working; files converted by this patch import six.chr
+ import struct
+ int2byte = struct.Struct(">B").pack
+ del struct
+@@ -648,8 +648,8 @@
+ # Workaround for standalone backslash
+ 
+ def u(s):
+- return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+- unichr = unichr
++ return str(s.replace(r'\\', r'\\\\'), "unicode_escape")
++ chr = chr
+ int2byte = chr
+ 
+ def byte2int(bs):
+@@ -658,8 +658,8 @@
+ def indexbytes(buf, i):
+ return ord(buf[i])
+ iterbytes = functools.partial(itertools.imap, ord)
+- import StringIO
+- StringIO = BytesIO = StringIO.StringIO
++ import io
++ StringIO = BytesIO = io.StringIO
+ _assertCountEqual = "assertItemsEqual"
+ _assertRaisesRegex = 
"assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" +@@ -731,11 +731,11 @@ + return + + def write(data): +- if not isinstance(data, basestring): ++ if not isinstance(data, str): + data = str(data) + # If the file has an encoding, encode unicode with it. + if (isinstance(fp, file) and +- isinstance(data, unicode) and ++ isinstance(data, str) and + fp.encoding is not None): + errors = getattr(fp, "errors", None) + if errors is None: +@@ -745,13 +745,13 @@ + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: +- if isinstance(sep, unicode): ++ if isinstance(sep, str): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: +- if isinstance(end, unicode): ++ if isinstance(end, str): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") +@@ -759,12 +759,12 @@ + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: +- if isinstance(arg, unicode): ++ if isinstance(arg, str): + want_unicode = True + break + if want_unicode: +- newline = unicode("\n") +- space = unicode(" ") ++ newline = str("\n") ++ space = str(" ") + else: + newline = "\n" + space = " " +--- a/src/3rdparty/chromium/third_party/catapult/third_party/six/test_six.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/third_party/six/test_six.py 2025-01-16 02:26:08.577263116 +0800 +@@ -105,7 +105,7 @@ + + have_gdbm = True + try: +- import gdbm ++ import dbm.gnu + except ImportError: + try: + import dbm.gnu +@@ -216,31 +216,31 @@ + + def test_filter(): + from six.moves import filter +- f = filter(lambda x: x % 2, range(10)) ++ f = [x for x in range(10) if x % 2] + assert six.advance_iterator(f) == 1 + + + def test_filter_false(): + from six.moves import filterfalse +- f = filterfalse(lambda x: x % 3, range(10)) ++ f = filterfalse(lambda x: x % 3, list(range(10))) + assert six.advance_iterator(f) == 0 + assert six.advance_iterator(f) == 3 + assert six.advance_iterator(f) == 6 + + def test_map(): + from six.moves import map +- assert six.advance_iterator(map(lambda x: x + 1, range(2))) == 1 ++ assert six.advance_iterator([x + 1 for x in range(2)]) == 1 + + + def test_zip(): + from six.moves import zip +- assert six.advance_iterator(zip(range(2), range(2))) == (0, 0) ++ assert six.advance_iterator(list(zip(list(range(2)), list(range(2))))) == (0, 0) + + + @py.test.mark.skipif("sys.version_info < (2, 6)") + def test_zip_longest(): + from six.moves import zip_longest +- it = zip_longest(range(2), range(1)) ++ it = zip_longest(list(range(2)), list(range(1))) + + assert six.advance_iterator(it) == (0, 0) + assert six.advance_iterator(it) == (1, None) +@@ -392,7 +392,7 @@ + del MyDict.iterlists + setattr(MyDict, stock_method_name('lists'), f) + +- d = MyDict(zip(range(10), reversed(range(10)))) ++ d = MyDict(list(zip(list(range(10)), reversed(list(range(10)))))) + for name in "keys", "values", "items", "lists": + meth = getattr(six, "iter" + name) + it = meth(d) +@@ -421,7 +421,7 @@ + return viewwhat + return 'view' + viewwhat + +- d = dict(zip(range(10), (range(11, 20)))) ++ d = dict(list(zip(list(range(10)), (list(range(11, 20)))))) + for name in "keys", "values", "items": + meth = getattr(six, "view" + name) + view = meth(d) +@@ -429,13 +429,13 @@ + + + def test_advance_iterator(): +- assert six.next is six.advance_iterator ++ assert six.__next__ is six.advance_iterator + l = [1, 
2]
+ it = iter(l)
+ assert six.next(it) == 1
+ assert six.next(it) == 2
+- py.test.raises(StopIteration, six.next, it)
+- py.test.raises(StopIteration, six.next, it)
++ py.test.raises(StopIteration, six.next, it)  # six has no six.__next__
++ py.test.raises(StopIteration, six.next, it)
+ 
+ 
+ def test_iterator():
+@@ -500,9 +500,9 @@
+ 
+ 
+ def test_u():
+- s = six.u("hi \u0439 \U00000439 \\ \\\\ \n")
++ s = six.u("hi \u0439 \U00000439 \\ \\\\ \n")  # Py3 literals are already unicode; do not double the escapes
+ assert isinstance(s, str)
+- assert s == "hi \u0439 \U00000439 \\ \\\\ \n"
++ assert s == "hi \u0439 \U00000439 \\ \\\\ \n"
+ 
+ else:
+ 
+@@ -514,19 +514,19 @@
+ 
+ 
+ def test_u():
+- s = six.u("hi \u0439 \U00000439 \\ \\\\ \n")
+- assert isinstance(s, unicode)
++ s = six.u("hi \\u0439 \\U00000439 \\ \\\\ \n")
++ assert isinstance(s, str)
+ assert s == "hi \xd0\xb9 \xd0\xb9 \\ \\\\ \n".decode("utf8")
+ 
+ 
+ def test_u_escapes():
+- s = six.u("\u1234")
++ s = six.u("\u1234")  # must stay a single character for the len() check below
+ assert len(s) == 1
+ 
+ 
+ def test_unichr():
+- assert six.u("\u1234") == six.unichr(0x1234)
+- assert type(six.u("\u1234")) is type(six.unichr(0x1234))
++ assert six.u("\u1234") == six.chr(0x1234)
++ assert type(six.u("\u1234")) is type(six.chr(0x1234))
+ 
+ 
+ def test_int2byte():
+@@ -548,7 +548,7 @@
+ it = six.iterbytes(six.b("hi"))
+ assert six.next(it) == ord("h")
+ assert six.next(it) == ord("i")
+- py.test.raises(StopIteration, six.next, it)
++ py.test.raises(StopIteration, six.next, it)  # six.next, not six.__next__
+ 
+ 
+ def test_StringIO():
+@@ -690,14 +690,14 @@
+ out = six.BytesIO()
+ out.encoding = "utf-8"
+ out.errors = None
+- six.print_(six.u("\u053c"), end="", file=out)
++ six.print_(six.u("\u053c"), end="", file=out)  # keep the real U+053C so it UTF-8-encodes to \xd4\xbc
+ assert out.getvalue() == six.b("\xd4\xbc")
+ out = six.BytesIO()
+ out.encoding = "ascii"
+ out.errors = "strict"
+- py.test.raises(UnicodeEncodeError, six.print_, six.u("\u053c"), file=out)
++ py.test.raises(UnicodeEncodeError, six.print_, six.u("\u053c"), file=out)
+ out.errors = "backslashreplace"
+- six.print_(six.u("\u053c"), end="", file=out)
++ six.print_(six.u("\u053c"), end="", file=out)
+ assert out.getvalue() == six.b("\\u053c")
+ 
+ 
+@@ -884,7 +884,7 @@
+ 
+ if six.PY2:
+ assert str(my_test) == six.b("hello")
+- assert unicode(my_test) == six.u("hello")
++ assert str(my_test) == six.u("hello")
+ elif six.PY3:
+ assert bytes(my_test) == six.b("hello")
+ assert str(my_test) == six.u("hello")
+--- a/src/3rdparty/chromium/third_party/catapult/third_party/six/documentation/conf.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/catapult/third_party/six/documentation/conf.py 2025-01-16 02:26:08.577263116 +0800
+@@ -32,8 +32,8 @@
+ master_doc = "index"
+ 
+ # General information about the project.
+-project = u"six"
+-copyright = u"2010-2015, Benjamin Peterson"
++project = "six"
++copyright = "2010-2015, Benjamin Peterson"
+ 
+ sys.path.append(os.path.abspath(os.path.join(".", "..")))
+ from six import __version__ as six_version
+@@ -174,8 +174,8 @@
+ # Grouping the document tree into LaTeX files. List of tuples
+ # (source start file, target name, title, author, documentclass [howto/manual]).
+ latex_documents = [
+- ("index", "six.tex", u"six Documentation",
+- u"Benjamin Peterson", "manual"),
++ ("index", "six.tex", "six Documentation",
++ "Benjamin Peterson", "manual"),
+ ]
+ 
+ # The name of an image file (relative to this directory) to place at the top of
+@@ -207,8 +207,8 @@
+ # One entry per manual page. List of tuples
+ # (source start file, name, description, authors, manual section).
+ man_pages = [ +- ("index", "six", u"six Documentation", +- [u"Benjamin Peterson"], 1) ++ ("index", "six", "six Documentation", ++ ["Benjamin Peterson"], 1) + ] + + # -- Intersphinx --------------------------------------------------------------- +--- a/src/3rdparty/chromium/third_party/catapult/tracing/third_party/symbols/symbols/elf_symbolizer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/third_party/symbols/symbols/elf_symbolizer.py 2025-01-16 02:26:08.577263116 +0800 +@@ -14,7 +14,7 @@ + import threading + import time + try: +- import Queue ++ import queue + except ImportError: + import queue as Queue + +@@ -277,7 +277,7 @@ + + try: + lines = self._out_queue.get(block=True, timeout=0.25) +- except Queue.Empty: ++ except queue.Empty: + # On timeout (1/4 s.) repeat the inner loop and check if either the + # addr2line process did crash or we waited its output for too long. + continue +@@ -298,7 +298,7 @@ + while True: + try: + lines = self._out_queue.get_nowait() +- except Queue.Empty: ++ except queue.Empty: + break + self._ProcessSymbolOutput(lines) + +@@ -388,7 +388,7 @@ + # The only reason of existence of this Queue (and the corresponding + # Thread below) is the lack of a subprocess.stdout.poll_avail_lines(). + # Essentially this is a pipe able to extract a couple of lines atomically. +- self._out_queue = Queue.Queue() ++ self._out_queue = queue.Queue() + + # Start the underlying addr2line process in line buffered mode. + +--- a/src/3rdparty/chromium/third_party/catapult/tracing/third_party/symbols/symbols/elf_symbolizer_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/third_party/symbols/symbols/elf_symbolizer_unittest.py 2025-01-16 02:26:08.577263116 +0800 +@@ -11,8 +11,8 @@ + + sys.path.insert(0, os.path.dirname(__file__)) + # pylint: disable=relative-import +-import elf_symbolizer +-import mock_addr2line ++from . import elf_symbolizer ++from . import mock_addr2line + + + _MOCK_A2L_PATH = os.path.join(os.path.dirname(mock_addr2line.__file__), +@@ -58,7 +58,7 @@ + inlines=True, + max_concurrent_jobs=4) + +- for addr in xrange(1000): ++ for addr in range(1000): + exp_inline = False + exp_unknown = False + +@@ -128,7 +128,7 @@ + max_concurrent_jobs=max_concurrent_jobs, + addr2line_timeout=0.5) + +- for addr in xrange(num_symbols): ++ for addr in range(num_symbols): + exp_name = 'mock_sym_for_addr_%d' % addr + exp_source_path = 'mock_src/mock_lib1.so.c' + exp_source_line = addr +@@ -138,7 +138,7 @@ + symbolizer.Join() + + # Check that all the expected callbacks have been received. +- for addr in xrange(num_symbols): ++ for addr in range(num_symbols): + self.assertIn(addr, self._resolved_addresses) + self._resolved_addresses.remove(addr) + +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/extras/symbolizer/symbolize_trace.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/extras/symbolizer/symbolize_trace.py 2025-01-16 02:26:08.577263116 +0800 +@@ -212,9 +212,9 @@ + See crbug.com/708930 for more information about the modern format. 
+ """ + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import argparse + import bisect +@@ -516,7 +516,7 @@ + for strings_node in self._strings_nodes: + del strings_node[:] + strings_node = self._strings_nodes[0] +- for string_id, string in self._string_by_id.items(): ++ for string_id, string in list(self._string_by_id.items()): + strings_node.append({'id': string_id, 'string': string}) + + self._modified = False +@@ -607,7 +607,7 @@ + """Returns True if the wrapper was modified (see NodeWrapper) or if the + object type was overwritten.""" + return (self._modified or +- any(t.modified for t in self._type_by_id.values())) ++ any(t.modified for t in list(self._type_by_id.values()))) + + @property + def type_by_id(self): +@@ -685,7 +685,7 @@ + for types_node in self._type_name_nodes: + del types_node[:] + types_node = self._type_name_nodes[0] +- for type_node in self._type_by_id.values(): ++ for type_node in list(self._type_by_id.values()): + types_node.append({ + 'id': type_node.id, + 'name_sid': string_map.AddString(type_node.name) +@@ -795,7 +795,7 @@ + def modified(self): + """Returns True if the wrapper or any of its frames were modified.""" + return (self._modified or +- any(f.modified for f in self._frame_by_id.values())) ++ any(f.modified for f in list(self._frame_by_id.values()))) + + @property + def frame_by_id(self): +@@ -813,7 +813,7 @@ + if heap_dump_version == Trace.HEAP_DUMP_VERSION_LEGACY: + if self._stack_frames_nodes: + raise Exception('Legacy stack frames node is expected only once.') +- for frame_id, frame_node in stack_frames_node.items(): ++ for frame_id, frame_node in list(stack_frames_node.items()): + frame = self.Frame(frame_id, + frame_node['name'], + frame_node.get('parent')) +@@ -852,7 +852,7 @@ + del frames_node[:] + + frames_node = self._stack_frames_nodes[0] +- for frame in self._frame_by_id.values(): ++ for frame in list(self._frame_by_id.values()): + if self._heap_dump_version == Trace.HEAP_DUMP_VERSION_LEGACY: + frame_node = {'name': frame.name} + frames_node[frame.id] = frame_node +@@ -1045,7 +1045,7 @@ + version, stack_frames, process._string_map) + if types: + if self._frame_as_object_type: +- for alloc in allocators.values(): ++ for alloc in list(allocators.values()): + process._type_name_map.ParseNextWithPCAsTypeName( + version, types, process._stack_frame_map._frame_by_id, + alloc['types'], alloc['nodes']) +@@ -1054,7 +1054,7 @@ + process._string_map) + + self._processes = [] +- for pe in process_ext_by_pid.values(): ++ for pe in list(process_ext_by_pid.values()): + pe.process._heap_dump_version = self._heap_dump_version + if pe.process_mmaps_node: + # Now parse the most recent memory map. +@@ -1218,11 +1218,11 @@ + continue + ResolveSymbolizableFilesByNodes( + symfile_by_path, process.memory_map, +- process.stack_frame_map.frame_by_id.values(), trace_from_win) ++ list(process.stack_frame_map.frame_by_id.values()), trace_from_win) + + if frame_as_object_type: + ResolveSymbolizableFilesByNodes(symfile_by_path, process.memory_map, +- process.type_name_map.type_by_id.values(), ++ list(process.type_name_map.type_by_id.values()), + trace_from_win) + + return list(symfile_by_path.values()) +@@ -1311,7 +1311,7 @@ + _SymbolizerCallback, + inlines=True) + +- for address, frames in symfile.frames_by_address.items(): ++ for address, frames in list(symfile.frames_by_address.items()): + # SymbolizeAsync() asserts that the type of address is int. 
We operate + # on longs (since they are raw pointers possibly from 64-bit processes). + # It's OK to cast here because we're passing relative PC, which should +@@ -1328,7 +1328,7 @@ + address_os_file, address_file_path = tempfile.mkstemp() + try: + with os.fdopen(address_os_file, 'w') as address_file: +- for address in symfile.frames_by_address.keys(): ++ for address in list(symfile.frames_by_address.keys()): + address_file.write('{:x} '.format(address + load_address)) + + cmd = [self.symbolizer_path, '-arch', 'x86_64', '-l', +@@ -1363,7 +1363,7 @@ + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, + stderr=None) + addrs = ["%x" % relative_pc for relative_pc in +- symfile.frames_by_address.keys()] ++ list(symfile.frames_by_address.keys())] + (stdout_data, _) = proc.communicate('\n'.join(addrs)) + # On windows, lines may contain '\r' character: e.g. "RtlUserThreadStart\r". + stdout_data.replace('\r', '') +@@ -1415,7 +1415,7 @@ + + def SymbolizeSymfile(self, symfile): + if symfile.skip_symbolization: +- for address, frames in symfile.frames_by_address.items(): ++ for address, frames in list(symfile.frames_by_address.items()): + unsymbolized_name = ('<' + symfile.module_name + '>') + # Only append the address if there's a library. + if symfile.symbolizable_path != _UNNAMED_FILE: +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/extras/symbolizer/symbolize_trace_end_to_end_test_slow.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/extras/symbolizer/symbolize_trace_end_to_end_test_slow.py 2025-01-16 02:26:08.577263116 +0800 +@@ -9,7 +9,7 @@ + # + # To run this test suite, use ./tracing/bin/run_symbolizer_tests + +-from __future__ import print_function ++ + + import json + import os +@@ -56,7 +56,7 @@ + frames = browser.stack_frame_map.frame_by_id + exact = expectations['frame_exact'] + found = False +- for _, frame in frames.items(): ++ for _, frame in list(frames.items()): + if frame.name.strip() == exact['frame_name']: + parent_id = frame.parent_id + if frames[parent_id].name.strip() == exact['parent_name']: +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/extras/symbolizer/symbolize_trace_macho_reader.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/extras/symbolizer/symbolize_trace_macho_reader.py 2025-01-16 02:26:08.577263116 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + import re + import subprocess + from six.moves import range # pylint: disable=redefined-builtin +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/metrics/compare_samples.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/metrics/compare_samples.py 2025-01-16 02:26:08.577263116 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import os + +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/metrics/compare_samples_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/metrics/compare_samples_unittest.py 2025-01-16 02:26:08.579429745 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import json + import math +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/metrics/discover_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/metrics/discover_unittest.py 2025-01-16 02:26:08.579429745 +0800 +@@ -11,7 +11,7 @@ + self.assertFalse(discover.DiscoverMetrics([])) + + def testMetricsDiscoverNonEmpty(self): +- self.assertEquals(['sampleMetric'], discover.DiscoverMetrics( ++ self.assertEqual(['sampleMetric'], discover.DiscoverMetrics( + ['/tracing/metrics/sample_metric.html'])) + + def testMetricsDiscoverMultipleMetrics(self): +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/metrics/metric_runner.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/metrics/metric_runner.py 2025-01-16 02:26:08.579429745 +0800 +@@ -1,9 +1,9 @@ + # Copyright 2016 The Chromium Authors. All rights reserved. + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import os + import sys +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/mre/failure_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/mre/failure_unittest.py 2025-01-16 02:26:08.579429745 +0800 +@@ -24,7 +24,7 @@ + 'file://foo.html', + 'err', 'desc', 'stack') + +- self.assertEquals(failure.AsDict(), { ++ self.assertEqual(failure.AsDict(), { + 'job_guid': '1', + 'function_handle_string': 'foo.html:Foo', + 'trace_canonical_url': 'file://foo.html', +@@ -48,9 +48,9 @@ + + failure = failure_module.Failure.FromDict(failure_dict, job) + +- self.assertEquals(failure.job.guid, '1') +- self.assertEquals(failure.function_handle_string, 'foo.html:Foo') +- self.assertEquals(failure.trace_canonical_url, 'file://foo.html') +- self.assertEquals(failure.failure_type_name, 'err') +- self.assertEquals(failure.description, 'desc') +- self.assertEquals(failure.stack, 'stack') ++ self.assertEqual(failure.job.guid, '1') ++ self.assertEqual(failure.function_handle_string, 'foo.html:Foo') ++ self.assertEqual(failure.trace_canonical_url, 'file://foo.html') ++ self.assertEqual(failure.failure_type_name, 'err') ++ self.assertEqual(failure.description, 'desc') ++ self.assertEqual(failure.stack, 'stack') +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/mre/function_handle_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/mre/function_handle_unittest.py 2025-01-16 02:26:08.579429745 +0800 +@@ -20,18 +20,18 @@ + mtl0 = function_handle.ModuleToLoad(href='/foo') + mtl1 = function_handle.ModuleToLoad(filename='foo.html') + +- 
self.assertEquals(str(mtl0), 'ModuleToLoad(href="/foo")') +- self.assertEquals(str(mtl1), 'ModuleToLoad(filename="foo.html")') ++ self.assertEqual(str(mtl0), 'ModuleToLoad(href="/foo")') ++ self.assertEqual(str(mtl1), 'ModuleToLoad(filename="foo.html")') + + def testAsDict(self): + mtl0 = function_handle.ModuleToLoad(href='/foo') + mtl1 = function_handle.ModuleToLoad(filename='foo.html') + +- self.assertEquals(mtl0.AsDict(), { ++ self.assertEqual(mtl0.AsDict(), { + 'href': '/foo' + }) + +- self.assertEquals(mtl1.AsDict(), { ++ self.assertEqual(mtl1.AsDict(), { + 'filename': 'foo.html' + }) + +@@ -47,9 +47,9 @@ + mtl0 = function_handle.ModuleToLoad.FromDict(module_dict0) + mtl1 = function_handle.ModuleToLoad.FromDict(module_dict1) + +- self.assertEquals(mtl0.href, '/foo') ++ self.assertEqual(mtl0.href, '/foo') + self.assertIsNone(mtl0.filename) +- self.assertEquals(mtl1.filename, 'foo.html') ++ self.assertEqual(mtl1.filename, 'foo.html') + self.assertIsNone(mtl1.href) + + +@@ -59,7 +59,7 @@ + module = function_handle.ModuleToLoad(href='/foo') + handle = function_handle.FunctionHandle([module], 'Bar') + +- self.assertEquals( ++ self.assertEqual( + str(handle), + 'FunctionHandle(modules_to_load=[ModuleToLoad(href="/foo")], ' + 'function_name="Bar")') +@@ -68,7 +68,7 @@ + module = function_handle.ModuleToLoad(href='/foo') + handle = function_handle.FunctionHandle([module], 'Bar') + +- self.assertEquals( ++ self.assertEqual( + handle.AsDict(), { + 'modules_to_load': [{'href': '/foo'}], + 'function_name': 'Bar' +@@ -81,6 +81,6 @@ + } + + handle = function_handle.FunctionHandle.FromDict(handle_dict) +- self.assertEquals(len(handle.modules_to_load), 1) +- self.assertEquals(handle.modules_to_load[0].href, '/foo') +- self.assertEquals(handle.function_name, 'Bar') ++ self.assertEqual(len(handle.modules_to_load), 1) ++ self.assertEqual(handle.modules_to_load[0].href, '/foo') ++ self.assertEqual(handle.function_name, 'Bar') +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/mre/gtest_progress_reporter.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/mre/gtest_progress_reporter.py 2025-01-16 02:26:08.579429745 +0800 +@@ -2,7 +2,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import time + import sys +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/mre/map_single_trace.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/mre/map_single_trace.py 2025-01-16 02:26:08.579429745 +0800 +@@ -147,7 +147,7 @@ + for f in failures: + result.AddFailure(f) + +- for k, v in found_dict['pairs'].items(): ++ for k, v in list(found_dict['pairs'].items()): + result.AddPair(k, v) + + else: +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/mre/map_single_trace_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/mre/map_single_trace_unittest.py 2025-01-16 02:26:08.579429745 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import json + import os +@@ -51,7 +51,7 @@ + + self.assertFalse(result.failures) + r = result.pairs['result'] +- self.assertEquals(r['numProcesses'], 1) ++ self.assertEqual(r['numProcesses'], 1) + + + def testProcessingGiantTrace(self): +@@ -87,7 +87,7 @@ + self.assertFalse(result.failures, + msg='\n'.join(str(f) for f in result.failures)) + r = result.pairs['result'] +- self.assertEquals(r['numEvents'], 2000000) ++ self.assertEqual(r['numEvents'], 2000000) + + + +@@ -104,8 +104,8 @@ + result = map_single_trace.MapSingleTrace(trace_handle, + _Handle(map_script.filename)) + +- self.assertEquals(len(result.failures), 1) +- self.assertEquals(len(result.pairs), 0) ++ self.assertEqual(len(result.failures), 1) ++ self.assertEqual(len(result.pairs), 0) + f = result.failures[0] + self.assertIsInstance(f, map_single_trace.TraceImportFailure) + +@@ -128,8 +128,8 @@ + result = map_single_trace.MapSingleTrace(trace_handle, + _Handle(map_script.filename)) + +- self.assertEquals(len(result.failures), 1) +- self.assertEquals(len(result.pairs), 0) ++ self.assertEqual(len(result.failures), 1) ++ self.assertEqual(len(result.pairs), 0) + f = result.failures[0] + self.assertIsInstance(f, map_single_trace.MapFunctionFailure) + +@@ -149,8 +149,8 @@ + result = map_single_trace.MapSingleTrace(trace_handle, + _Handle(map_script.filename)) + +- self.assertEquals(len(result.failures), 1) +- self.assertEquals(len(result.pairs), 0) ++ self.assertEqual(len(result.failures), 1) ++ self.assertEqual(len(result.pairs), 0) + f = result.failures[0] + self.assertIsInstance(f, map_single_trace.FunctionLoadingFailure) + +@@ -168,8 +168,8 @@ + result = map_single_trace.MapSingleTrace(trace_handle, + _Handle(map_script.filename)) + +- self.assertEquals(len(result.failures), 1) +- self.assertEquals(len(result.pairs), 0) ++ self.assertEqual(len(result.failures), 1) ++ self.assertEqual(len(result.pairs), 0) + f = result.failures[0] + self.assertIsInstance(f, failure.Failure) + +@@ -188,8 +188,8 @@ + result = map_single_trace.MapSingleTrace(trace_handle, + _Handle(map_script.filename)) + +- self.assertEquals(len(result.failures), 1) +- self.assertEquals(len(result.pairs), 0) ++ self.assertEqual(len(result.failures), 1) ++ self.assertEqual(len(result.pairs), 0) + f = result.failures[0] + self.assertIsInstance(f, map_single_trace.FunctionNotDefinedFailure) + +@@ -211,8 +211,8 @@ + result = map_single_trace.MapSingleTrace(trace_handle, + _Handle(map_script.filename)) + +- self.assertEquals(len(result.failures), 1) +- self.assertEquals(len(result.pairs), 0) ++ self.assertEqual(len(result.failures), 1) ++ self.assertEqual(len(result.pairs), 0) + f = result.failures[0] + self.assertIsInstance(f, map_single_trace.NoResultsAddedFailure) + +@@ -226,7 +226,7 @@ + results.addPair('numProcesses', model.getAllProcesses().length); + }; + """) +- self.assertEquals(results['numProcesses'], 2) ++ self.assertEqual(results['numProcesses'], 2) + + def testExecuteTraceMappingCodeWithError(self): + test_trace_path = os.path.join(os.path.dirname(__file__), 'test_trace.json') +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/mre/mre_result_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/mre/mre_result_unittest.py 2025-01-16 02:26:08.579429745 +0800 +@@ -37,8 +37,8 @@ + + result_dict = result.AsDict() + +- self.assertEquals(result_dict['failures'], 
[failure.AsDict()]) +- self.assertEquals(result_dict['pairs'], {'foo': 'bar'}) ++ self.assertEqual(result_dict['failures'], [failure.AsDict()]) ++ self.assertEqual(result_dict['pairs'], {'foo': 'bar'}) + + def testAddingNonFailure(self): + result = mre_result.MreResult() +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/mre/threaded_work_queue.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/mre/threaded_work_queue.py 2025-01-16 02:26:08.579429745 +0800 +@@ -1,9 +1,9 @@ + # Copyright 2015 The Chromium Authors. All rights reserved. + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + import threading + import traceback + from six.moves import range # pylint: disable=redefined-builtin +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/mre/threaded_work_queue_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/mre/threaded_work_queue_unittest.py 2025-01-16 02:26:08.579429745 +0800 +@@ -23,7 +23,7 @@ + wq = threaded_work_queue.ThreadedWorkQueue(num_threads=1) + wq.PostAnyThreadTask(Ex) + res = wq.Run() +- self.assertEquals(res, None) ++ self.assertEqual(res, None) + + def _RunSimpleDecrementingTest(self, wq): + +@@ -39,4 +39,4 @@ + + wq.PostAnyThreadTask(Decrement) + res = wq.Run() +- self.assertEquals(res, 314) ++ self.assertEqual(res, 314) +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/proto/histogram_proto.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/proto/histogram_proto.py 2025-01-16 02:26:08.579429745 +0800 +@@ -51,14 +51,14 @@ + histogram_pb2.COUNT: 'count', + histogram_pb2.SIGMA: 'sigma', + } +- UNIT_PROTO_MAP = {v: k for k, v in PROTO_UNIT_MAP.iteritems()} ++ UNIT_PROTO_MAP = {v: k for k, v in PROTO_UNIT_MAP.items()} + + PROTO_IMPROVEMENT_DIRECTION_MAP = { + histogram_pb2.BIGGER_IS_BETTER: 'biggerIsBetter', + histogram_pb2.SMALLER_IS_BETTER: 'smallerIsBetter', + } + IMPROVEMENT_DIRECTION_PROTO_MAP = { +- v: k for k, v in PROTO_IMPROVEMENT_DIRECTION_MAP.iteritems() ++ v: k for k, v in PROTO_IMPROVEMENT_DIRECTION_MAP.items() + } + + +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/trace_data/trace_data.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/trace_data/trace_data.py 2025-01-16 02:26:08.579429745 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + import collections + import gzip + import json +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/gtest_json_converter.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/gtest_json_converter.py 2025-01-16 02:26:08.579429745 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import json + +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/gtest_json_converter_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/gtest_json_converter_unittest.py 2025-01-16 02:26:08.579429745 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import unittest + +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/heap_profiler.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/heap_profiler.py 2025-01-16 02:26:08.579429745 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import codecs + import collections +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/heap_profiler_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/heap_profiler_unittest.py 2025-01-16 02:26:08.579429745 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import unittest + +@@ -26,38 +26,38 @@ + histograms = heap_profiler.Profile(test_data) + + set_size_hist = histograms.GetHistogramNamed('heap:HistogramSet') +- self.assertEquals(set_size_hist.num_values, 1) ++ self.assertEqual(set_size_hist.num_values, 1) + # The exact sizes of python objects can vary between platforms and versions. 
+ self.assertGreater(set_size_hist.sum, 10000) + + hist_size_hist = histograms.GetHistogramNamed('heap:Histogram') +- self.assertEquals(hist_size_hist.num_values, 10) ++ self.assertEqual(hist_size_hist.num_values, 10) + self.assertGreater(hist_size_hist.sum, 10000) + + related_names = hist_size_hist.diagnostics['types'] +- self.assertEquals(related_names.Get('HistogramBin'), 'heap:HistogramBin') +- self.assertEquals(related_names.Get('DiagnosticMap'), 'heap:DiagnosticMap') ++ self.assertEqual(related_names.Get('HistogramBin'), 'heap:HistogramBin') ++ self.assertEqual(related_names.Get('DiagnosticMap'), 'heap:DiagnosticMap') + + properties = hist_size_hist.bins[33].diagnostic_maps[0]['properties'] + types = hist_size_hist.bins[33].diagnostic_maps[0]['types'] + self.assertGreater(len(properties), 3) + self.assertGreater(properties.Get('_bins'), 1000) +- self.assertEquals(len(types), 4) ++ self.assertEqual(len(types), 4) + self.assertGreater(types.Get('HistogramBin'), 1000) + self.assertGreater(types.Get('(builtin types)'), 1000) + + bin_size_hist = histograms.GetHistogramNamed('heap:HistogramBin') +- self.assertEquals(bin_size_hist.num_values, 32) ++ self.assertEqual(bin_size_hist.num_values, 32) + self.assertGreater(bin_size_hist.sum, 1000) + + diag_map_size_hist = histograms.GetHistogramNamed('heap:DiagnosticMap') +- self.assertEquals(diag_map_size_hist.num_values, 10) ++ self.assertEqual(diag_map_size_hist.num_values, 10) + self.assertGreater(diag_map_size_hist.sum, 1000) + + range_size_hist = histograms.GetHistogramNamed('heap:Range') +- self.assertEquals(range_size_hist.num_values, 22) ++ self.assertEqual(range_size_hist.num_values, 22) + self.assertGreater(range_size_hist.sum, 1000) + + stats_size_hist = histograms.GetHistogramNamed('heap:RunningStatistics') +- self.assertEquals(stats_size_hist.num_values, 10) ++ self.assertEqual(stats_size_hist.num_values, 10) + self.assertGreater(stats_size_hist.sum, 1000) +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/histogram.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/histogram.py 2025-01-16 02:26:08.579429745 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import json + import math +@@ -482,7 +482,7 @@ + return dm + + def AddDicts(self, dct): +- for name, diagnostic_dict in dct.items(): ++ for name, diagnostic_dict in list(dct.items()): + if name == 'tagmap': + continue + if isinstance(diagnostic_dict, StringTypes): +@@ -494,7 +494,7 @@ + self[name] = diagnostic.Diagnostic.FromDict(diagnostic_dict) + + def AddProtos(self, protos): +- for name, diagnostic_proto in protos.items(): ++ for name, diagnostic_proto in list(protos.items()): + if diagnostic_proto.HasField('shared_diagnostic_guid'): + self[name] = diagnostic_ref.DiagnosticRef( + diagnostic_proto.shared_diagnostic_guid) +@@ -502,7 +502,7 @@ + self[name] = diagnostic.Diagnostic.FromProto(diagnostic_proto) + + def ResolveSharedDiagnostics(self, histograms, required=False): +- for name, diag in self.items(): ++ for name, diag in list(self.items()): + if not isinstance(diag, diagnostic_ref.DiagnosticRef): + continue + guid = diag.guid +@@ -514,21 +514,21 @@ + + def Serialize(self, serializer): + return [serializer.GetOrAllocateDiagnosticId(name, diag) +- for name, diag in self.items()] ++ for name, diag in list(self.items())] + + def AsDict(self): + dct = {} +- for name, diag in self.items(): ++ for name, diag in list(self.items()): + dct[name] = diag.AsDictOrReference() + return dct + + def AsProto(self, proto): +- for name, diag in self.items(): ++ for name, diag in list(self.items()): + proto.diagnostic_map[name].CopyFrom(diag.AsProtoOrReference()) + return proto + + def Merge(self, other): +- for name, other_diagnostic in other.items(): ++ for name, other_diagnostic in list(other.items()): + if name not in self: + self[name] = other_diagnostic + continue +@@ -752,7 +752,7 @@ + if summary_options: + hist.CustomizeSummaryOptions(summary_options) + if diagnostics: +- for name, diag in diagnostics.items(): ++ for name, diag in list(diagnostics.items()): + hist.diagnostics[name] = diag + + if not isinstance(samples, list): +@@ -824,7 +824,7 @@ + upper = PercentFromString(stat_name[8:]) + self._summary_options.get('iprs').push( + Range.FromExplicitRange(lower, upper)) +- for stat_name in self._summary_options.keys(): ++ for stat_name in list(self._summary_options.keys()): + if stat_name in ['percentile', 'iprs']: + continue + self._summary_options[stat_name] = stat_name in statistics_names +@@ -849,7 +849,7 @@ + for i, bin_data in enumerate(bins): + self._DeserializeBin(i, bin_data, deserializer) + else: +- for i, bin_data in bins.items(): ++ for i, bin_data in list(bins.items()): + self._DeserializeBin(int(i), bin_data, deserializer) + + @staticmethod +@@ -902,7 +902,7 @@ + hist._bins[i] = HistogramBin(hist._bins[i].range) + hist._bins[i].FromDict(bin_dct) + else: +- for i, bin_dct in dct['allBins'].items(): ++ for i, bin_dct in list(dct['allBins'].items()): + i = int(i) + # Check whether i is a valid index before using it as a list index. + if i >= len(hist._bins) or i < 0: +@@ -944,7 +944,7 @@ + hist._description = proto.description + if proto.HasField('diagnostics'): + hist._diagnostics.AddProtos(proto.diagnostics.diagnostic_map) +- for i, bin_spec in proto.all_bins.items(): ++ for i, bin_spec in list(proto.all_bins.items()): + i = int(i) + # Check whether i is a valid index before using it as a list index. 
+ if i >= len(hist._bins) or i < 0: +@@ -1131,7 +1131,7 @@ + self.diagnostics.Merge(other.diagnostics) + + def CustomizeSummaryOptions(self, options): +- for key, value in options.items(): ++ for key, value in list(options.items()): + self._summary_options[key] = value + + def Clone(self): +@@ -1144,7 +1144,7 @@ + @property + def statistics_names(self): + names = set() +- for stat_name, option in self._summary_options.items(): ++ for stat_name, option in list(self._summary_options.items()): + if stat_name == 'percentile': + for pctile in option: + names.add('pct_' + PercentToString(pctile)) +@@ -1267,7 +1267,7 @@ + + summary_options = {} + any_overridden_summary_options = False +- for name, option in self._summary_options.items(): ++ for name, option in list(self._summary_options.items()): + if name == 'percentile' or name == 'ci': + if len(option) == 0: + continue +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/histogram_deserializer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/histogram_deserializer.py 2025-01-16 02:26:08.579429745 +0800 +@@ -17,9 +17,9 @@ + self._objects = objects + self._diagnostics = {} + if diagnostics: +- for type_name, diagnostics_by_name in diagnostics.items(): +- for name, diagnostics_by_id in diagnostics_by_name.items(): +- for i, data in diagnostics_by_id.items(): ++ for type_name, diagnostics_by_name in list(diagnostics.items()): ++ for name, diagnostics_by_id in list(diagnostics_by_name.items()): ++ for i, data in list(diagnostics_by_id.items()): + self._diagnostics[int(i)] = { + name: diagnostic.Deserialize(type_name, data, self)} + +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/histogram_serializer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/histogram_serializer.py 2025-01-16 02:26:08.579429745 +0800 +@@ -9,9 +9,9 @@ + serializer = HistogramSerializer() + histograms = [h.Serialize(serializer) for h in histograms] + diagnostics = serializer._diagnostics_by_type +- for diagnostics_by_name in diagnostics.values(): +- for diagnostics_by_id in diagnostics_by_name.values(): +- for did, diag in diagnostics_by_id.items(): ++ for diagnostics_by_name in list(diagnostics.values()): ++ for diagnostics_by_id in list(diagnostics_by_name.values()): ++ for did, diag in list(diagnostics_by_id.items()): + diagnostics_by_id[did] = diag.Serialize(serializer) + return [serializer._objects, diagnostics] + histograms + +@@ -45,7 +45,7 @@ + type_name = diag.__class__.__name__ + diagnostics_by_name = self._diagnostics_by_type.setdefault(type_name, {}) + diagnostics_by_id = diagnostics_by_name.setdefault(name, {}) +- for i, other in diagnostics_by_id.items(): ++ for i, other in list(diagnostics_by_id.items()): + if other is diag or other == diag: + return i + +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/histogram_set.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/histogram_set.py 2025-01-16 02:26:08.579429745 +0800 +@@ -32,7 +32,7 @@ + def RemoveOrphanedDiagnostics(self): + orphans = set(self._shared_diagnostics_by_guid.keys()) + for h in self._histograms: +- for d in h.diagnostics.values(): ++ for d in list(h.diagnostics.values()): + if d.guid in orphans: + orphans.remove(d.guid) + for guid in orphans: +@@ -46,7 +46,7 @@ + + def AddHistogram(self, hist, diagnostics=None): + if diagnostics: +- for name, diag in 
diagnostics.items(): ++ for name, diag in list(diagnostics.items()): + hist.diagnostics[name] = diag + + self._histograms.add(hist) +@@ -98,7 +98,7 @@ + hist_set = histogram_proto.Pb2().HistogramSet() + hist_set.ParseFromString(serialized_proto) + +- for guid, d in hist_set.shared_diagnostics.items(): ++ for guid, d in list(hist_set.shared_diagnostics.items()): + diag = diagnostic.Diagnostic.FromProto(d) + diag.guid = guid + self._shared_diagnostics_by_guid[guid] = diag +@@ -136,7 +136,7 @@ + + def AsDicts(self): + dcts = [] +- for d in self._shared_diagnostics_by_guid.values(): ++ for d in list(self._shared_diagnostics_by_guid.values()): + dcts.append(d.AsDict()) + for h in self: + dcts.append(h.AsDict()) +@@ -144,7 +144,7 @@ + + def AsProto(self): + proto = histogram_proto.Pb2().HistogramSet() +- for guid, d in self._shared_diagnostics_by_guid.items(): ++ for guid, d in list(self._shared_diagnostics_by_guid.items()): + proto.shared_diagnostics[guid].CopyFrom(d.AsProto()) + for h in self: + proto.histograms.extend([h.AsProto()]) +@@ -170,7 +170,7 @@ + return + + for hist in self: +- for name, diag in hist.diagnostics.items(): ++ for name, diag in list(hist.diagnostics.items()): + if diag.has_guid and diag.guid == old_guid: + hist.diagnostics[name] = new_diagnostic + +@@ -179,14 +179,14 @@ + diagnostics_to_histograms = collections.defaultdict(list) + + for hist in self: +- for name, candidate in hist.diagnostics.items(): ++ for name, candidate in list(hist.diagnostics.items()): + diagnostics_to_histograms[candidate].append(hist) + + if name not in names_to_candidates: + names_to_candidates[name] = set() + names_to_candidates[name].add(candidate) + +- for name, candidates in names_to_candidates.items(): ++ for name, candidates in list(names_to_candidates.items()): + deduplicated_diagnostics = set() + + for candidate in candidates: +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/histogram_set_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/histogram_set_unittest.py 2025-01-16 02:26:08.580513060 +0800 +@@ -237,7 +237,7 @@ + # All diagnostics should have been serialized as DiagnosticRefs. + for d in histogram_dicts: + if 'type' not in d: +- for diagnostic_dict in d['diagnostics'].values(): ++ for diagnostic_dict in list(d['diagnostics'].values()): + self.assertIsInstance(diagnostic_dict, str) + + histograms2 = histogram_set.HistogramSet() +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/histogram_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/histogram_unittest.py 2025-01-16 02:26:08.580513060 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import copy + import json +@@ -704,8 +704,8 @@ + self.assertEqual(hist.description, clone.description) + self.assertEqual(len(hist.diagnostics), len(clone.diagnostics)) + self.assertEqual(hist.diagnostics['foo'], clone.diagnostics['foo']) +- self.assertEqual(hist.statistics_scalars.keys(), +- clone.statistics_scalars.keys()) ++ self.assertEqual(list(hist.statistics_scalars.keys()), ++ list(clone.statistics_scalars.keys())) + self.assertEqual(hist.max_num_sample_values, clone.max_num_sample_values) + + class DiagnosticMapUnittest(unittest.TestCase): +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/diagnostics/add_reserved_diagnostics.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/diagnostics/add_reserved_diagnostics.py 2025-01-16 02:26:08.580513060 +0800 +@@ -215,7 +215,7 @@ + histograms.ImportDicts(hs_with_no_stories.AsDicts()) + + histograms.DeduplicateDiagnostics() +- for name, value in names_to_values.items(): ++ for name, value in list(names_to_values.items()): + assert name in ALL_NAMES + histograms.AddSharedDiagnosticToAllHistograms( + name, generic_set.GenericSet([value])) +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/diagnostics/add_reserved_diagnostics_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/diagnostics/add_reserved_diagnostics_unittest.py 2025-01-16 02:26:08.580513060 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import json + import unittest +@@ -103,12 +103,12 @@ + new_hs.ImportDicts(json.loads(new_hs_json)) + + expected = [ +- [u'foo1', [], [u'foo1']], +- [u'bar', [], [u'bar1']], +- [u'blah', [], []], +- [u'bar', [u'name'], [u'bar1', u'bar2']], +- [u'foo1', [u'name'], [u'foo1']], +- [u'bar', [], [u'bar2']], ++ ['foo1', [], ['foo1']], ++ ['bar', [], ['bar1']], ++ ['blah', [], []], ++ ['bar', ['name'], ['bar1', 'bar2']], ++ ['foo1', ['name'], ['foo1']], ++ ['bar', [], ['bar2']], + ] + + for h in new_hs: +@@ -133,10 +133,10 @@ + new_hs.ImportDicts(json.loads(new_hs_json)) + + expected = [ +- [u'foo2', [u'name']], +- [u'foo1', [u'name']], +- [u'foo2', []], +- [u'foo1', []], ++ ['foo2', ['name']], ++ ['foo1', ['name']], ++ ['foo2', []], ++ ['foo1', []], + ] + + for h in new_hs: +@@ -160,11 +160,11 @@ + new_hs.ImportDicts(json.loads(new_hs_json)) + + expected = [ +- [u'foo', [], [u'foo2']], +- [u'foo', [u'name'], [u'foo1', u'foo2']], +- [u'bar', [u'name'], [u'bar']], +- [u'foo', [], [u'foo1']], +- [u'bar', [], [u'bar']], ++ ['foo', [], ['foo2']], ++ ['foo', ['name'], ['foo1', 'foo2']], ++ ['bar', ['name'], ['bar']], ++ ['foo', [], ['foo1']], ++ ['bar', [], ['bar']], + ] + + for h in new_hs: +@@ -207,11 +207,11 @@ + new_hs.ImportDicts(json.loads(new_hs_json)) + + expected = [ +- [u'foo', [u'name'], [u'bar'], [u't:1', u't:2']], +- [u'foo', [], [u'bar'], [u't:1']], +- [u'foo', [], [u'bar'], [u't:2']], +- [u'foo', [u'name', u'storyTags'], [u'bar'], [u't:1']], +- [u'foo', [u'name', u'storyTags'], [u'bar'], [u't:2']], ++ ['foo', ['name'], ['bar'], ['t:1', 't:2']], ++ ['foo', [], ['bar'], ['t:1']], ++ ['foo', [], ['bar'], ['t:2']], ++ ['foo', ['name', 
'storyTags'], ['bar'], ['t:1']], ++ ['foo', ['name', 'storyTags'], ['bar'], ['t:2']], + ] + + for h in new_hs: +@@ -239,9 +239,9 @@ + new_hs.ImportDicts(json.loads(new_hs_json)) + + expected = [ +- [u'foo', [u'name', u'storyTags'], [u'story1'], [u'ignored', u't:1']], +- [u'foo', [], [u'story1'], [u'ignored', u't:1']], +- [u'foo', [u'name'], [u'story1'], [u'ignored', u't:1']], ++ ['foo', ['name', 'storyTags'], ['story1'], ['ignored', 't:1']], ++ ['foo', [], ['story1'], ['ignored', 't:1']], ++ ['foo', ['name'], ['story1'], ['ignored', 't:1']], + ] + + for h in new_hs: +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/diagnostics/breakdown.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/diagnostics/breakdown.py 2025-01-16 02:26:08.580513060 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import math + import numbers +@@ -56,7 +56,7 @@ + def FromDict(d): + result = Breakdown() + result._color_scheme = d.get('colorScheme') +- for name, value in d['values'].items(): ++ for name, value in list(d['values'].items()): + if value in ['NaN', 'Infinity', '-Infinity']: + value = float(value) + result.Set(name, value) +@@ -99,7 +99,7 @@ + @staticmethod + def FromEntries(entries): + b = Breakdown() +- for name, value in entries.items(): ++ for name, value in list(entries.items()): + b.Set(name, value) + return b + +@@ -114,7 +114,7 @@ + return self._values.get(name, 0) + + def __iter__(self): +- for name, value in self._values.items(): ++ for name, value in list(self._values.items()): + yield name, value + + def __len__(self): +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/diagnostics/related_event_set.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/diagnostics/related_event_set.py 2025-01-16 02:26:08.580513060 +0800 +@@ -3,9 +3,9 @@ + # found in the LICENSE file. + + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + from six.moves import zip # pylint: disable=redefined-builtin + from tracing.value.diagnostics import diagnostic + +@@ -34,7 +34,7 @@ + return len(self._events_by_stable_id) + + def __iter__(self): +- for event in self._events_by_stable_id.values(): ++ for event in list(self._events_by_stable_id.values()): + yield event + + @staticmethod +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/diagnostics/related_name_map.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/diagnostics/related_name_map.py 2025-01-16 02:26:08.580513060 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + from six.moves import zip # pylint: disable=redefined-builtin + from tracing.value.diagnostics import diagnostic + +@@ -24,7 +24,7 @@ + return False + if set(self._map) != set(other._map): + return False +- for key, name in self._map.items(): ++ for key, name in list(self._map.items()): + if name != other.Get(key): + return False + return True +@@ -36,7 +36,7 @@ + return isinstance(other, RelatedNameMap) + + def AddDiagnostic(self, other): +- for key, name in other._map.items(): ++ for key, name in list(other._map.items()): + existing = self.Get(key) + if existing is None: + self.Set(key, name) +@@ -51,7 +51,7 @@ + self._map[key] = name + + def __iter__(self): +- for key, name in self._map.items(): ++ for key, name in list(self._map.items()): + yield key, name + + def Values(self): +@@ -81,7 +81,7 @@ + @staticmethod + def FromDict(dct): + names = RelatedNameMap() +- for key, name in dct['names'].items(): ++ for key, name in list(dct['names'].items()): + names.Set(key, name) + return names + +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/diagnostics/reserved_infos.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/diagnostics/reserved_infos.py 2025-01-16 02:26:08.580513060 +0800 +@@ -75,7 +75,7 @@ + + def _CreateCachedInfoTypes(): + info_types = {} +- for info in globals().values(): ++ for info in list(globals().values()): + if isinstance(info, _Info): + info_types[info.name] = info + return info_types +@@ -88,7 +88,7 @@ + return info.type + + def AllInfos(): +- for info in _CACHED_INFO_TYPES.values(): ++ for info in list(_CACHED_INFO_TYPES.values()): + yield info + + def AllNames(): +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/diagnostics/unmergeable_diagnostic_set.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing/value/diagnostics/unmergeable_diagnostic_set.py 2025-01-16 02:26:08.580513060 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + import six + from tracing.value.diagnostics import diagnostic + from tracing.value.diagnostics import diagnostic_ref +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing_build/check_gni.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing_build/check_gni.py 2025-01-16 02:26:08.580513060 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import os + import re +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing_build/html2trace.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing_build/html2trace.py 2025-01-16 02:26:08.580513060 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + import base64 + import gzip + import io +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing_build/merge_traces.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing_build/merge_traces.py 2025-01-16 02:26:08.580513060 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import argparse + import codecs +@@ -76,7 +76,7 @@ + """The maximum mapped ID of this map's entries.""" + if not self._entry_map: + return 0 +- return max(e._canonical_id for e in self._entry_map.values()) ++ return max(e._canonical_id for e in list(self._entry_map.values())) + + def AddEntry(self, source, path, **items): + """Add a source-specific entry path to the map. +@@ -91,7 +91,7 @@ + return self._GetSubMapEntry(source, path[0]).AddEntry(source, path[1:], + **items) + assert 'id' not in items # ID is set according to the path. +- for key, value in items.items(): ++ for key, value in list(items.items()): + value_set = self._items[key] + if (isinstance(value, collections.Iterable) and + not isinstance(value, StringTypes)): +@@ -176,12 +176,12 @@ + + def _CalculateUnmergeableMapFromEntrySources(self): + entry_ids_by_source = collections.defaultdict(set) +- for entry_id, entry in self._entry_map.items(): ++ for entry_id, entry in list(self._entry_map.items()): + for source in entry._sources: + entry_ids_by_source[source].add(entry_id) + + unmergeable_map = collections.defaultdict(set) +- for unmergeable_set in entry_ids_by_source.values(): ++ for unmergeable_set in list(entry_ids_by_source.values()): + for entry_id in unmergeable_set: + unmergeable_map[entry_id].update(unmergeable_set - {entry_id}) + +@@ -200,7 +200,7 @@ + if self._depth > 0: + # This is NOT a ROOT node, so we need to merge fields and sources from + # the source node. +- for key, values in source._items.items(): ++ for key, values in list(source._items.items()): + self._items[key].update(values) + self._sources.update(source._sources) + +@@ -215,8 +215,8 @@ + + # {ID1, ID2} -> Match between the two entries. + matches = {frozenset([full_id1, full_id2]): entry1._GetMatch(entry2) +- for full_id1, entry1 in canonical_entries.items() +- for full_id2, entry2 in canonical_entries.items() ++ for full_id1, entry1 in list(canonical_entries.items()) ++ for full_id2, entry2 in list(canonical_entries.items()) + if entry1._IsMergeableWith(entry2)} + + while matches: +@@ -239,7 +239,7 @@ + del merged_entry + self._entry_map[merged_full_id] = canonical_entry + +- for match_set in matches.keys(): ++ for match_set in list(matches.keys()): + if merged_full_id in match_set: + # Remove other matches with the merged entry. 
+ del matches[match_set] +@@ -324,11 +324,11 @@ + + with open(filename) as file_handle: + for sub_trace in html2trace.ReadTracesFromHTMLFile(file_handle): +- for name, component in TraceAsDict(sub_trace).items(): ++ for name, component in list(TraceAsDict(sub_trace).items()): + trace_components[name].append(component) + + trace = {} +- for name, components in trace_components.items(): ++ for name, components in list(trace_components.items()): + if len(components) == 1: + trace[name] = components[0] + elif all(isinstance(component, list) for component in components): +@@ -388,12 +388,12 @@ + """Merge a collection of JSON traces into a single JSON trace.""" + trace_components = collections.defaultdict(collections.OrderedDict) + +- for filename, trace in traces.items(): +- for name, component in TraceAsDict(trace).items(): ++ for filename, trace in list(traces.items()): ++ for name, component in list(TraceAsDict(trace).items()): + trace_components[name][filename] = component + + merged_trace = {} +- for component_name, components_by_filename in trace_components.items(): ++ for component_name, components_by_filename in list(trace_components.items()): + logging.info('Merging %d %r components...', len(components_by_filename), + component_name) + merged_trace[component_name] = MergeComponents(component_name, +@@ -416,7 +416,7 @@ + # (https://github.com/catapult-project/catapult/issues/2497). + events_by_filename = collections.OrderedDict( + (filename, [e for e in events if not isinstance(e, StringTypes)]) +- for filename, events in events_by_filename.items()) ++ for filename, events in list(events_by_filename.items())) + + timestamp_range_by_filename = _AdjustTimestampRanges(events_by_filename) + process_map = _CreateProcessMapFromTraceEvents(events_by_filename) +@@ -485,7 +485,7 @@ + logging.info('Creating process map from trace events...') + + process_map = ProcessIdMap() +- for filename, events in events_by_filename.items(): ++ for filename, events in list(events_by_filename.items()): + for event in events: + pid, tid = event['pid'], event['tid'] + process_map.AddEntry(source=filename, path=(pid, tid)) +@@ -530,7 +530,7 @@ + # Update IDs in 'stackFrames' and 'typeNames' metadata events. + if event['name'] == 'stackFrames': + _UpdateDictIds(index, event['args'], 'stackFrames') +- for frame in event['args']['stackFrames'].values(): ++ for frame in list(event['args']['stackFrames'].values()): + _UpdateFieldId(index, frame, 'parent') + elif event['name'] == 'typeNames': + _UpdateDictIds(index, event['args'], 'typeNames') +@@ -547,7 +547,7 @@ + elif event['ph'] == MEMORY_DUMP_PHASE: + # Update stack frame and type name IDs in heap dump entries in process + # memory dumps. 
+- for heap_dump in event['args']['dumps'].get('heaps', {}).values(): ++ for heap_dump in list(event['args']['dumps'].get('heaps', {}).values()): + for heap_entry in heap_dump['entries']: + _UpdateFieldId(index, heap_entry, 'bt', ignored_values=['']) + _UpdateFieldId(index, heap_entry, 'type') +@@ -562,7 +562,7 @@ + def _UpdateDictIds(index, parent_dict, key): + parent_dict[key] = { + _ConvertId(index, original_id): value +- for original_id, value in parent_dict[key].items()} ++ for original_id, value in list(parent_dict[key].items())} + + + def _UpdateFieldId(index, parent_dict, key, ignored_values=()): +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing_build/merge_traces_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing_build/merge_traces_unittest.py 2025-01-16 02:26:08.580513060 +0800 +@@ -46,4 +46,4 @@ + events = json.load(f)['traceEvents'] + # Check that both dumps are found in the merged trace. + dump_pids = [e['pid'] for e in events if e['ph'] == 'v'] +- self.assertEquals([1, 2], dump_pids) ++ self.assertEqual([1, 2], dump_pids) +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing_build/run_profile.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing_build/run_profile.py 2025-01-16 02:26:08.580513060 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import argparse + import cProfile +@@ -38,7 +38,7 @@ + parser.add_argument('bench_name') + args = parser.parse_args(args) + +- benches = [g for g in globals().values() ++ benches = [g for g in list(globals().values()) + if g != Bench and inspect.isclass(g) and + Bench in inspect.getmro(g)] + +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing_build/slim_trace.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing_build/slim_trace.py 2025-01-16 02:26:08.580513060 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import argparse + import codecs +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing_build/snapdragon2trace.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing_build/snapdragon2trace.py 2025-01-16 02:26:08.580513060 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import codecs + import csv +@@ -130,7 +130,7 @@ + if result is None: + result = trace.copy() + continue +- for k, v in trace.items(): ++ for k, v in list(trace.items()): + if k in result: + if not isinstance(v, list): + raise Exception('Cannot concat two traces with non-list values ' +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing_build/snapdragon2trace_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing_build/snapdragon2trace_unittest.py 2025-01-16 02:26:08.580513060 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import unittest + +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing_build/strip_memory_infra_trace.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing_build/strip_memory_infra_trace.py 2025-01-16 02:26:08.580513060 +0800 +@@ -4,7 +4,7 @@ + + """Filters a big trace keeping only the last memory-infra dumps.""" + +-from __future__ import print_function ++ + + import collections + import gzip +@@ -64,14 +64,14 @@ + + print('Detected %d memory-infra global dumps' % len(global_dumps)) + if global_dumps: +- max_procs = max(len(x) for x in global_dumps.values()) ++ max_procs = max(len(x) for x in list(global_dumps.values())) + print('Max number of processes seen: %d' % max_procs) + + ndumps = 2 + print('Preserving the last %d memory-infra dumps' % ndumps) + detailed_dumps = [] + non_detailed_dumps = [] +- for global_dump in global_dumps.values(): ++ for global_dump in list(global_dumps.values()): + try: + level_of_detail = global_dump[0]['args']['dumps']['level_of_detail'] + except KeyError: +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing_build/trace2html.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing_build/trace2html.py 2025-01-16 02:26:08.580513060 +0800 +@@ -2,9 +2,9 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import argparse + import base64 +--- a/src/3rdparty/chromium/third_party/catapult/tracing/tracing_build/vulcanize_trace_viewer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/catapult/tracing/tracing_build/vulcanize_trace_viewer.py 2025-01-16 02:26:08.580513060 +0800 +@@ -2,7 +2,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import codecs + import argparse +--- a/src/3rdparty/chromium/third_party/closure_compiler/compiler.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/closure_compiler/compiler.py 2025-01-16 02:26:08.580513060 +0800 +@@ -63,7 +63,7 @@ + msg: A debug message to log. + """ + if self._verbose: +- print "(INFO) %s" % msg ++ print("(INFO) %s" % msg) + + def _log_error(self, msg): + """Logs |msg| to stderr regardless of --flags. +@@ -71,7 +71,7 @@ + Args: + msg: An error message to log. 
+ """ +- print >> sys.stderr, "(ERROR) %s" % msg ++ print("(ERROR) %s" % msg, file=sys.stderr) + + def run_jar(self, jar, args): + """Runs a .jar from the command line with arguments. +@@ -195,8 +195,8 @@ + self._target = sources[0] + externs_and_deps += sources[1:] + +- externs = filter(is_extern, externs_and_deps) +- deps = filter(lambda f: not is_extern(f), externs_and_deps) ++ externs = list(filter(is_extern, externs_and_deps)) ++ deps = [f for f in externs_and_deps if not is_extern(f)] + + assert externs or deps or self._target + +@@ -270,7 +270,7 @@ + f.write('') + + if process_includes: +- errors = map(self._clean_up_error, errors) ++ errors = list(map(self._clean_up_error, errors)) + output = self._format_errors(errors) + + if errors: +@@ -310,5 +310,5 @@ + + if found_errors: + if opts.custom_sources: +- print stderr ++ print(stderr) + sys.exit(1) +--- a/src/3rdparty/chromium/third_party/closure_compiler/compiler_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/closure_compiler/compiler_test.py 2025-01-16 02:26:08.580513060 +0800 +@@ -80,7 +80,7 @@ + self.assertTrue(os.path.exists(out_file)) + if expected_output: + with open(out_file, "r") as file: +- self.assertEquals(file.read(), expected_output) ++ self.assertEqual(file.read(), expected_output) + + def _createOutFiles(self): + out_file = tempfile.NamedTemporaryFile(delete=False) +@@ -330,7 +330,7 @@ + expected_output = "'use strict';var goog,testScript=function(){};testScript();\n" + self.assertTrue(os.path.exists(out_file)) + with open(out_file, "r") as file: +- self.assertEquals(file.read(), expected_output) ++ self.assertEqual(file.read(), expected_output) + + def testMissingReturnAssertNotReached(self): + template = self._ASSERT_DEFINITION + """ +--- a/src/3rdparty/chromium/third_party/closure_compiler/js_binary.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/closure_compiler/js_binary.py 2025-01-16 02:26:08.580513060 +0800 +@@ -123,8 +123,8 @@ + + returncode, errors = compiler.Compiler().run_jar(args.compiler, compiler_args) + if returncode != 0: +- print args.compiler, ' '.join(compiler_args) +- print errors ++ print(args.compiler, ' '.join(compiler_args)) ++ print(errors) + + return returncode + +--- a/src/3rdparty/chromium/third_party/closure_compiler/processor_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/closure_compiler/processor_test.py 2025-01-16 02:26:08.580513060 +0800 +@@ -79,14 +79,14 @@ + + def testIncludedFiles(self): + """Verify that files are tracked correctly as they're inlined.""" +- self.assertEquals(set(["/global.js", "/debug.js"]), ++ self.assertEqual(set(["/global.js", "/debug.js"]), + self._processor.included_files) + + def testDoubleIncludedSkipped(self): + """Verify that doubly included files are skipped.""" + processor = Processor("/double-debug.js") +- self.assertEquals(set(["/debug.js"]), processor.included_files) +- self.assertEquals(FileCache.read("/debug.js") + "\n", processor.contents) ++ self.assertEqual(set(["/debug.js"]), processor.included_files) ++ self.assertEqual(FileCache.read("/debug.js") + "\n", processor.contents) + + class IfStrippingTest(unittest.TestCase): + """Test that the contents of XML blocks are stripped.""" +--- a/src/3rdparty/chromium/third_party/crashpad/update.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/crashpad/update.py 2025-01-16 02:26:08.580513060 +0800 +@@ -5,7 +5,7 @@ + # Use of this source code is governed by a BSD-style 
license that can be
+ # found in the LICENSE file.
+
+-from __future__ import print_function
++
+
+ import argparse
+ import os
+@@ -171,7 +171,7 @@
+ You may use a new shell for this, or ^Z if job control is available.
+ Press ^C to abort.
+ """, file=sys.stderr)
+-    input()
++    input()  # Python 3 input() only reads a line; never eval() user input here.
+ except:
+ # ^C, signal, or something else.
+ print('Aborting...', file=sys.stderr)
+@@ -209,7 +209,7 @@
+ revision_new = subprocess.check_output(
+ ['git', 'rev-parse', parsed.update_to],
+ shell=IS_WINDOWS).decode('utf-8').rstrip()
+-  new_message = u'Update ' + project_name + ' to ' + revision_new + '\n\n'
++  new_message = 'Update ' + project_name + ' to ' + revision_new + '\n\n'
+
+ # Wrap everything to 72 characters, with a hanging indent.
+ wrapper = textwrap.TextWrapper(width=72, subsequent_indent = ' ' * 13)
+--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/build/install_linux_sysroot.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/build/install_linux_sysroot.py 2025-01-16 02:26:08.580513060 +0800
+@@ -21,7 +21,7 @@
+ import shutil
+ import subprocess
+ import sys
+-import urllib2
++import urllib.request, urllib.error, urllib.parse
+
+ SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+
+@@ -45,16 +45,16 @@
+ if s.read() == url:
+ return
+
+-    print 'Installing Debian root image from %s' % url
++    print('Installing Debian root image from %s' % url)
+
+ if os.path.isdir(sysroot):
+ shutil.rmtree(sysroot)
+ os.mkdir(sysroot)
+ tarball = os.path.join(sysroot, FILENAME)
+-    print 'Downloading %s' % url
++    print('Downloading %s' % url)
+
+ for _ in range(3):
+-        response = urllib2.urlopen(url)
++        response = urllib.request.urlopen(url)
+ with open(tarball, 'wb') as f:
+ f.write(response.read())
+ break
+--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/build/run_tests.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/build/run_tests.py 2025-01-16 02:26:08.580513060 +0800
+@@ -15,7 +15,7 @@
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+-from __future__ import print_function
++
+
+ import argparse
+ import os
+@@ -185,7 +185,7 @@
+ # which adbd will run as an “sh -c” argument. 
+ adb_command = ['adb', '-s', android_device, 'shell']
+ script_commands = []
+-    for k, v in env.items():
++    for k, v in list(env.items()):
+ script_commands.append('export %s=%s' %
+ (pipes.quote(k), pipes.quote(v)))
+ script_commands.extend([
+--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/build/ios/setup_ios_gn.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/build/ios/setup_ios_gn.py 2025-01-16 02:26:08.580513060 +0800
+@@ -27,10 +27,10 @@
+ try:
+     import configparser
+ except ImportError:
+-    import ConfigParser as configparser
++    import configparser
+
+ try:
+-    import StringIO as io
++    import io
+ except ImportError:
+     import io
+
+@@ -44,8 +44,7 @@
+ ENV_VAR_PATTERN = re.compile(r'\$([A-Za-z0-9_]+)')
+
+ def values(self, section):
+-    return map(lambda kv: self._UnquoteString(self._ExpandEnvVar(kv[1])),
+-               configparser.ConfigParser.items(self, section))
++    return [self._UnquoteString(self._ExpandEnvVar(kv[1])) for kv in configparser.ConfigParser.items(self, section)]
+
+ def getstring(self, section, option):
+     return self._UnquoteString(self._ExpandEnvVar(self.get(section,
+@@ -110,7 +109,7 @@
+ args.append(('target_cpu', target_cpu))
+ args.append(
+ ('additional_target_cpus',
+-     [cpu for cpu in cpu_values.itervalues() if cpu != target_cpu]))
++     [cpu for cpu in cpu_values.values() if cpu != target_cpu]))
+ else:
+ args.append(('target_cpu', cpu_values[build_arch]))
+
+--- a/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/win/end_to_end_test.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/crashpad/crashpad/snapshot/win/end_to_end_test.py 2025-01-16 02:26:08.581596375 +0800
+@@ -14,7 +14,7 @@
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+-from __future__ import print_function
++
+
+ import os
+ import platform
+--- a/src/3rdparty/chromium/third_party/dav1d/generate_configs.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/dav1d/generate_configs.py 2025-01-16 02:26:08.581596375 +0800
+@@ -5,7 +5,7 @@
+ # found in the LICENSE file. 
+ """Creates config files for building dav1d.""" + +-from __future__ import print_function ++ + + import os + import re +--- a/src/3rdparty/chromium/third_party/dawn/generator/dawn_json_generator.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/dawn/generator/dawn_json_generator.py 2025-01-16 02:26:08.581596375 +0800 +@@ -326,10 +326,10 @@ + types = {} + + by_category = {} +- for name in category_to_parser.keys(): ++ for name in list(category_to_parser.keys()): + by_category[name] = [] + +- for (name, json_data) in json.items(): ++ for (name, json_data) in list(json.items()): + if name[0] == '_': + continue + category = json_data['category'] +@@ -346,7 +346,7 @@ + for callback in by_category['callback']: + link_callback(callback, types) + +- for category in by_category.keys(): ++ for category in list(by_category.keys()): + by_category[category] = sorted( + by_category[category], key=lambda typ: typ.name.canonical_case()) + +@@ -415,10 +415,10 @@ + command.derived_method = method + commands.append(command) + +- for (name, json_data) in wire_json['commands'].items(): ++ for (name, json_data) in list(wire_json['commands'].items()): + commands.append(Command(name, linked_record_members(json_data, types))) + +- for (name, json_data) in wire_json['return commands'].items(): ++ for (name, json_data) in list(wire_json['return commands'].items()): + return_commands.append( + Command(name, linked_record_members(json_data, types))) + +@@ -427,7 +427,7 @@ + 'return command': return_commands + } + +- for commands in wire_params['cmd_records'].values(): ++ for commands in list(wire_params['cmd_records'].values()): + for command in commands: + command.update_metadata() + commands.sort(key=lambda c: c.name.canonical_case()) +--- a/src/3rdparty/chromium/third_party/dawn/generator/extract_json.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/dawn/generator/extract_json.py 2025-01-16 02:26:08.581596375 +0800 +@@ -24,7 +24,7 @@ + + output_dir = sys.argv[2] + +- for (name, content) in files.items(): ++ for (name, content) in list(files.items()): + output_file = output_dir + os.path.sep + name + + directory = os.path.dirname(output_file) +--- a/src/3rdparty/chromium/third_party/dawn/generator/generator_lib.py 2025-01-14 21:29:17.881395321 +0800 ++++ b/src/3rdparty/chromium/third_party/dawn/generator/generator_lib.py 2025-01-16 02:26:08.581596375 +0800 +@@ -123,7 +123,7 @@ + # Filter lines that are pure comments. line_comment_prefix is not + # enough because it removes the comment but doesn't completely remove + # the line, resulting in more verbose output. +- lines = filter(lambda line: not line.strip().startswith('//*'), lines) ++ lines = [line for line in lines if not line.strip().startswith('//*')] + + # Remove indentation templates have for the Jinja control flow. 
+ for line in lines: +@@ -196,7 +196,7 @@ + root_dir = os.path.join(os.path.dirname(__file__), os.pardir) + root_dir = os.path.abspath(root_dir) + +- module_paths = (module.__file__ for module in sys.modules.values() ++ module_paths = (module.__file__ for module in list(sys.modules.values()) + if module and hasattr(module, '__file__')) + + paths = set() +@@ -311,9 +311,9 @@ + actual = {render.output for render in renders} + + if actual != expected: +- print("Wrong expected outputs, caller expected:\n " + +- repr(sorted(expected))) +- print("Actual output:\n " + repr(sorted(actual))) ++ print(("Wrong expected outputs, caller expected:\n " + ++ repr(sorted(expected)))) ++ print(("Actual output:\n " + repr(sorted(actual)))) + return 1 + + # Print the list of all the outputs for cmake. +@@ -333,8 +333,8 @@ + + for directory in allowed_dirs: + if not directory.endswith('/'): +- print('Allowed directory entry "{}" doesn\'t ' +- 'end with /'.format(directory)) ++ print(('Allowed directory entry "{}" doesn\'t ' ++ 'end with /'.format(directory))) + return 1 + + def check_in_subdirectory(path, directory): +@@ -345,10 +345,10 @@ + if not any( + check_in_subdirectory(render.output, directory) + for directory in allowed_dirs): +- print('Output file "{}" is not in the allowed directory ' +- 'list below:'.format(render.output)) ++ print(('Output file "{}" is not in the allowed directory ' ++ 'list below:'.format(render.output))) + for directory in sorted(allowed_dirs): +- print(' "{}"'.format(directory)) ++ print((' "{}"'.format(directory))) + return 1 + + # Output the JSON tarball +--- a/src/3rdparty/chromium/third_party/dawn/generator/opengl_loader_generator.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/dawn/generator/opengl_loader_generator.py 2025-01-16 02:26:08.581596375 +0800 +@@ -108,7 +108,7 @@ + + + def parse_version(version): +- return Version(*map(int, version.split('.'))) ++ return Version(*list(map(int, version.split('.')))) + + + def compute_params(root, supported_extensions): +--- a/src/3rdparty/chromium/third_party/dawn/generator/remove_files.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/dawn/generator/remove_files.py 2025-01-16 02:26:08.581596375 +0800 +@@ -65,8 +65,8 @@ + + for directory in allowed_dirs: + if not directory.endswith('/'): +- print('Allowed directory entry "{}" doesn\'t end with /'.format( +- directory)) ++ print(('Allowed directory entry "{}" doesn\'t end with /'.format( ++ directory))) + return 1 + + with open(args.stale_dirs_file) as f: +--- a/src/3rdparty/chromium/third_party/dawn/scripts/perf_test_runner.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/dawn/scripts/perf_test_runner.py 2025-01-16 02:26:08.581596375 +0800 +@@ -95,14 +95,14 @@ + perftests_path = newest_binary + + if perftests_path == None or not os.path.exists(perftests_path): +- print('Cannot find Release %s!' % binary_name) ++ print(('Cannot find Release %s!' 
% binary_name)) + sys.exit(1) + + if len(sys.argv) >= 2: + test_name = sys.argv[1] + +-print('Using test executable: ' + perftests_path) +-print('Test name: ' + test_name) ++print(('Using test executable: ' + perftests_path)) ++print(('Test name: ' + test_name)) + + + def get_results(metric, extra_args=[]): +@@ -121,7 +121,7 @@ + pattern = metric + r'.*= ([0-9.]+)' + m = re.findall(pattern, output) + if not m: +- print("Did not find the metric '%s' in the test output:" % metric) ++ print(("Did not find the metric '%s' in the test output:" % metric)) + print(output) + sys.exit(1) + +@@ -130,7 +130,7 @@ + + # Calibrate the number of steps + steps = get_results("steps", ["--calibration"])[0] +-print("running with %d steps." % steps) ++print(("running with %d steps." % steps)) + + # Loop 'max_experiments' times, running the tests. + for experiment in range(max_experiments): +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/PRESUBMIT.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/PRESUBMIT.py 2025-01-16 02:26:08.581596375 +0800 +@@ -83,7 +83,7 @@ + num_affected = len(affected_files) + for dirs in EXCLUSIVE_CHANGE_DIRECTORIES: + dir_list = ', '.join(dirs) +- affected_in_dir = filter(lambda f: FileIsInDir(f, dirs), affected_files) ++ affected_in_dir = [f for f in affected_files if FileIsInDir(f, dirs)] + num_in_dir = len(affected_in_dir) + if num_in_dir == 0: + continue +@@ -569,8 +569,7 @@ + accepted_endings) + + # Exclude front_end/third_party files. +- files_to_lint = filter(lambda path: "third_party" not in path, +- files_to_lint) ++ files_to_lint = [path for path in files_to_lint if "third_party" not in path] + + if len(files_to_lint) is 0: + results.append( +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/optimize_svg_images.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/optimize_svg_images.py 2025-01-16 02:26:08.581596375 +0800 +@@ -32,7 +32,7 @@ + import subprocess + import sys + +-from build import devtools_file_hashes ++from .build import devtools_file_hashes + + try: + import json +@@ -60,7 +60,7 @@ + proc = subprocess.Popen("which %s" % app_name, stdout=subprocess.PIPE, shell=True) + proc.communicate() + if proc.returncode != 0: +- print "This script needs \"%s\" to be installed." % app_name ++ print("This script needs \"%s\" to be installed." % app_name) + sys.exit(1) + + +@@ -75,9 +75,9 @@ + + + if len(SVG_FILE_NAMES): +- print "%d unoptimized svg files found." % len(SVG_FILE_NAMES) ++ print("%d unoptimized svg files found." % len(SVG_FILE_NAMES)) + else: +- print "All svg files are already optimized." 
++ print("All svg files are already optimized.") + sys.exit() + + processes = {} +@@ -85,8 +85,8 @@ + name = os.path.splitext(os.path.basename(svg_file_path))[0] + processes[name] = optimize_svg(svg_file_path) + +-for file_name, proc in processes.items(): ++for file_name, proc in list(processes.items()): + (optimize_out, _) = proc.communicate() +- print("Optimization of %s finished: %s" % (file_name, optimize_out)) ++ print(("Optimization of %s finished: %s" % (file_name, optimize_out))) + + devtools_file_hashes.update_file_hashes(HASHES_FILE_PATH, svg_file_paths) +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/unzip.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/unzip.py 2025-01-16 02:26:08.581596375 +0800 +@@ -7,7 +7,7 @@ + import zipfile + + if len(sys.argv) < 3: +- print('Usage: {} '.format(sys.argv[0])) ++ print(('Usage: {} '.format(sys.argv[0]))) + print(' full path to zip file to be extracted') + print(' full path to destination folder') + sys.exit(1) +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/build_inspector_overlay.py 2025-01-14 21:29:17.881395321 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/build_inspector_overlay.py 2025-01-16 02:37:38.291356265 +0800 +@@ -85,9 +85,9 @@ + write_file(join(output_path, filename), css_file) + + except: +- print( ++ print(( + 'Usage: %s filename_1 max_size_1 filename_2 max_size_2 ... filename_N max_size_N --input_path --output_path ' +- % argv[0]) ++ % argv[0])) + raise + + +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/build_release_applications.py 2025-01-14 21:29:17.881395321 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/build_release_applications.py 2025-01-16 02:39:39.357212175 +0800 +@@ -49,7 +49,7 @@ + application_names = argv[1:input_path_flag_index] + use_rollup = '--rollup' in argv + except: +- print('Usage: %s app_1 app_2 ... app_N --input_path --output_path --rollup true' % argv[0]) ++ print(('Usage: %s app_1 app_2 ... app_N --input_path --output_path --rollup true' % argv[0])) + raise + + loader = modular_build.DescriptorLoader(input_path) +@@ -109,8 +109,7 @@ + + def build_app(self): + self._build_app_script() +- for module in filter(lambda desc: (not desc.get('type')), +- self.descriptors.application.values()): ++ for module in [desc for desc in list(self.descriptors.application.values()) if (not desc.get('type'))]: + self._concatenate_dynamic_module(module['name']) + + def _build_app_script(self): +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/dependency_preprocessor.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/dependency_preprocessor.py 2025-01-16 02:26:08.581596375 +0800 +@@ -18,7 +18,7 @@ + import re + import shutil + +-import special_case_namespaces ++from . 
import special_case_namespaces + + try: + import simplejson as json +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/devtools_file_hashes.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/devtools_file_hashes.py 2025-01-16 02:26:08.581596375 +0800 +@@ -41,7 +41,7 @@ + with open(hashes_file_path, "wt") as hashes_file: + json.dump(hashes, hashes_file, indent=4, separators=(",", ": ")) + except: +- print "ERROR: Failed to write %s" % hashes_file_path ++ print("ERROR: Failed to write %s" % hashes_file_path) + raise + + +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/generate_devtools_extension_api.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/generate_devtools_extension_api.py 2025-01-16 02:26:08.581596375 +0800 +@@ -49,7 +49,7 @@ + def main(argv): + + if len(argv) < 3: +- print('usage: %s output_js input_files ...' % argv[0]) ++ print(('usage: %s output_js input_files ...' % argv[0])) + return 1 + + output_name = argv[1] +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/generate_devtools_grd.py 2025-01-14 21:29:17.881395321 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/generate_devtools_grd.py 2025-01-16 02:26:08.581596375 +0800 +@@ -29,7 +29,7 @@ + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + """Creates a grd file for packaging the inspector files.""" + +-from __future__ import with_statement ++ + from os import path + + import errno +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/generate_protocol_externs.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/generate_protocol_externs.py 2025-01-16 02:26:08.581596375 +0800 +@@ -100,7 +100,7 @@ + if type_id in ref_types: + return ref_types[type_id] + else: +- print "Type not found: " + type_id ++ print("Type not found: " + type_id) + return "!! 
Type not found: " + type_id + + +@@ -124,7 +124,7 @@ + output_file.write("var ProtocolProxyApi = {};\n") + + # Add basic types from protocol to closure +- for protocolName, closureName in type_traits.items(): ++ for protocolName, closureName in list(type_traits.items()): + output_file.write("/** @typedef {%s} */\n" % closureName) + output_file.write("Protocol.%s;\n" % protocolName) + +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/generate_supported_css.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/generate_supported_css.py 2025-01-16 02:26:08.581596375 +0800 +@@ -44,7 +44,7 @@ + + + def _keep_only_required_keys(entry): +- for key in entry.keys(): ++ for key in list(entry.keys()): + if key not in ("name", "longhands", "svg", "inherited", "keywords"): + del entry[key] + return entry +@@ -71,8 +71,7 @@ + property_names[entry["name"]] = entry + if "keywords" in entry: + keywords = list( +- filter(lambda keyword: not keyword.startswith("-internal-"), +- entry["keywords"])) ++ [keyword for keyword in entry["keywords"] if not keyword.startswith("-internal-")]) + property_values[entry["name"]] = {"values": keywords} + + properties.sort(key=lambda entry: entry["name"]) +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/modular_build.py 2025-01-14 21:29:17.881395321 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/modular_build.py 2025-01-16 02:26:08.582679690 +0800 +@@ -7,7 +7,7 @@ + Utilities for the modular DevTools build. + """ + +-from __future__ import print_function ++ + + import collections + from os import path +@@ -70,7 +70,7 @@ + for script in module.get('modules', []): + if script not in skipped_files: + files[path.normpath(path.join(self.application_dir, name, script))] = True +- return files.keys() ++ return list(files.keys()) + + def all_skipped_compilation_files(self): + files = collections.OrderedDict() +@@ -79,7 +79,7 @@ + skipped_files = set(module.get('skip_compilation', [])) + for script in skipped_files: + files[path.join(name, script)] = True +- return files.keys() ++ return list(files.keys()) + + def module_resources(self, name): + return [name + '/' + resource for resource in self.modules[name].get('resources', [])] +@@ -173,13 +173,13 @@ + extends = self._load_application(extends, all_module_descriptors) + worker = True if 'worker' in descriptor_json and descriptor_json['worker'] else False + +- for (module_name, module) in application_descriptor.items(): ++ for (module_name, module) in list(application_descriptor.items()): + if all_module_descriptors.get(module_name): + bail_error('Duplicate definition of module "%s" in %s' % (module_name, application_descriptor_filename)) + module_descriptors[module_name] = self._read_module_descriptor(module_name, application_descriptor_filename) + all_module_descriptors[module_name] = module_descriptors[module_name] + +- for module in module_descriptors.values(): ++ for module in list(module_descriptors.values()): + for dep in module.get('dependencies', []): + if dep not in all_module_descriptors: + bail_error('Module "%s" (dependency of "%s") not listed in application descriptor %s' % +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/rjsmin.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/build/rjsmin.py 2025-01-16 02:26:08.582679690 +0800 +@@ -167,7 +167,7 @@ + """ Make id_literal like char 
class """ + match = _re.compile(what).match + result = ''.join([ +- chr(c) for c in xrange(127) if not match(chr(c)) ++ chr(c) for c in range(127) if not match(chr(c)) + ]) + return '[^%s]' % fix_charclass(result) + +@@ -175,7 +175,7 @@ + """ Make negated id_literal like char class """ + match = _re.compile(id_literal_(keep)).match + result = ''.join([ +- chr(c) for c in xrange(127) if not match(chr(c)) ++ chr(c) for c in range(127) if not match(chr(c)) + ]) + return r'[%s]' % fix_charclass(result) + +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/closure/closure_runner/build_compiler_runner_jar.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/closure/closure_runner/build_compiler_runner_jar.py 2025-01-16 02:26:08.582679690 +0800 +@@ -27,12 +27,12 @@ + proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True) + proc.communicate() + if proc.returncode: +- print >> sys.stderr, error_template % proc.returncode ++ print(error_template % proc.returncode, file=sys.stderr) + sys.exit(proc.returncode) + + + def build_artifacts(): +- print 'Compiling...' ++ print('Compiling...') + java_files = [] + for root, dirs, files in sorted(os.walk(src_path)): + for file_name in files: +@@ -48,7 +48,7 @@ + ' '.join(java_files)) + run_and_communicate(javac_command, 'Error: javac returned %d') + +- print 'Building jar...' ++ print('Building jar...') + artifact_path = rel_to_abs(jar_name) + jar_path = os.path.join(java_bin_path, 'jar') + jar_command = '%s cvfme %s %s %s -C %s .' % (jar_path, artifact_path, manifest_file.name, main_class, bin_path) +@@ -59,8 +59,8 @@ + + + def help(): +- print 'usage: %s' % os.path.basename(__file__) +- print 'Builds closure_runner.jar from the %s directory contents' % src_dir ++ print('usage: %s' % os.path.basename(__file__)) ++ print('Builds closure_runner.jar from the %s directory contents' % src_dir) + + + def main(): +@@ -68,7 +68,7 @@ + help() + return + build_artifacts() +- print 'Done.' ++ print('Done.') + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/deps/download_chromium.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/deps/download_chromium.py 2025-01-16 02:26:08.582679690 +0800 +@@ -14,7 +14,7 @@ + import stat + import subprocess + import sys +-import urllib ++import urllib.request, urllib.parse, urllib.error + import zipfile + + +@@ -55,7 +55,7 @@ + + # Download again and save build number + try: +- filehandle, headers = urllib.urlretrieve(options.url) ++ filehandle, headers = urllib.request.urlretrieve(options.url) + except: + print("Using curl as fallback. 
You should probably update OpenSSL.") + filehandle = io.BytesIO( +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/deps/download_emscripten.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/deps/download_emscripten.py 2025-01-16 02:26:08.582679690 +0800 +@@ -20,7 +20,7 @@ + if sys.version_info >= (3, ): + from urllib.request import urlretrieve + else: +- from urllib import urlretrieve ++ from urllib.request import urlretrieve + + BS = 8192 + STAMP_FILE = 'build-revision' +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/deps/generate_protocol_resources.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/deps/generate_protocol_resources.py 2025-01-16 02:26:08.582679690 +0800 +@@ -64,7 +64,7 @@ + + if typescript_found_errors: + print('') +- print('TypeScript compilation failed on %s' % generator_script_to_compile) ++ print(('TypeScript compilation failed on %s' % generator_script_to_compile)) + print('') + print(typescript_stderr) + print('') +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/deps/manage_node_deps.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/deps/manage_node_deps.py 2025-01-16 02:26:08.582679690 +0800 +@@ -102,7 +102,7 @@ + pattern = path.join(devtools_paths.node_modules_path(), 'package.json') + packages = [] + for root, dirnames, filenames in os.walk(devtools_paths.node_modules_path()): +- for filename in filter(lambda f: f == 'package.json', filenames): ++ for filename in [f for f in filenames if f == 'package.json']: + packages.append(path.join(root, filename)) + + for pkg in packages: +@@ -112,8 +112,8 @@ + + # Remove anything that begins with an underscore, as these are + # the private fields in a package.json +- for key in pkg_data.keys(): +- if key.find(u'_') == 0: ++ for key in list(pkg_data.keys()): ++ if key.find('_') == 0: + pkg_data.pop(key) + + pkg_file.truncate(0) +@@ -121,7 +121,7 @@ + json.dump(pkg_data, pkg_file, indent=2, sort_keys=True, separators=(',', ': ')) + pkg_file.write('\n') + except: +- print('Unable to fix: %s' % pkg) ++ print(('Unable to fix: %s' % pkg)) + return True + + return False +@@ -132,11 +132,11 @@ + with open(devtools_paths.package_lock_json_path(), 'r+') as pkg_lock_file: + try: + pkg_lock_data = json.load(pkg_lock_file) +- existing_deps = pkg_lock_data[u'dependencies'] ++ existing_deps = pkg_lock_data['dependencies'] + new_deps = [] + + # Find any new DEPS and add them in. +- for dep, version in DEPS.items(): ++ for dep, version in list(DEPS.items()): + if not dep in existing_deps or not existing_deps[dep]['version'] == version: + new_deps.append("%s@%s" % (dep, version)) + +@@ -147,7 +147,7 @@ + return exec_command(cmd) + + except Exception as exception: +- print('Unable to install: %s' % exception) ++ print(('Unable to install: %s' % exception)) + return True + + return False +@@ -159,7 +159,7 @@ + pkg_data = json.load(pkg_file) + + # Replace the dev deps. 
+- pkg_data[u'devDependencies'] = DEPS ++ pkg_data['devDependencies'] = DEPS + + pkg_file.truncate(0) + pkg_file.seek(0) +@@ -167,7 +167,7 @@ + pkg_file.write('\n') + + except: +- print('Unable to fix: %s' % sys.exc_info()[0]) ++ print(('Unable to fix: %s' % sys.exc_info()[0])) + return True + return False + +@@ -179,8 +179,8 @@ + + # Remove the dependencies and devDependencies from the root package.json + # so that they can't be used to overwrite the node_modules managed by this file. +- for key in pkg_data.keys(): +- if key.find(u'dependencies') == 0 or key.find(u'devDependencies') == 0: ++ for key in list(pkg_data.keys()): ++ if key.find('dependencies') == 0 or key.find('devDependencies') == 0: + pkg_data.pop(key) + + pkg_file.truncate(0) +@@ -188,7 +188,7 @@ + json.dump(pkg_data, pkg_file, indent=2, sort_keys=True, separators=(',', ': ')) + pkg_file.write('\n') + except: +- print('Unable to fix: %s' % pkg) ++ print(('Unable to fix: %s' % pkg)) + return True + return False + +@@ -216,8 +216,8 @@ + + + def run_npm_command(npm_command_args=None): +- for (name, version) in DEPS.items(): +- if (version.find(u'^') == 0): ++ for (name, version) in list(DEPS.items()): ++ if (version.find('^') == 0): + print('Versions must be locked to a specific version; remove ^ from the start of the version.') + return True + +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/deps/roll_deps.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/deps/roll_deps.py 2025-01-16 02:26:08.583763005 +0800 +@@ -50,10 +50,10 @@ + subprocess.check_call(['gclient', 'sync'], cwd=options.chromium_dir) + + def copy_files(options): +- for from_path, to_path in FILE_MAPPINGS.items(): ++ for from_path, to_path in list(FILE_MAPPINGS.items()): + from_path = os.path.normpath(from_path) + to_path = os.path.normpath(to_path) +- print('%s => %s' % (from_path, to_path)) ++ print(('%s => %s' % (from_path, to_path))) + shutil.copy(os.path.join(options.chromium_dir, from_path), + os.path.join(options.devtools_dir, to_path)) + +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/jsdoc_validator/build_jsdoc_validator_jar.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/jsdoc_validator/build_jsdoc_validator_jar.py 2025-01-16 02:26:08.583763005 +0800 +@@ -87,12 +87,12 @@ + proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True) + proc.communicate() + if proc.returncode: +- print >> sys.stderr, error_template % proc.returncode ++ print(error_template % proc.returncode, file=sys.stderr) + sys.exit(proc.returncode) + + + def build_artifacts(): +- print 'Compiling...' ++ print('Compiling...') + java_files = [] + for root, dirs, files in sorted(os.walk(src_path)): + for file_name in files: +@@ -109,7 +109,7 @@ + ' '.join(java_files)) + run_and_communicate(javac_command, 'Error: javac returned %d') + +- print 'Building jar...' ++ print('Building jar...') + artifact_path = rel_to_abs(jar_name) + jar_path = os.path.join(java_bin_path, 'jar') + jar_command = '%s cvfme %s %s %s -C %s .' % (jar_path, artifact_path, manifest_file.name, main_class, bin_path) +@@ -120,9 +120,9 @@ + + + def update_hashes(): +- print 'Updating hashes...' 
++ print('Updating hashes...') + with open(hashes_path, 'w') as file: +- file.writelines(['%s %s\n' % (hash, name) for (name, hash) in get_actual_hashes().iteritems()]) ++ file.writelines(['%s %s\n' % (hash, name) for (name, hash) in get_actual_hashes().items()]) + + + def hashes_modified(): +@@ -131,7 +131,7 @@ + return [('', 1, 0)] + actual_hashes = get_actual_hashes() + results = [] +- for name, expected_hash in expected_hashes.iteritems(): ++ for name, expected_hash in expected_hashes.items(): + actual_hash = actual_hashes.get(name) + if expected_hash != actual_hash: + results.append((name, expected_hash, actual_hash)) +@@ -139,10 +139,10 @@ + + + def help(): +- print 'usage: %s [option]' % os.path.basename(__file__) +- print 'Options:' +- print '--force-rebuild: Rebuild classes and jar even if there are no source file changes' +- print '--no-rebuild: Do not rebuild jar, just update hashes' ++ print('usage: %s [option]' % os.path.basename(__file__)) ++ print('Options:') ++ print('--force-rebuild: Rebuild classes and jar even if there are no source file changes') ++ print('--no-rebuild: Do not rebuild jar, just update hashes') + + + def main(): +@@ -157,13 +157,13 @@ + force_rebuild = sys.argv[1] == '--force-rebuild' + + if not hashes_modified() and not force_rebuild: +- print 'No modifications found, rebuild not required.' ++ print('No modifications found, rebuild not required.') + return + if not no_rebuild: + build_artifacts() + + update_hashes() +- print 'Done.' ++ print('Done.') + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/jsdoc_validator/run_tests.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/scripts/jsdoc_validator/run_tests.py 2025-01-16 02:26:08.583763005 +0800 +@@ -35,16 +35,16 @@ + proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) + (out, _) = proc.communicate() + if proc.returncode: +- print >> sys.stderr, error_template % proc.returncode ++ print(error_template % proc.returncode, file=sys.stderr) + sys.exit(proc.returncode) + return out + + + def help(): +- print 'usage: %s [option]' % os.path.basename(__file__) +- print 'Options:' +- print '--generate-golden: Re-generate golden file' +- print '--dump: Dump the test results to stdout' ++ print('usage: %s [option]' % os.path.basename(__file__)) ++ print('Options:') ++ print('--generate-golden: Re-generate golden file') ++ print('--dump: Dump the test results to stdout') + + + def main(): +@@ -62,7 +62,7 @@ + result = run_and_communicate(validator_command, "Error running validator: %d") + result = result.replace(script_path, "") # pylint: disable=E1103 + if need_dump: +- print result ++ print(result) + return + + if need_golden: +@@ -72,9 +72,9 @@ + with open(golden_file, 'rt') as golden: + golden_text = golden.read() + if golden_text == result: +- print 'OK' ++ print('OK') + else: +- print 'ERROR: Golden output mismatch' ++ print('ERROR: Golden output mismatch') + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/third_party/node/node.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/third_party/node/node.py 2025-01-16 02:26:08.583763005 +0800 +@@ -28,7 +28,7 @@ + stdout, stderr = process.communicate() + + if process.returncode is not 0: +- print('%s failed:\n%s' % (cmd, stdout + stderr)) ++ print(('%s failed:\n%s' % (cmd, stdout + stderr))) + exit(process.returncode) + + 
return stdout +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/third_party/pyjson5/src/benchmarks/run.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/third_party/pyjson5/src/benchmarks/run.py 2025-01-16 02:26:08.583763005 +0800 +@@ -13,7 +13,7 @@ + # See the License for the specific language governing permissions and + # limitations under the License. + +-from __future__ import print_function ++ + + import argparse + import json +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/third_party/pyjson5/src/json5/host.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/third_party/pyjson5/src/json5/host.py 2025-01-16 02:26:08.583763005 +0800 +@@ -21,7 +21,7 @@ + + if sys.version_info[0] < 3: + # pylint: disable=redefined-builtin +- str = unicode ++ str = str + + + class Host(object): +@@ -47,7 +47,7 @@ + def mkdtemp(self, **kwargs): + return tempfile.mkdtemp(**kwargs) + +- def print_(self, msg=u'', end=u'\n', stream=None): ++ def print_(self, msg='', end='\n', stream=None): + stream = stream or self.stdout + stream.write(str(msg) + end) + stream.flush() +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/third_party/pyjson5/src/json5/lib.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/third_party/pyjson5/src/json5/lib.py 2025-01-16 02:26:08.583763005 +0800 +@@ -21,7 +21,7 @@ + + if sys.version_info[0] < 3: + # pylint: disable=redefined-builtin +- str = unicode ++ str = str + + + def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, +@@ -125,12 +125,12 @@ + + t = type(obj) + if obj == True: +- return u'true' ++ return 'true' + elif obj == False: +- return u'false' ++ return 'false' + elif obj == None: +- return u'null' +- elif t == type('') or t == type(u''): ++ return 'null' ++ elif t == type('') or t == type(''): + single = "'" in obj + double = '"' in obj + if single and double: +@@ -142,13 +142,13 @@ + elif t is float or t is int: + return str(obj) + elif t is dict: +- return u'{' + u','.join([ +- _dumpkey(k) + u':' + dumps(v) for k, v in obj.items() ++ return '{' + ','.join([ ++ _dumpkey(k) + ':' + dumps(v) for k, v in list(obj.items()) + ]) + '}' + elif t is list: +- return u'[' + ','.join([dumps(el) for el in obj]) + u']' ++ return '[' + ','.join([dumps(el) for el in obj]) + ']' + else: # pragma: no cover +- return u'' ++ return '' + + + def dump(obj, fp, **kwargs): +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/third_party/pyjson5/src/json5/parser.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/third_party/pyjson5/src/json5/parser.py 2025-01-16 02:26:08.583763005 +0800 +@@ -5,9 +5,9 @@ + + if sys.version_info[0] < 3: + # pylint: disable=redefined-builtin +- chr = unichr ++ chr = chr + range = xrange +- str = unicode ++ str = str + + + class Parser(object): +@@ -191,10 +191,10 @@ + self._ch('\f') + + def _ws__c6_(self): +- self._ch('\u00a0') ++ self._ch('\\u00a0') + + def _ws__c7_(self): +- self._ch(u'\ufeff') ++ self._ch('\ufeff') + + def _ws__c8_(self): + self._push('ws__c8') +@@ -223,10 +223,10 @@ + self._ch('\n') + + def _eol__c3_(self): +- self._ch(u'\u2028') ++ self._ch('\u2028') + + def _eol__c4_(self): +- self._ch(u'\u2029') ++ self._ch('\u2029') + + def _comment_(self): + self._choose([self._comment__c0_, self._comment__c1_]) +@@ -705,10 +705,10 @@ + self._seq([self._bslash_, self._unicode_esc_]) + + 
def _id_continue__c8_(self): +- self._ch(u'\u200c') ++ self._ch('\u200c') + + def _id_continue__c9_(self): +- self._ch(u'\u200d') ++ self._ch('\u200d') + + def _num_literal_(self): + self._choose([self._num_literal__c0_, self._num_literal__c1_, +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/third_party/pyjson5/src/json5/fakes/host_fake.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/third_party/pyjson5/src/json5/fakes/host_fake.py 2025-01-16 02:26:08.583763005 +0800 +@@ -17,7 +17,7 @@ + + if sys.version_info[0] < 3: + # pylint: disable=redefined-builtin +- str = unicode ++ str = str + + + class FakeHost(object): +@@ -104,7 +104,7 @@ + self.dirs.add(self.last_tmpdir) + return self.last_tmpdir + +- def print_(self, msg=u'', end=u'\n', stream=None): ++ def print_(self, msg='', end='\n', stream=None): + stream = stream or self.stdout + stream.write(str(msg) + str(end)) + stream.flush() +--- a/src/3rdparty/chromium/third_party/devtools-frontend/src/third_party/typescript/ts_library.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/devtools-frontend/src/third_party/typescript/ts_library.py 2025-01-16 02:26:08.583763005 +0800 +@@ -121,7 +121,7 @@ + try: + json.dump(tsconfig, generated_tsconfig) + except Exception as e: +- print('Encountered error while writing generated tsconfig in location %s:' % tsconfig_output_location) ++ print(('Encountered error while writing generated tsconfig in location %s:' % tsconfig_output_location)) + print(e) + return 1 + +@@ -134,7 +134,7 @@ + found_errors, stderr = runTsc(tsconfig_location=tsconfig_output_location) + if found_errors: + print('') +- print('TypeScript compilation failed. Used tsconfig %s' % opts.tsconfig_output_location) ++ print(('TypeScript compilation failed. 
Used tsconfig %s' % opts.tsconfig_output_location))
+ print('')
+ print(stderr)
+ print('')
+--- a/src/3rdparty/chromium/third_party/dom_distiller_js/dist/python/plugin_pb2.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/dom_distiller_js/dist/python/plugin_pb2.py 2025-01-16 02:26:08.583763005 +0800
+@@ -36,7 +36,7 @@
+ _descriptor.FieldDescriptor(
+ name='parameter', full_name='google.protobuf.compiler.CodeGeneratorRequest.parameter', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+- has_default_value=False, default_value=unicode("", "utf-8"),
++ has_default_value=False, default_value="",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+@@ -71,21 +71,21 @@
+ _descriptor.FieldDescriptor(
+ name='name', full_name='google.protobuf.compiler.CodeGeneratorResponse.File.name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+- has_default_value=False, default_value=unicode("", "utf-8"),
++ has_default_value=False, default_value="",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='insertion_point', full_name='google.protobuf.compiler.CodeGeneratorResponse.File.insertion_point', index=1,
+ number=2, type=9, cpp_type=9, label=1,
+- has_default_value=False, default_value=unicode("", "utf-8"),
++ has_default_value=False, default_value="",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+ _descriptor.FieldDescriptor(
+ name='content', full_name='google.protobuf.compiler.CodeGeneratorResponse.File.content', index=2,
+ number=15, type=9, cpp_type=9, label=1,
+- has_default_value=False, default_value=unicode("", "utf-8"),
++ has_default_value=False, default_value="",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+@@ -112,7 +112,7 @@
+ _descriptor.FieldDescriptor(
+ name='error', full_name='google.protobuf.compiler.CodeGeneratorResponse.error', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+- has_default_value=False, default_value=unicode("", "utf-8"),
++ has_default_value=False, default_value="",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ options=None),
+@@ -142,17 +142,13 @@
+ DESCRIPTOR.message_types_by_name['CodeGeneratorRequest'] = _CODEGENERATORREQUEST
+ DESCRIPTOR.message_types_by_name['CodeGeneratorResponse'] = _CODEGENERATORRESPONSE
+
+-class CodeGeneratorRequest(_message.Message):
+- __metaclass__ = _reflection.GeneratedProtocolMessageType
++class CodeGeneratorRequest(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType):
+ DESCRIPTOR = _CODEGENERATORREQUEST
+
+ # @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorRequest)
+
+-class CodeGeneratorResponse(_message.Message):
+- __metaclass__ = _reflection.GeneratedProtocolMessageType
+-
+- class File(_message.Message):
+- __metaclass__ = _reflection.GeneratedProtocolMessageType
++class CodeGeneratorResponse(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType):
++ class File(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType):
+ DESCRIPTOR = _CODEGENERATORRESPONSE_FILE
+
+ # @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorResponse.File)
+--- 
a/src/3rdparty/chromium/third_party/dom_distiller_js/protoc_plugins/util/plugin_protos.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/dom_distiller_js/protoc_plugins/util/plugin_protos.py 2025-01-16 02:26:08.583763005 +0800 +@@ -37,7 +37,7 @@ + return dict((v.split('=') for v in self.proto.parameter.split(','))) + + def GetAllFiles(self): +- files = map(ProtoFile, self.proto.proto_file) ++ files = list(map(ProtoFile, self.proto.proto_file)) + for f in files: + assert f.Filename() in self.proto.file_to_generate + return files +@@ -123,7 +123,7 @@ + def GetDependencies(self): + # import is not supported + assert [] == self.proto.dependency +- return map(types.GetProtoFileForFilename, self.proto.dependency) ++ return list(map(types.GetProtoFileForFilename, self.proto.dependency)) + + def JavaFilename(self): + return '/'.join(self.JavaQualifiedOuterClass().split('.')) + '.java' +@@ -167,7 +167,7 @@ + return types.TitleCase(self.proto.name) + + def GetFields(self): +- return map(ProtoField, self.proto.field) ++ return list(map(ProtoField, self.proto.field)) + + def GetMessages(self): + return [ProtoMessage(n, self.qualified_types) +@@ -273,7 +273,7 @@ + return types.TitleCase(self.proto.name) + + def Values(self): +- return map(ProtoEnumValue, self.proto.value) ++ return list(map(ProtoEnumValue, self.proto.value)) + + + class ProtoEnumValue(object): +--- a/src/3rdparty/chromium/third_party/dom_distiller_js/protoc_plugins/util/writer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/dom_distiller_js/protoc_plugins/util/writer.py 2025-01-16 02:26:08.583763005 +0800 +@@ -31,7 +31,7 @@ + s = fmt.format(**kwargs) + s = s.rstrip('\n') + lines = s.split('\n') +- lines = map(lambda s: (' ' * self.indent + s).rstrip(), lines) ++ lines = [(' ' * self.indent + s).rstrip() for s in lines] + self.value.extend(lines) + + def AddError(self, fmt, **kwargs): +--- a/src/3rdparty/chromium/third_party/flatbuffers/src/android/jni/msbuild.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/flatbuffers/src/android/jni/msbuild.py 2025-01-16 02:26:08.584846320 +0800 +@@ -67,7 +67,7 @@ + msbuilds.append({ 'ver':match.group(), 'exe':file }) + msbuilds.sort(lambda x, y: compare_version(x['ver'], y['ver']), reverse=True) + if len(msbuilds) == 0: +- print "Unable to find MSBuild.\n" ++ print("Unable to find MSBuild.\n") + return -1; + cmd = [msbuilds[0]['exe']] + cmd.extend(sys.argv[1:]) +--- a/src/3rdparty/chromium/third_party/flatbuffers/src/python/setup.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/flatbuffers/src/python/setup.py 2025-01-16 02:26:08.584846320 +0800 +@@ -36,8 +36,8 @@ + # Publications using datetime versions should only be made from master + # to represent the HEAD moving forward. + version = datetime.utcnow().strftime('%Y%m%d%H%M%S') +- print("VERSION environment variable not set, using datetime instead: {}" +- .format(version)) ++ print(("VERSION environment variable not set, using datetime instead: {}" ++ .format(version))) + + return version + +--- a/src/3rdparty/chromium/third_party/flatbuffers/src/python/flatbuffers/compat.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/flatbuffers/src/python/flatbuffers/compat.py 2025-01-16 02:26:08.584846320 +0800 +@@ -32,7 +32,7 @@ + memoryview_type = memoryview + struct_bool_decl = "?" 
+ else: +- string_types = (unicode,) ++ string_types = (str,) + if PY26 or PY27: + binary_types = (str,bytearray) + else: +--- a/src/3rdparty/chromium/third_party/flatbuffers/src/samples/sample_binary.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/flatbuffers/src/samples/sample_binary.py 2025-01-16 02:26:08.584846320 +0800 +@@ -53,7 +53,7 @@ + + MyGame.Sample.Monster.MonsterStartInventoryVector(builder, 10) + # Note: Since we prepend the bytes, this loop iterates in reverse order. +- for i in reversed(range(0, 10)): ++ for i in reversed(list(range(0, 10))): + builder.PrependByte(i) + inv = builder.EndVector(10) + +@@ -106,13 +106,13 @@ + assert monster.Pos().Z() == 3.0 + + # Get and test the `inventory` FlatBuffer `vector`. +- for i in xrange(monster.InventoryLength()): ++ for i in range(monster.InventoryLength()): + assert monster.Inventory(i) == i + + # Get and test the `weapons` FlatBuffer `vector` of `table`s. + expected_weapon_names = ['Sword', 'Axe'] + expected_weapon_damages = [3, 5] +- for i in xrange(monster.WeaponsLength()): ++ for i in range(monster.WeaponsLength()): + assert monster.Weapons(i).Name() == expected_weapon_names[i] + assert monster.Weapons(i).Damage() == expected_weapon_damages[i] + +@@ -131,7 +131,7 @@ + assert union_weapon.Name() == "Axe" + assert union_weapon.Damage() == 5 + +- print 'The FlatBuffer was successfully created and verified!' ++ print('The FlatBuffer was successfully created and verified!') + + if __name__ == '__main__': + main() +--- a/src/3rdparty/chromium/third_party/freetype/src/src/tools/chktrcmp.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/freetype/src/src/tools/chktrcmp.py 2025-01-16 02:26:08.584846320 +0800 +@@ -24,20 +24,20 @@ + + for i in range( 1, len( sys.argv ) ): + if sys.argv[i].startswith( "--help" ): +- print "Usage: %s [option]" % sys.argv[0] +- print "Search used-but-defined and defined-but-not-used trace_XXX macros" +- print "" +- print " --help:" +- print " Show this help" +- print "" +- print " --src-dirs=dir1:dir2:..." +- print " Specify the directories of C source files to be checked" +- print " Default is %s" % ":".join( SRC_FILE_DIRS ) +- print "" +- print " --def-files=file1:file2:..." 
+- print " Specify the header files including FT_TRACE_DEF()" +- print " Default is %s" % ":".join( TRACE_DEF_FILES ) +- print "" ++ print("Usage: %s [option]" % sys.argv[0]) ++ print("Search used-but-defined and defined-but-not-used trace_XXX macros") ++ print("") ++ print(" --help:") ++ print(" Show this help") ++ print("") ++ print(" --src-dirs=dir1:dir2:...") ++ print(" Specify the directories of C source files to be checked") ++ print(" Default is %s" % ":".join( SRC_FILE_DIRS )) ++ print("") ++ print(" --def-files=file1:file2:...") ++ print(" Specify the header files including FT_TRACE_DEF()") ++ print(" Default is %s" % ":".join( TRACE_DEF_FILES )) ++ print("") + exit(0) + if sys.argv[i].startswith( "--src-dirs=" ): + SRC_FILE_DIRS = sys.argv[i].replace( "--src-dirs=", "", 1 ).split( ":" ) +@@ -86,8 +86,8 @@ + component_name = trace_def_pat_opn.sub( '', hdr_line ) + component_name = trace_def_pat_cls.sub( '', component_name ) + if component_name in KNOWN_COMPONENT: +- print "trace component %s is defined twice, see %s and fttrace.h:%d" % \ +- ( component_name, KNOWN_COMPONENT[component_name], line_num ) ++ print("trace component %s is defined twice, see %s and fttrace.h:%d" % \ ++ ( component_name, KNOWN_COMPONENT[component_name], line_num )) + else: + KNOWN_COMPONENT[component_name] = "%s:%d" % \ + ( os.path.basename( f ), line_num ) +@@ -97,18 +97,18 @@ + # Compare the used and defined trace macros. + # + +-print "# Trace component used in the implementations but not defined in fttrace.h." +-cmpnt = USED_COMPONENT.keys() ++print("# Trace component used in the implementations but not defined in fttrace.h.") ++cmpnt = list(USED_COMPONENT.keys()) + cmpnt.sort() + for c in cmpnt: + if c not in KNOWN_COMPONENT: +- print "Trace component %s (used in %s) is not defined." % ( c, ", ".join( USED_COMPONENT[c] ) ) ++ print("Trace component %s (used in %s) is not defined." % ( c, ", ".join( USED_COMPONENT[c] ) )) + +-print "# Trace component is defined but not used in the implementations." +-cmpnt = KNOWN_COMPONENT.keys() ++print("# Trace component is defined but not used in the implementations.") ++cmpnt = list(KNOWN_COMPONENT.keys()) + cmpnt.sort() + for c in cmpnt: + if c not in USED_COMPONENT: + if c != "any": +- print "Trace component %s (defined in %s) is not used." % ( c, KNOWN_COMPONENT[c] ) ++ print("Trace component %s (defined in %s) is not used." 
% ( c, KNOWN_COMPONENT[c] )) + +--- a/src/3rdparty/chromium/third_party/freetype/src/src/tools/cordic.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/freetype/src/src/tools/cordic.py 2025-01-16 02:26:08.584846320 +0800 +@@ -7,8 +7,8 @@ + shrink = 1.0 + comma = "" + +-print "" +-print "table of arctan( 1/2^n ) for PI = " + repr(units/65536.0) + " units" ++print("") ++print("table of arctan( 1/2^n ) for PI = " + repr(units/65536.0) + " units") + + for n in range(1,32): + +@@ -25,9 +25,9 @@ + + shrink /= math.sqrt( 1 + x*x ) + +-print +-print "shrink factor = " + repr( shrink ) +-print "shrink factor 2 = " + repr( int( shrink * (2**32) ) ) +-print "expansion factor = " + repr( 1/shrink ) +-print "" ++print() ++print("shrink factor = " + repr( shrink )) ++print("shrink factor 2 = " + repr( int( shrink * (2**32) ) )) ++print("expansion factor = " + repr( 1/shrink )) ++print("") + +--- a/src/3rdparty/chromium/third_party/freetype/src/src/tools/glnames.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/freetype/src/src/tools/glnames.py 2025-01-16 02:26:08.584846320 +0800 +@@ -5075,7 +5075,7 @@ + letter = word[0] + word = word[1:] + +- if self.children.has_key( letter ): ++ if letter in self.children: + child = self.children[letter] + else: + child = StringNode( letter, 0 ) +@@ -5085,7 +5085,7 @@ + + def optimize( self ): + # optimize all children first +- children = self.children.values() ++ children = list(self.children.values()) + self.children = {} + + for child in children: +@@ -5120,7 +5120,7 @@ + + if self.children: + margin += "| " +- for child in self.children.values(): ++ for child in list(self.children.values()): + child.dump_debug( write, margin ) + + def locate( self, index ): +@@ -5133,7 +5133,7 @@ + if self.value != 0: + index += 2 + +- children = self.children.values() ++ children = list(self.children.values()) + children.sort() + + index += 2 * len( children ) +@@ -5155,7 +5155,7 @@ + storage += struct.pack( "B", val ) + + # write the count +- children = self.children.values() ++ children = list(self.children.values()) + children.sort() + + count = len( children ) +@@ -5290,7 +5290,7 @@ + """main program body""" + + if len( sys.argv ) != 2: +- print __doc__ % sys.argv[0] ++ print(__doc__ % sys.argv[0]) + sys.exit( 1 ) + + file = open( sys.argv[1], "wb" ) +--- a/src/3rdparty/chromium/third_party/glslang/src/build_info.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/glslang/src/build_info.py 2025-01-16 02:26:08.585929635 +0800 +@@ -188,7 +188,7 @@ + except Exception as e: + print(e) + print("\nUsage:\n") +- print(usage.format(sys.argv[0])) ++ print((usage.format(sys.argv[0]))) + sys.exit(1) + + directory = args["directory"] +--- a/src/3rdparty/chromium/third_party/glslang/src/update_glslang_sources.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/glslang/src/update_glslang_sources.py 2025-01-16 02:26:08.585929635 +0800 +@@ -17,7 +17,7 @@ + """Get source files for Glslang and its dependencies from public repositories. 
+ """ + +-from __future__ import print_function ++ + + import argparse + import json +--- a/src/3rdparty/chromium/third_party/google-closure-library/closure/bin/calcdeps.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/google-closure-library/closure/bin/calcdeps.py 2025-01-16 02:26:08.585929635 +0800 +@@ -93,7 +93,7 @@ + result.append(os.path.join(directory, filename)) + else: + result.append(ref) +- return map(os.path.normpath, result) ++ return list(map(os.path.normpath, result)) + + + class DependencyInfo(object): +@@ -437,7 +437,7 @@ + inputs = options.inputs + if not inputs: # Parse stdin + logging.info('No inputs specified. Reading from stdin...') +- inputs = filter(None, [line.strip('\n') for line in sys.stdin.readlines()]) ++ inputs = [_f for _f in [line.strip('\n') for line in sys.stdin.readlines()] if _f] + + logging.info('Scanning files...') + inputs = ExpandDirectories(inputs) +--- a/src/3rdparty/chromium/third_party/google-closure-library/closure/bin/build/closurebuilder.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/google-closure-library/closure/bin/build/closurebuilder.py 2025-01-16 02:26:08.585929635 +0800 +@@ -186,7 +186,7 @@ + + + def _WrapGoogModuleSource(src): +- return (u'goog.loadModule(function(exports) {{' ++ return ('goog.loadModule(function(exports) {{' + '"use strict";' + '{0}' + '\n' # terminate any trailing single line comment. +--- a/src/3rdparty/chromium/third_party/google-closure-library/closure/bin/build/depstree.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/google-closure-library/closure/bin/build/depstree.py 2025-01-16 02:26:08.585929635 +0800 +@@ -168,7 +168,7 @@ + self._sources = sources + + def __str__(self): +- source_strs = map(str, self._sources) ++ source_strs = list(map(str, self._sources)) + + return ('Namespace "%s" provided more than once in sources:\n%s\n' % + (self._namespace, '\n'.join(source_strs))) +--- a/src/3rdparty/chromium/third_party/google-closure-library/closure/bin/build/depswriter_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/google-closure-library/closure/bin/build/depswriter_test.py 2025-01-16 02:26:08.585929635 +0800 +@@ -51,7 +51,7 @@ + + def testMakeDepsFileUnicode(self): + sources = {} +- sources['test.js'] = MockSource([u'A'], [u'B', u'C']) ++ sources['test.js'] = MockSource(['A'], ['B', 'C']) + deps = depswriter.MakeDepsFile(sources) + + self.assertEqual( +--- a/src/3rdparty/chromium/third_party/google-closure-library/scripts/http/simple_http_server.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/google-closure-library/scripts/http/simple_http_server.py 2025-01-16 02:26:08.585929635 +0800 +@@ -16,29 +16,29 @@ + """Simple HTTP server. + """ + +-import SimpleHTTPServer +-import SocketServer ++import http.server ++import socketserver + + PORT = 8080 + + + # Simple server to respond to both POST and GET requests. POST requests will + # just respond as normal GETs. +-class ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): ++class ServerHandler(http.server.SimpleHTTPRequestHandler): + + def do_GET(self): +- SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self) ++ http.server.SimpleHTTPRequestHandler.do_GET(self) + + def do_POST(self): +- SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self) ++ http.server.SimpleHTTPRequestHandler.do_GET(self) + + + Handler = ServerHandler + + # Allows use to restart server immediately after restarting it. 
+-SocketServer.ThreadingTCPServer.allow_reuse_address = True ++socketserver.ThreadingTCPServer.allow_reuse_address = True + +-httpd = SocketServer.TCPServer(("", PORT), Handler) ++httpd = socketserver.TCPServer(("", PORT), Handler) + +-print "Serving at: http://%s:%s" % ("localhost", PORT) ++print("Serving at: http://%s:%s" % ("localhost", PORT)) + httpd.serve_forever() +--- a/src/3rdparty/chromium/third_party/googletest/src/googlemock/scripts/fuse_gmock_files.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/googletest/src/googlemock/scripts/fuse_gmock_files.py 2025-01-16 02:26:08.585929635 +0800 +@@ -232,7 +232,7 @@ + # fuse_gmock_files.py GMOCK_ROOT_DIR OUTPUT_DIR + FuseGMock(sys.argv[1], sys.argv[2]) + else: +- print __doc__ ++ print(__doc__) + sys.exit(1) + + +--- a/src/3rdparty/chromium/third_party/googletest/src/googlemock/scripts/pump.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/googletest/src/googlemock/scripts/pump.py 2025-01-16 02:26:08.585929635 +0800 +@@ -62,7 +62,7 @@ + EXPRESSION has Python syntax. + """ + +-from __future__ import print_function ++ + + import io + import os +@@ -844,10 +844,10 @@ + print(output_str,) + else: + output_file = io.open(output_file_path, 'w') +- output_file.write(u'// This file was GENERATED by command:\n') +- output_file.write(u'// %s %s\n' % ++ output_file.write('// This file was GENERATED by command:\n') ++ output_file.write('// %s %s\n' % + (os.path.basename(__file__), os.path.basename(file_path))) +- output_file.write(u'// DO NOT EDIT BY HAND!!!\n\n') ++ output_file.write('// DO NOT EDIT BY HAND!!!\n\n') + output_file.write(output_str) + output_file.close() + +--- a/src/3rdparty/chromium/third_party/googletest/src/googlemock/scripts/generator/cpp/ast.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/googletest/src/googlemock/scripts/generator/cpp/ast.py 2025-01-16 02:26:08.585929635 +0800 +@@ -34,7 +34,7 @@ + import builtins + except ImportError: + # Python 2.x +- import __builtin__ as builtins ++ import builtins as builtins + + import sys + import traceback +@@ -53,10 +53,10 @@ + if not hasattr(builtins, 'next'): + # Support Python 2.5 and earlier. + def next(obj): +- return obj.next() ++ return obj.__next__() + + +-VISIBILITY_PUBLIC, VISIBILITY_PROTECTED, VISIBILITY_PRIVATE = range(3) ++VISIBILITY_PUBLIC, VISIBILITY_PROTECTED, VISIBILITY_PRIVATE = list(range(3)) + + FUNCTION_NONE = 0x00 + FUNCTION_CONST = 0x01 +@@ -1730,7 +1730,7 @@ + try: + for node in builder.Generate(): + if should_print(node): +- print(node.name) ++ print((node.name)) + except KeyboardInterrupt: + return + except: +@@ -1754,10 +1754,10 @@ + if source is None: + continue + +- print('Processing %s' % filename) ++ print(('Processing %s' % filename)) + builder = BuilderFromSource(source, filename) + try: +- entire_ast = filter(None, builder.Generate()) ++ entire_ast = [_f for _f in builder.Generate() if _f] + except KeyboardInterrupt: + return + except: +--- a/src/3rdparty/chromium/third_party/googletest/src/googlemock/scripts/generator/cpp/gmock_class.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/googletest/src/googlemock/scripts/generator/cpp/gmock_class.py 2025-01-16 02:26:08.585929635 +0800 +@@ -163,7 +163,7 @@ + # so we have to make up names here. + # TODO(paulchang): Handle non-type template arguments (e.g. + # template). 
+- template_arg_count = len(class_node.templated_types.keys()) ++ template_arg_count = len(list(class_node.templated_types.keys())) + template_args = ['T%d' % n for n in range(template_arg_count)] + template_decls = ['typename ' + arg for arg in template_args] + lines.append('template <' + ', '.join(template_decls) + '>') +@@ -230,7 +230,7 @@ + + builder = ast.BuilderFromSource(source, filename) + try: +- entire_ast = filter(None, builder.Generate()) ++ entire_ast = [_f for _f in builder.Generate() if _f] + except KeyboardInterrupt: + return + except: +--- a/src/3rdparty/chromium/third_party/googletest/src/googlemock/scripts/generator/cpp/keywords.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/googletest/src/googlemock/scripts/generator/cpp/keywords.py 2025-01-16 02:26:08.585929635 +0800 +@@ -22,7 +22,7 @@ + import builtins + except ImportError: + # Python 2.x +- import __builtin__ as builtins ++ import builtins as builtins + + + if not hasattr(builtins, 'set'): +--- a/src/3rdparty/chromium/third_party/googletest/src/googlemock/scripts/generator/cpp/tokenize.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/googletest/src/googlemock/scripts/generator/cpp/tokenize.py 2025-01-16 02:26:08.585929635 +0800 +@@ -22,7 +22,7 @@ + import builtins + except ImportError: + # Python 2.x +- import __builtin__ as builtins ++ import builtins as builtins + + + import sys +@@ -55,7 +55,7 @@ + + # Where the token originated from. This can be used for backtracking. + # It is always set to WHENCE_STREAM in this code. +-WHENCE_STREAM, WHENCE_QUEUE = range(2) ++WHENCE_STREAM, WHENCE_QUEUE = list(range(2)) + + + class Token(object): +@@ -276,7 +276,7 @@ + continue + + for token in GetTokens(source): +- print('%-12s: %s' % (token.token_type, token.name)) ++ print(('%-12s: %s' % (token.token_type, token.name))) + # print('\r%6.2f%%' % (100.0 * index / token.end),) + sys.stdout.write('\n') + +--- a/src/3rdparty/chromium/third_party/googletest/src/googlemock/scripts/generator/cpp/utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/googletest/src/googlemock/scripts/generator/cpp/utils.py 2025-01-16 02:26:08.585929635 +0800 +@@ -33,5 +33,5 @@ + fp.close() + except IOError: + if print_error: +- print('Error reading %s: %s' % (filename, sys.exc_info()[1])) ++ print(('Error reading %s: %s' % (filename, sys.exc_info()[1]))) + return None +--- a/src/3rdparty/chromium/third_party/googletest/src/googletest/scripts/fuse_gtest_files.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/googletest/src/googletest/scripts/fuse_gtest_files.py 2025-01-16 02:26:08.585929635 +0800 +@@ -93,8 +93,8 @@ + """ + + if not os.path.isfile(os.path.join(directory, relative_path)): +- print('ERROR: Cannot find %s in directory %s.' % (relative_path, +- directory)) ++ print(('ERROR: Cannot find %s in directory %s.' % (relative_path, ++ directory))) + print('Please either specify a valid project root directory ' + 'or omit it on the command line.') + sys.exit(1) +@@ -122,8 +122,8 @@ + # TODO(wan@google.com): The following user-interaction doesn't + # work with automated processes. We should provide a way for the + # Makefile to force overwriting the files. +- print('%s already exists in directory %s - overwrite it? (y/N) ' % +- (relative_path, output_dir)) ++ print(('%s already exists in directory %s - overwrite it? 
(y/N) ' %
++ (relative_path, output_dir)))
+ answer = sys.stdin.readline().strip()
+ if answer not in ['y', 'Y']:
+ print('ABORTED.')
+--- a/src/3rdparty/chromium/third_party/googletest/src/googletest/scripts/gen_gtest_pred_impl.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/googletest/src/googletest/scripts/gen_gtest_pred_impl.py 2025-01-16 02:26:08.585929635 +0800
+@@ -184,7 +184,7 @@
+ def OneTo(n):
+ """Returns the list [1, 2, 3, ..., n]."""
+
+- return range(1, n + 1)
++ return list(range(1, n + 1))
+
+
+ def Iter(n, format, sep=''):
+@@ -308,12 +308,12 @@
+ """Given a file path and a content string
+ overwrites it with the given content.
+ """
+- print 'Updating file %s . . .' % path
++ print('Updating file %s . . .' % path)
+- f = file(path, 'w+')
++ f = open(path, 'w+')
+- print >>f, content,
++ print(content, end=' ', file=f)
+ f.close()
+
+- print 'File %s has been updated.' % path
++ print('File %s has been updated.' % path)
+
+
+ def GenerateHeader(n):
+@@ -720,8 +720,8 @@
+ unit test."""
+
+ if len(sys.argv) != 2:
+- print __doc__
+- print 'Author: ' + __author__
++ print(__doc__)
++ print('Author: ' + __author__)
+ sys.exit(1)
+
+ n = int(sys.argv[1])
+--- a/src/3rdparty/chromium/third_party/googletest/src/googletest/scripts/release_docs.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/googletest/src/googletest/scripts/release_docs.py 2025-01-16 02:26:08.585929635 +0800
+@@ -127,11 +127,11 @@
+ def BranchFiles(self):
+ """Branches the .wiki files needed to be branched."""
+
+- print 'Branching %d .wiki files:' % (len(self.files_to_branch),)
++ print('Branching %d .wiki files:' % (len(self.files_to_branch),))
+ os.chdir(self.wiki_dir)
+ for f in self.files_to_branch:
+ command = 'svn cp %s %s%s' % (f, self.version_prefix, f)
+- print command
++ print(command)
+ os.system(command)
+
+ def UpdateLinksInBranchedFiles(self):
+@@ -139,7 +139,7 @@
+ for f in self.files_to_branch:
+ source_file = os.path.join(self.wiki_dir, f)
+ versioned_file = os.path.join(self.wiki_dir, self.version_prefix + f)
+- print 'Updating links in %s.' % (versioned_file,)
++ print('Updating links in %s.' % (versioned_file,))
+- text = file(source_file, 'r').read()
++ text = open(source_file, 'r').read()
+ new_text = self.search_for_re.sub(self.replace_with, text)
+- file(versioned_file, 'w').write(new_text)
++ open(versioned_file, 'w').write(new_text)
+--- a/src/3rdparty/chromium/third_party/googletest/src/googletest/scripts/upload.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/googletest/src/googletest/scripts/upload.py 2025-01-16 02:26:08.587012950 +0800
+@@ -46,7 +46,7 @@
+ # This code is derived from appcfg.py in the App Engine SDK (open source),
+ # and from ASPN recipe #146306. 
+-import cookielib
++import http.cookiejar
+ import getpass
+ import logging
+-import md5
++import hashlib
+@@ -57,9 +57,9 @@
+ import socket
+ import subprocess
+ import sys
+-import urllib
+-import urllib2
+-import urlparse
++import urllib.request, urllib.parse, urllib.error
++import urllib.request, urllib.error, urllib.parse
++import urllib.parse
+
+ try:
+ import readline
+@@ -94,15 +94,15 @@
+ last_email = last_email_file.readline().strip("\n")
+ last_email_file.close()
+ prompt += " [%s]" % last_email
+- except IOError, e:
++ except IOError as e:
+ pass
+- email = raw_input(prompt + ": ").strip()
++ email = input(prompt + ": ").strip()
+ if email:
+ try:
+ last_email_file = open(last_email_file_name, "w")
+ last_email_file.write(email)
+ last_email_file.close()
+- except IOError, e:
++ except IOError as e:
+ pass
+ else:
+ email = last_email
+@@ -118,20 +118,20 @@
+ msg: The string to print.
+ """
+ if verbosity > 0:
+- print msg
++ print(msg)
+
+
+ def ErrorExit(msg):
+ """Print an error message to stderr and exit."""
+- print >>sys.stderr, msg
++ print(msg, file=sys.stderr)
+ sys.exit(1)
+
+
+-class ClientLoginError(urllib2.HTTPError):
++class ClientLoginError(urllib.error.HTTPError):
+ """Raised to indicate there was an error authenticating with ClientLogin."""
+
+ def __init__(self, url, code, msg, headers, args):
+- urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
++ urllib.error.HTTPError.__init__(self, url, code, msg, headers, None)
+ self.args = args
+ self.reason = args["Error"]
+
+@@ -177,10 +177,10 @@
+ def _CreateRequest(self, url, data=None):
+ """Creates a new urllib request."""
+ logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
+- req = urllib2.Request(url, data=data)
++ req = urllib.request.Request(url, data=data)
+ if self.host_override:
+ req.add_header("Host", self.host_override)
+- for key, value in self.extra_headers.iteritems():
++ for key, value in self.extra_headers.items():
+ req.add_header(key, value)
+ return req
+
+@@ -204,7 +204,7 @@
+ account_type = "HOSTED"
+ req = self._CreateRequest(
+ url="https://www.google.com/accounts/ClientLogin",
+- data=urllib.urlencode({
++ data=urllib.parse.urlencode({
+ "Email": email,
+ "Passwd": password,
+ "service": "ah",
+@@ -218,7 +218,7 @@
+ response_dict = dict(x.split("=")
+ for x in response_body.split("\n") if x)
+ return response_dict["Auth"]
+- except urllib2.HTTPError, e:
++ except urllib.error.HTTPError as e:
+ if e.code == 403:
+ body = e.read()
+ response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
+@@ -240,14 +240,14 @@
+ continue_location = "http://localhost/"
+ args = {"continue": continue_location, "auth": auth_token}
+ req = self._CreateRequest("http://%s/_ah/login?%s" %
+- (self.host, urllib.urlencode(args)))
++ (self.host, urllib.parse.urlencode(args)))
+ try:
+ response = self.opener.open(req)
+- except urllib2.HTTPError, e:
++ except urllib.error.HTTPError as e:
+ response = e
+ if (response.code != 302 or
+ response.info()["location"] != continue_location):
+- raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
++ raise urllib.error.HTTPError(req.get_full_url(), response.code, response.msg,
+ response.headers, response.fp)
+ self.authenticated = True
+
+@@ -270,34 +270,34 @@
+ credentials = self.auth_function()
+ try:
+ auth_token = self._GetAuthToken(credentials[0], credentials[1])
+- except ClientLoginError, e:
++ except ClientLoginError as e:
+ if e.reason == "BadAuthentication":
+- print >>sys.stderr, "Invalid username or password." 
++ print("Invalid username or password.", file=sys.stderr) + continue + if e.reason == "CaptchaRequired": +- print >>sys.stderr, ( ++ print(( + "Please go to\n" + "https://www.google.com/accounts/DisplayUnlockCaptcha\n" +- "and verify you are a human. Then try again.") ++ "and verify you are a human. Then try again."), file=sys.stderr) + break + if e.reason == "NotVerified": +- print >>sys.stderr, "Account not verified." ++ print("Account not verified.", file=sys.stderr) + break + if e.reason == "TermsNotAgreed": +- print >>sys.stderr, "User has not agreed to TOS." ++ print("User has not agreed to TOS.", file=sys.stderr) + break + if e.reason == "AccountDeleted": +- print >>sys.stderr, "The user account has been deleted." ++ print("The user account has been deleted.", file=sys.stderr) + break + if e.reason == "AccountDisabled": +- print >>sys.stderr, "The user account has been disabled." ++ print("The user account has been disabled.", file=sys.stderr) + break + if e.reason == "ServiceDisabled": +- print >>sys.stderr, ("The user's access to the service has been " +- "disabled.") ++ print(("The user's access to the service has been " ++ "disabled."), file=sys.stderr) + break + if e.reason == "ServiceUnavailable": +- print >>sys.stderr, "The service is not available; try again later." ++ print("The service is not available; try again later.", file=sys.stderr) + break + raise + self._GetAuthCookie(auth_token) +@@ -334,7 +334,7 @@ + args = dict(kwargs) + url = "http://%s%s" % (self.host, request_path) + if args: +- url += "?" + urllib.urlencode(args) ++ url += "?" + urllib.parse.urlencode(args) + req = self._CreateRequest(url=url, data=payload) + req.add_header("Content-Type", content_type) + try: +@@ -342,7 +342,7 @@ + response = f.read() + f.close() + return response +- except urllib2.HTTPError, e: ++ except urllib.error.HTTPError as e: + if tries > 3: + raise + elif e.code == 401: +@@ -372,35 +372,35 @@ + Returns: + A urllib2.OpenerDirector object. + """ +- opener = urllib2.OpenerDirector() +- opener.add_handler(urllib2.ProxyHandler()) +- opener.add_handler(urllib2.UnknownHandler()) +- opener.add_handler(urllib2.HTTPHandler()) +- opener.add_handler(urllib2.HTTPDefaultErrorHandler()) +- opener.add_handler(urllib2.HTTPSHandler()) ++ opener = urllib.request.OpenerDirector() ++ opener.add_handler(urllib.request.ProxyHandler()) ++ opener.add_handler(urllib.request.UnknownHandler()) ++ opener.add_handler(urllib.request.HTTPHandler()) ++ opener.add_handler(urllib.request.HTTPDefaultErrorHandler()) ++ opener.add_handler(urllib.request.HTTPSHandler()) + opener.add_handler(urllib2.HTTPErrorProcessor()) + if self.save_cookies: + self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies") +- self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file) ++ self.cookie_jar = http.cookiejar.MozillaCookieJar(self.cookie_file) + if os.path.exists(self.cookie_file): + try: + self.cookie_jar.load() + self.authenticated = True + StatusUpdate("Loaded authentication cookies from %s" % + self.cookie_file) +- except (cookielib.LoadError, IOError): ++ except (http.cookiejar.LoadError, IOError): + # Failed to load cookies - just ignore them. + pass + else: + # Create an empty cookie file with mode 600 +- fd = os.open(self.cookie_file, os.O_CREAT, 0600) ++ fd = os.open(self.cookie_file, os.O_CREAT, 0o600) + os.close(fd) + # Always chmod the cookie file +- os.chmod(self.cookie_file, 0600) ++ os.chmod(self.cookie_file, 0o600) + else: + # Don't save cookies across runs of update.py. 
+- self.cookie_jar = cookielib.CookieJar()
+- opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
++ self.cookie_jar = http.cookiejar.CookieJar()
++ opener.add_handler(urllib.request.HTTPCookieProcessor(self.cookie_jar))
+ return opener
+
+
+@@ -575,7 +575,7 @@
+ line = p.stdout.readline()
+ if not line:
+ break
+- print line.strip("\n")
++ print(line.strip("\n"))
+ output_array.append(line)
+ output = "".join(output_array)
+ else:
+@@ -583,7 +583,7 @@
+ p.wait()
+ errout = p.stderr.read()
+ if print_output and errout:
+- print >>sys.stderr, errout
++ print(errout, file=sys.stderr)
+ p.stdout.close()
+ p.stderr.close()
+ return output, p.returncode
+@@ -629,11 +629,11 @@
+ """Show an "are you sure?" prompt if there are unknown files."""
+ unknown_files = self.GetUnknownFiles()
+ if unknown_files:
+- print "The following files are not added to version control:"
++ print("The following files are not added to version control:")
+ for line in unknown_files:
+- print line
++ print(line)
+ prompt = "Are you sure to continue?(y/N) "
+- answer = raw_input(prompt).strip()
++ answer = input(prompt).strip()
+ if answer != "y":
+ ErrorExit("User aborted")
+@@ -685,13 +685,13 @@
+ else:
+ type = "current"
+ if len(content) > MAX_UPLOAD_SIZE:
+- print ("Not uploading the %s file for %s because it's too large." %
+- (type, filename))
++ print(("Not uploading the %s file for %s because it's too large." %
++ (type, filename)))
+ file_too_large = True
+ content = ""
+- checksum = md5.new(content).hexdigest()
++ checksum = hashlib.md5(content).hexdigest()
+ if options.verbose > 0 and not file_too_large:
+- print "Uploading %s file for %s" % (type, filename)
++ print("Uploading %s file for %s" % (type, filename))
+ url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
+ form_fields = [("filename", filename),
+ ("status", status),
+@@ -713,7 +713,7 @@
+
+ patches = dict()
+ [patches.setdefault(v, k) for k, v in patch_list]
+- for filename in patches.keys():
++ for filename in list(patches.keys()):
+ base_content, new_content, is_binary, status = files[filename]
+ file_id_str = patches.get(filename)
+ if file_id_str.find("nobase") != -1:
+@@ -770,8 +770,8 @@
+ words = line.split()
+ if len(words) == 2 and words[0] == "URL:":
+ url = words[1]
+- scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
+- username, netloc = urllib.splituser(netloc)
++ scheme, netloc, path, params, query, fragment = urllib.parse.urlparse(url)
++ username, netloc = urllib.parse.splituser(netloc)
+ if username:
+ logging.info("Removed username from base URL")
+ if netloc.endswith("svn.python.org"):
+@@ -789,12 +789,12 @@
+ logging.info("Guessed CollabNet base = %s", base)
+ elif netloc.endswith(".googlecode.com"):
+ path = path + "/"
+- base = urlparse.urlunparse(("http", netloc, path, params,
++ base = urllib.parse.urlunparse(("http", netloc, path, params,
+ query, fragment))
+ logging.info("Guessed Google Code base = %s", base)
+ else:
+ path = path + "/"
+- base = urlparse.urlunparse((scheme, netloc, path, params,
++ base = urllib.parse.urlunparse((scheme, netloc, path, params,
+ query, fragment))
+ logging.info("Guessed base = %s", base)
+ return base
+@@ -1202,8 +1202,8 @@
+ rv = []
+ for patch in patches:
+ if len(patch[1]) > MAX_UPLOAD_SIZE:
+- print ("Not uploading the patch for " + patch[0] +
+- " because the file is too large.")
++ print(("Not uploading the patch for " + patch[0] +
++ " because the file is too large."))
+ continue
+ form_fields = [("filename", patch[0])]
+ if not options.download_base:
+@@ -1211,7 
@@
+ files = [("data", "data.diff", patch[1])]
+ ctype, body = EncodeMultipartFormData(form_fields, files)
+ url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
+- print "Uploading patch for " + patch[0]
++ print("Uploading patch for " + patch[0])
+ response_body = rpc_server.Send(url, body, content_type=ctype)
+ lines = response_body.splitlines()
+ if not lines or lines[0] != "OK":
+@@ -1238,7 +1238,8 @@
+ out, returncode = RunShellWithReturnCode(["hg", "root"])
+ if returncode == 0:
+ return MercurialVCS(options, out.strip())
+- except OSError, (errno, message):
++ except OSError as xxx_todo_changeme:
++ (errno, message) = xxx_todo_changeme.args
+ if errno != 2: # ENOENT -- they don't have hg installed.
+ raise
+
+@@ -1254,7 +1255,8 @@
+ "--is-inside-work-tree"])
+ if returncode == 0:
+ return GitVCS(options)
+- except OSError, (errno, message):
++ except OSError as xxx_todo_changeme1:
++ (errno, message) = xxx_todo_changeme1.args
+ if errno != 2: # ENOENT -- they don't have git installed.
+ raise
+
+@@ -1301,12 +1303,12 @@
+ data = vcs.GenerateDiff(args)
+ files = vcs.GetBaseFiles(data)
+ if verbosity >= 1:
+- print "Upload server:", options.server, "(change with -s/--server)"
++ print("Upload server:", options.server, "(change with -s/--server)")
+ if options.issue:
+ prompt = "Message describing this patch set: "
+ else:
+ prompt = "New issue subject: "
+- message = options.message or raw_input(prompt).strip()
++ message = options.message or input(prompt).strip()
+ if not message:
+ ErrorExit("A non-empty message is required")
+ rpc_server = GetRpcServer(options)
+@@ -1339,7 +1341,7 @@
+ # Send a hash of all the base file so the server can determine if a copy
+ # already exists in an earlier patchset.
+ base_hashes = ""
+- for file, info in files.iteritems():
++ for file, info in files.items():
+ if not info[0] is None:
+- checksum = md5.new(info[0]).hexdigest()
++ checksum = hashlib.md5(info[0]).hexdigest()
+ if base_hashes:
+@@ -1353,7 +1355,7 @@
+ if not options.download_base:
+ form_fields.append(("content_upload", "1"))
+ if len(data) > MAX_UPLOAD_SIZE:
+- print "Patch is large, so uploading file patches separately. 
++ print("Patch is large, so uploading file patches separately.") + uploaded_diff_file = [] + form_fields.append(("separate_patches", "1")) + else: +@@ -1393,7 +1395,7 @@ + try: + RealMain(sys.argv) + except KeyboardInterrupt: +- print ++ print() + StatusUpdate("Interrupted.") + sys.exit(1) + +--- a/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/check-c-linkage-decls.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/check-c-linkage-decls.py 2025-01-16 02:26:08.587012950 +0800 +@@ -14,13 +14,13 @@ + for x in HBHEADERS: + with open (x, 'r', encoding='utf-8') as f: content = f.read () + if ('HB_BEGIN_DECLS' not in content) or ('HB_END_DECLS' not in content): +- print ('Ouch, file %s does not have HB_BEGIN_DECLS / HB_END_DECLS, but it should' % x) ++ print(('Ouch, file %s does not have HB_BEGIN_DECLS / HB_END_DECLS, but it should' % x)) + stat = 1 + + for x in HBSOURCES: + with open (x, 'r', encoding='utf-8') as f: content = f.read () + if ('HB_BEGIN_DECLS' in content) or ('HB_END_DECLS' in content): +- print ('Ouch, file %s has HB_BEGIN_DECLS / HB_END_DECLS, but it shouldn\'t' % x) ++ print(('Ouch, file %s has HB_BEGIN_DECLS / HB_END_DECLS, but it shouldn\'t' % x)) + stat = 1 + + sys.exit (stat) +--- a/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/check-externs.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/check-externs.py 2025-01-16 02:26:08.587012950 +0800 +@@ -14,7 +14,7 @@ + with open (x, 'r', encoding='utf-8') as f: content = f.read () + for s in re.findall (r'\n.+\nhb_.+\n', content): + if not s.startswith ('\nHB_EXTERN '): +- print ('failure on:', s) ++ print(('failure on:', s)) + stat = 1 + + sys.exit (stat) +--- a/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/check-header-guards.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/check-header-guards.py 2025-01-16 02:26:08.587012950 +0800 +@@ -16,7 +16,7 @@ + tag = x.upper ().replace ('.', '_').replace ('-', '_') + with open (x, 'r', encoding='utf-8') as f: content = f.read () + if len (re.findall (tag + r'\b', content)) != 3: +- print ('Ouch, header file %s does not have correct preprocessor guards' % x) ++ print(('Ouch, header file %s does not have correct preprocessor guards' % x)) + stat = 1 + + sys.exit (stat) +--- a/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/check-includes.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/check-includes.py 2025-01-16 02:26:08.587012950 +0800 +@@ -17,7 +17,7 @@ + with open (x, 'r', encoding='utf-8') as f: content = f.read () + first = re.findall (r'#.*include.*', content)[0] + if first not in ['#include "hb.h"', '#include "hb-common.h"']: +- print ('failure on %s' % x) ++ print(('failure on %s' % x)) + stat = 1 + + print ('Checking that source files #include a private header first (or none)') +@@ -26,14 +26,14 @@ + includes = re.findall (r'#.*include.*', content) + if includes: + if not len (re.findall (r'"hb.*\.hh"', includes[0])): +- print ('failure on %s' % x) ++ print(('failure on %s' % x)) + stat = 1 + + print ('Checking that there is no #include ') + for x in HBHEADERS + HBSOURCES: + with open (x, 'r', encoding='utf-8') as f: content = f.read () + if re.findall ('#.*include.*<.*hb', content): +- print ('failure on %s' % x) ++ print(('failure on %s' % x)) + stat = 1 + + sys.exit (stat) +--- 
a/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/check-libstdc++.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/check-libstdc++.py 2025-01-16 02:26:08.587012950 +0800 +@@ -26,10 +26,10 @@ + so = os.path.join (libs, 'lib%s.%s' % (soname, suffix)) + if not os.path.exists (so): continue + +- print ('Checking that we are not linking to libstdc++ or libc++ in %s' % so) ++ print(('Checking that we are not linking to libstdc++ or libc++ in %s' % so)) + ldd_result = subprocess.check_output (ldd + [so]) + if (b'libstdc++' in ldd_result) or (b'libc++' in ldd_result): +- print ('Ouch, %s is linked to libstdc++ or libc++' % so) ++ print(('Ouch, %s is linked to libstdc++ or libc++' % so)) + stat = 1 + + tested = True +--- a/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/check-static-inits.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/check-static-inits.py 2025-01-16 02:26:08.587012950 +0800 +@@ -27,12 +27,12 @@ + # Checking that no object file has static initializers + for l in re.findall (r'^.*\.[cd]tors.*$', result, re.MULTILINE): + if not re.match (r'.*\b0+\b', l): +- print ('Ouch, %s has static initializers/finalizers' % obj) ++ print(('Ouch, %s has static initializers/finalizers' % obj)) + stat = 1 + + # Checking that no object file has lazy static C++ constructors/destructors or other such stuff + if ('__cxa_' in result) and ('__ubsan_handle' not in result): +- print ('Ouch, %s has lazy static C++ constructors/destructors or other such stuff' % obj) ++ print(('Ouch, %s has lazy static C++ constructors/destructors or other such stuff' % obj)) + stat = 1 + + sys.exit (stat) +--- a/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/check-symbols.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/check-symbols.py 2025-01-16 02:26:08.587012950 +0800 +@@ -41,17 +41,17 @@ + + prefix = (symprefix + os.path.basename (so)).replace ('libharfbuzz', 'hb').replace ('-', '_').split ('.')[0] + +- print ('Checking that %s does not expose internal symbols' % so) ++ print(('Checking that %s does not expose internal symbols' % so)) + suspicious_symbols = [x for x in EXPORTED_SYMBOLS if not re.match (r'^%s(_|$)' % prefix, x)] + if suspicious_symbols: +- print ('Ouch, internal symbols exposed:', suspicious_symbols) ++ print(('Ouch, internal symbols exposed:', suspicious_symbols)) + stat = 1 + + def_path = os.path.join (builddir, soname + '.def') + if not os.path.exists (def_path): +- print ('\'%s\' not found; skipping' % def_path) ++ print(('\'%s\' not found; skipping' % def_path)) + else: +- print ('Checking that %s has the same symbol list as %s' % (so, def_path)) ++ print(('Checking that %s has the same symbol list as %s' % (so, def_path))) + with open (def_path, 'r', encoding='utf-8') as f: def_file = f.read () + diff_result = list (difflib.context_diff ( + def_file.splitlines (), +@@ -61,7 +61,7 @@ + )) + + if diff_result: +- print ('\n'.join (diff_result)) ++ print(('\n'.join (diff_result))) + stat = 1 + + tested = True +--- a/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/gen-emoji-table.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/gen-emoji-table.py 2025-01-16 02:26:08.587012950 +0800 +@@ -47,7 +47,7 @@ + print (" * on file with this header:") + print (" *") + for l in header: +- print (" * %s" % (l.strip())) ++ print((" * %s" % (l.strip()))) + print (" */") + print () + print 
("#ifndef HB_UNICODE_EMOJI_TABLE_HH") +@@ -56,7 +56,7 @@ + print ('#include "hb-unicode.hh"') + print () + +-for typ, s in ranges.items(): ++for typ, s in list(ranges.items()): + if typ != "Extended_Pictographic": continue + + arr = dict() +--- a/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/gen-os2-unicode-ranges.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/gen-os2-unicode-ranges.py 2025-01-16 02:26:08.587012950 +0800 +@@ -45,6 +45,6 @@ + end = ("0x%X" % ranges[1]).rjust(8) + bit = ("%s" % ranges[2]).rjust(3) + +- print (" {%s, %s, %s}, // %s" % (start, end, bit, ranges[3])) ++ print((" {%s, %s, %s}, // %s" % (start, end, bit, ranges[3]))) + + print ("""};""") +--- a/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/gen-ucd-table.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/gen-ucd-table.py 2025-01-16 02:26:08.587012950 +0800 +@@ -37,15 +37,15 @@ + if u['dm'] != '#' and u['dt'] == 'can' and not (0xAC00 <= i < 0xAC00+11172)} + ce = {i for i,u in enumerate(ucd) if u['Comp_Ex'] == 'Y'} + +-assert not any(v for v in dm.values() if len(v) not in (1,2)) +-dm1 = sorted(set(v for v in dm.values() if len(v) == 1)) ++assert not any(v for v in list(dm.values()) if len(v) not in (1,2)) ++dm1 = sorted(set(v for v in list(dm.values()) if len(v) == 1)) + assert all((v[0] >> 16) in (0,2) for v in dm1) + dm1_p0_array = ['0x%04Xu' % (v[0] & 0xFFFF) for v in dm1 if (v[0] >> 16) == 0] + dm1_p2_array = ['0x%04Xu' % (v[0] & 0xFFFF) for v in dm1 if (v[0] >> 16) == 2] + dm1_order = {v:i+1 for i,v in enumerate(dm1)} + + dm2 = sorted((v+(i if i not in ce and not ccc[i] else 0,), v) +- for i,v in dm.items() if len(v) == 2) ++ for i,v in list(dm.items()) if len(v) == 2) + + filt = lambda v: ((v[0] & 0xFFFFF800) == 0x0000 and + (v[1] & 0xFFFFFF80) == 0x0300 and +@@ -95,7 +95,7 @@ + print(" *") + print(" * ./gen-ucd-table.py ucd.nounihan.grouped.xml") + print(" *") +-print(" * on file with this description:", ucdxml.description) ++print((" * on file with this description:", ucdxml.description)) + print(" */") + print() + print("#ifndef HB_UCD_TABLE_HH") +--- a/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/gen-vowel-constraints.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/gen-vowel-constraints.py 2025-01-16 02:26:08.587012950 +0800 +@@ -110,7 +110,7 @@ + cases = collections.defaultdict (set) + for first, rest in sorted (self._c.items ()): + cases[rest.__str__ (index + 1, depth + 2)].add (first) +- for body, labels in sorted (cases.items (), key=lambda b_ls: sorted (b_ls[1])[0]): ++ for body, labels in sorted (list(cases.items ()), key=lambda b_ls: sorted (b_ls[1])[0]): + for i, cp in enumerate (sorted (labels)): + if i % 4 == 0: + s.append (self._indent (depth + 1)) +@@ -150,15 +150,15 @@ + print ('/*') + print (' * The following functions are generated by running:') + print (' *') +-print (' * %s ms-use/IndicShapingInvalidCluster.txt Scripts.txt' % sys.argv[0]) ++print((' * %s ms-use/IndicShapingInvalidCluster.txt Scripts.txt' % sys.argv[0])) + print (' *') + print (' * on files with these headers:') + print (' *') + for line in constraints_header: +- print (' * %s' % line.strip ()) ++ print((' * %s' % line.strip ())) + print (' *') + for line in scripts_header: +- print (' * %s' % line.strip ()) ++ print((' * %s' % line.strip ())) + print (' */') + + print () +@@ -206,8 +206,8 @@ + print (' switch ((unsigned) buffer->props.script)') + 
print (' {') + +-for script, constraints in sorted (constraints.items (), key=lambda s_c: script_order[s_c[0]]): +- print (' case HB_SCRIPT_{}:'.format (script.upper ())) ++for script, constraints in sorted (list(constraints.items ()), key=lambda s_c: script_order[s_c[0]]): ++ print((' case HB_SCRIPT_{}:'.format (script.upper ()))) + print (' for (buffer->idx = 0; buffer->idx + 1 < count && buffer->successful;)') + print (' {') + print ('\tbool matched = false;') +--- a/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/sample.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/harfbuzz-ng/src/src/sample.py 2025-01-16 02:26:08.587012950 +0800 +@@ -60,4 +60,4 @@ + x_offset = pos.x_offset + y_offset = pos.y_offset + +- print ("gid%d=%d@%d,%d+%d" % (gid, cluster, x_advance, x_offset, y_offset)) ++ print(("gid%d=%d@%d,%d+%d" % (gid, cluster, x_advance, x_offset, y_offset))) +--- a/src/3rdparty/chromium/third_party/hyphenation-patterns/src/de/create_chr.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/hyphenation-patterns/src/de/create_chr.py 2025-01-16 02:26:08.587012950 +0800 +@@ -31,6 +31,6 @@ + for c in sorted(all_chars): + # Since Android uses the chr files to map uppercase to lowercase, + # map lowercase sharp s to uppercase sharp s instead of SS. +- uppercase = u'\u1E9E' if c == u'\u00DF' else c.upper() ++ uppercase = '\u1E9E' if c == '\u00DF' else c.upper() + chr_file.write('%s%s\n' % (c, uppercase)) + +--- a/src/3rdparty/chromium/third_party/icu/scripts/make_data_assembly.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/icu/scripts/make_data_assembly.py 2025-01-16 02:26:08.587012950 +0800 +@@ -99,4 +99,4 @@ + + output.write("\n") + output.close() +-print "Generated " + output_file ++print("Generated " + output_file) +--- a/src/3rdparty/chromium/third_party/icu/source/data/BUILDRULES.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/icu/source/data/BUILDRULES.py 2025-01-16 02:26:08.587012950 +0800 +@@ -3,7 +3,7 @@ + + # Python 2/3 Compatibility (ICU-20299) + # TODO(ICU-20301): Remove this. +-from __future__ import print_function ++ + + from icutools.databuilder import * + from icutools.databuilder import utils +--- a/src/3rdparty/chromium/third_party/icu/source/python/icutools/databuilder/__main__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/icu/source/python/icutools/databuilder/__main__.py 2025-01-16 02:26:08.587012950 +0800 +@@ -3,7 +3,7 @@ + + # Python 2/3 Compatibility (ICU-20299) + # TODO(ICU-20301): Remove this. +-from __future__ import print_function ++ + + import argparse + import glob as pyglob +--- a/src/3rdparty/chromium/third_party/icu/source/python/icutools/databuilder/filtration.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/icu/source/python/icutools/databuilder/filtration.py 2025-01-16 02:26:08.589179580 +0800 +@@ -3,7 +3,7 @@ + + # Python 2/3 Compatibility (ICU-20299) + # TODO(ICU-20301): Remove this. 
+-from __future__ import print_function ++ + + from abc import abstractmethod + from collections import defaultdict +@@ -353,7 +353,7 @@ + + new_requests = [] + i = 0 +- for rules, filter_files in unique_rules.items(): ++ for rules, filter_files in list(unique_rules.items()): + base_filter_file = filter_files[0] + new_requests += [ + PrintFileRequest( +@@ -406,7 +406,7 @@ + # Add the filter generation requests to the beginning so that by default + # they are made before genrb gets run (order is required by windirect) + new_requests = [] +- for filter_info in collected.values(): ++ for filter_info in list(collected.values()): + new_requests += filter_info.make_requests() + new_requests += all_requests + return new_requests +--- a/src/3rdparty/chromium/third_party/icu/source/python/icutools/databuilder/request_types.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/icu/source/python/icutools/databuilder/request_types.py 2025-01-16 02:26:08.589179580 +0800 +@@ -3,7 +3,7 @@ + + # Python 2/3 Compatibility (ICU-20299) + # TODO(ICU-20301): Remove this. +-from __future__ import print_function ++ + + from abc import abstractmethod + import copy +@@ -106,7 +106,7 @@ + + def _del_at(self, i): + del self.input_files[i] +- for _, v in self.format_with.items(): ++ for _, v in list(self.format_with.items()): + if isinstance(v, list): + assert len(v) == len(self.input_files) + 1 + del v[i] +@@ -177,7 +177,7 @@ + super(RepeatedExecutionRequest, self)._del_at(i) + del self.output_files[i] + del self.specific_dep_files[i] +- for _, v in self.repeat_with.items(): ++ for _, v in list(self.repeat_with.items()): + if isinstance(v, list): + del v[i] + +@@ -222,7 +222,7 @@ + def _del_at(self, i): + super(RepeatedOrSingleExecutionRequest, self)._del_at(i) + del self.output_files[i] +- for _, v in self.repeat_with.items(): ++ for _, v in list(self.repeat_with.items()): + if isinstance(v, list): + del v[i] + +--- a/src/3rdparty/chromium/third_party/icu/source/python/icutools/databuilder/utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/icu/source/python/icutools/databuilder/utils.py 2025-01-16 02:26:08.589179580 +0800 +@@ -3,7 +3,7 @@ + + # Python 2/3 Compatibility (ICU-20299) + # TODO(ICU-20301): Remove this. 
+-from __future__ import print_function ++ + + import sys + +@@ -46,7 +46,7 @@ + return LOCAL_DIRNAME_SUBSTITUTIONS[variable] + dirname[sep_idx:] + print( + "Error: Local directory must be absolute, or relative to one of: " + +- (", ".join("$%s" % v for v in LOCAL_DIRNAME_SUBSTITUTIONS.keys())), ++ (", ".join("$%s" % v for v in list(LOCAL_DIRNAME_SUBSTITUTIONS.keys()))), + file=sys.stderr + ) + exit(1) +@@ -76,13 +76,13 @@ + def repeated_execution_request_looper(request): + # dictionary of lists to list of dictionaries: + ld = [ +- dict(zip(request.repeat_with, t)) +- for t in zip(*request.repeat_with.values()) ++ dict(list(zip(request.repeat_with, t))) ++ for t in zip(*list(request.repeat_with.values())) + ] + if not ld: + # No special options given in repeat_with + ld = [{} for _ in range(len(request.input_files))] +- return zip(ld, request.specific_dep_files, request.input_files, request.output_files) ++ return list(zip(ld, request.specific_dep_files, request.input_files, request.output_files)) + + + def format_single_request_command(request, cmd_template, common_vars): +--- a/src/3rdparty/chromium/third_party/icu/source/python/icutools/databuilder/renderers/common_exec.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/icu/source/python/icutools/databuilder/renderers/common_exec.py 2025-01-16 02:26:08.589179580 +0800 +@@ -17,7 +17,7 @@ + for request in requests: + status = run_helper(request, common_vars, verbose=verbose, **kwargs) + if status != 0: +- print("!!! ERROR executing above command line: exit code %d" % status) ++ print(("!!! ERROR executing above command line: exit code %d" % status)) + return 1 + if verbose: + print("All data build commands executed") +@@ -43,7 +43,7 @@ + FILENAME = request.output_file.filename, + ) + if verbose: +- print("Printing to file: %s" % output_path) ++ print(("Printing to file: %s" % output_path)) + with open(output_path, "w") as f: + f.write(request.content) + return 0 +@@ -57,7 +57,7 @@ + FILENAME = request.output_file.filename, + ) + if verbose: +- print("Copying file to: %s" % output_path) ++ print(("Copying file to: %s" % output_path)) + shutil.copyfile(input_path, output_path) + return 0 + if isinstance(request, VariableRequest): +@@ -130,7 +130,7 @@ + os.environ["COMSPEC"] = 'powershell' + changed_windows_comspec = True + if verbose: +- print("Running: %s" % command_line) ++ print(("Running: %s" % command_line)) + returncode = subprocess.call( + command_line, + shell = True +--- a/src/3rdparty/chromium/third_party/icu/source/python/icutools/databuilder/renderers/makefile.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/icu/source/python/icutools/databuilder/renderers/makefile.py 2025-01-16 02:26:08.589179580 +0800 +@@ -3,7 +3,7 @@ + + # Python 2/3 Compatibility (ICU-20299) + # TODO(ICU-20301): Remove this. +-from __future__ import print_function ++ + + from . import * + from .. import * +--- a/src/3rdparty/chromium/third_party/icu/source/tools/icu-file-utf8-check.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/icu/source/tools/icu-file-utf8-check.py 2025-01-16 02:26:08.589179580 +0800 +@@ -23,7 +23,7 @@ + # Only files from the ICU github repository are checked. + # No changes are made to the repository; only the working copy will be altered. 
+ +-from __future__ import print_function ++ + + import sys + import os +--- a/src/3rdparty/chromium/third_party/inspector_protocol/check_protocol_compatibility.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/inspector_protocol/check_protocol_compatibility.py 2025-01-16 02:26:08.589179580 +0800 +@@ -45,7 +45,7 @@ + # + # Adding --show_changes to the command line prints out a list of valid public API changes. + +-from __future__ import print_function ++ + import copy + import os.path + import optparse +@@ -221,7 +221,7 @@ + for item in obj: + normalize_types(item, domain_name, types) + elif isinstance(obj, dict): +- for key, value in obj.items(): ++ for key, value in list(obj.items()): + if key == "$ref" and value.find(".") == -1: + obj[key] = "%s.%s" % (domain_name, value) + elif key == "id": +--- a/src/3rdparty/chromium/third_party/inspector_protocol/code_generator.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/inspector_protocol/code_generator.py 2025-01-16 02:26:08.589179580 +0800 +@@ -18,10 +18,10 @@ + import pdl + + try: +- unicode ++ str + except NameError: + # Define unicode for Py3 +- def unicode(s, *_): ++ def str(s, *_): + return s + + # Path handling for libraries and templates +@@ -39,7 +39,7 @@ + def json_to_object(data, output_base, config_base): + def json_object_hook(object_dict): + items = [(k, os.path.join(config_base, v) if k == "path" else v) +- for (k, v) in object_dict.items()] ++ for (k, v) in list(object_dict.items())] + items = [(k, os.path.join(output_base, v) if k == "output" else v) + for (k, v) in items] + keys, values = list(zip(*items)) +@@ -66,12 +66,12 @@ + + try: + cmdline_parser = argparse.ArgumentParser() +- cmdline_parser.add_argument("--output_base", type=unicode, required=True) +- cmdline_parser.add_argument("--jinja_dir", type=unicode, required=True) +- cmdline_parser.add_argument("--config", type=unicode, required=True) ++ cmdline_parser.add_argument("--output_base", type=str, required=True) ++ cmdline_parser.add_argument("--jinja_dir", type=str, required=True) ++ cmdline_parser.add_argument("--config", type=str, required=True) + cmdline_parser.add_argument("--config_value", default=[], action="append") + cmdline_parser.add_argument( +- "--inspector_protocol_dir", type=unicode, required=True, ++ "--inspector_protocol_dir", type=str, required=True, + help=("directory with code_generator.py and C++ encoding / binding " + "libraries, relative to the root of the source tree.")) + arg_options = cmdline_parser.parse_args() +@@ -682,9 +682,9 @@ + config, "base_string_adapter.cc")), base_string_adapter_cc_templates) + + # Make gyp / make generatos happy, otherwise make rebuilds world. 
+- inputs_ts = max(map(os.path.getmtime, inputs)) ++ inputs_ts = max(list(map(os.path.getmtime, inputs))) + up_to_date = True +- for output_file in outputs.keys(): ++ for output_file in list(outputs.keys()): + if (not os.path.exists(output_file) + or os.path.getmtime(output_file) < inputs_ts): + up_to_date = False +@@ -692,7 +692,7 @@ + if up_to_date: + sys.exit() + +- for file_name, content in outputs.items(): ++ for file_name, content in list(outputs.items()): + out_file = open(file_name, "w") + out_file.write(content) + out_file.close() +--- a/src/3rdparty/chromium/third_party/inspector_protocol/pdl.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/inspector_protocol/pdl.py 2025-01-16 02:26:08.589179580 +0800 +@@ -2,7 +2,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + import collections + import json + import os.path +--- a/src/3rdparty/chromium/third_party/inspector_protocol/roll.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/inspector_protocol/roll.py 2025-01-16 02:26:08.589179580 +0800 +@@ -3,7 +3,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + import argparse + import sys + import os +--- a/src/3rdparty/chromium/third_party/jinja2/_compat.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/jinja2/_compat.py 2025-01-16 02:26:08.589179580 +0800 +@@ -18,15 +18,15 @@ + + + if not PY2: +- unichr = chr ++ chr = chr + range_type = range + text_type = str + string_types = (str,) + integer_types = (int,) + +- iterkeys = lambda d: iter(d.keys()) +- itervalues = lambda d: iter(d.values()) +- iteritems = lambda d: iter(d.items()) ++ iterkeys = lambda d: iter(list(d.keys())) ++ itervalues = lambda d: iter(list(d.values())) ++ iteritems = lambda d: iter(list(d.items())) + + import pickle + from io import BytesIO, StringIO +@@ -47,23 +47,23 @@ + encode_filename = _identity + + else: +- unichr = unichr +- text_type = unicode ++ chr = chr ++ text_type = str + range_type = xrange +- string_types = (str, unicode) +- integer_types = (int, long) ++ string_types = (str, str) ++ integer_types = (int, int) + +- iterkeys = lambda d: d.iterkeys() +- itervalues = lambda d: d.itervalues() +- iteritems = lambda d: d.iteritems() ++ iterkeys = lambda d: iter(d.keys()) ++ itervalues = lambda d: iter(d.values()) ++ iteritems = lambda d: iter(d.items()) + +- import cPickle as pickle +- from cStringIO import StringIO as BytesIO, StringIO ++ import pickle as pickle ++ from io import StringIO as BytesIO, StringIO + NativeStringIO = BytesIO + + exec('def reraise(tp, value, tb=None):\n raise tp, value, tb') + +- from itertools import imap, izip, ifilter ++ + intern = intern + + def implements_iterator(cls): +@@ -77,7 +77,7 @@ + return cls + + def encode_filename(filename): +- if isinstance(filename, unicode): ++ if isinstance(filename, str): + return filename.encode('utf-8') + return filename + +@@ -96,4 +96,4 @@ + try: + from urllib.parse import quote_from_bytes as url_quote + except ImportError: +- from urllib import quote as url_quote ++ from urllib.parse import quote as url_quote +--- a/src/3rdparty/chromium/third_party/jinja2/asyncfilters.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/jinja2/asyncfilters.py 2025-01-16 02:26:08.589179580 +0800 +@@ -75,7 +75,7 @@ + + + 
@asyncfiltervariant(filters.do_join)
+-async def do_join(eval_ctx, value, d=u'', attribute=None):
++async def do_join(eval_ctx, value, d='', attribute=None):
+ return filters.do_join(eval_ctx, await auto_to_seq(value), d, attribute)
+ 
+ 
+--- a/src/3rdparty/chromium/third_party/jinja2/compiler.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/jinja2/compiler.py 2025-01-16 02:26:08.590262895 +0800
+@@ -688,7 +688,7 @@
+ public_names[0])
+ else:
+ self.writeline('context.exported_vars.update((%s))' %
+- ', '.join(imap(repr, public_names)))
++ ', '.join(map(repr, public_names)))
+ 
+ # -- Statement Visitors
+ 
+@@ -1019,7 +1019,7 @@
+ discarded_names[0])
+ else:
+ self.writeline('context.exported_vars.difference_'
+- 'update((%s))' % ', '.join(imap(repr, discarded_names)))
++ 'update((%s))' % ', '.join(map(repr, discarded_names)))
+ 
+ def visit_For(self, node, frame):
+ loop_frame = frame.inner()
+@@ -1206,7 +1206,7 @@
+ with_frame = frame.inner()
+ with_frame.symbols.analyze_node(node)
+ self.enter_frame(with_frame)
+- for idx, (target, expr) in enumerate(izip(node.targets, node.values)):
++ for idx, (target, expr) in enumerate(zip(node.targets, node.values)):
+ self.newline()
+ self.visit(target, with_frame)
+ self.write(' = ')
+--- a/src/3rdparty/chromium/third_party/jinja2/constants.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/jinja2/constants.py 2025-01-16 02:26:08.590262895 +0800
+@@ -11,7 +11,7 @@
+ 
+ 
+ #: list of lorem ipsum words used by the lipsum() helper function
+-LOREM_IPSUM_WORDS = u'''\
++LOREM_IPSUM_WORDS = '''\
+ a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
+ auctor augue bibendum blandit class commodo condimentum congue consectetuer
+ consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
+--- a/src/3rdparty/chromium/third_party/jinja2/debug.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/jinja2/debug.py 2025-01-16 02:26:08.590262895 +0800
+@@ -103,7 +103,7 @@
+ def render_as_html(self, full=False):
+ """Return a unicode string with the traceback as rendered HTML."""
+ from jinja2.debugrenderer import render_traceback
+- return u'%s\n\n<!--\n%s\n-->' % (
++ return '%s\n\n<!--\n%s\n-->' % (
+ render_traceback(self, full=full),
+ self.render_as_text().decode('utf-8', 'replace')
+ )
+--- a/src/3rdparty/chromium/third_party/jinja2/defaults.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/jinja2/defaults.py 2025-01-16 02:26:08.590262895 +0800
+@@ -53,4 +53,4 @@
+ 
+ 
+ # export all constants
+-__all__ = tuple(x for x in locals().keys() if x.isupper())
++__all__ = tuple(x for x in list(locals().keys()) if x.isupper())
+--- a/src/3rdparty/chromium/third_party/jinja2/environment.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/jinja2/environment.py 2025-01-16 02:26:08.590262895 +0800
+@@ -402,7 +402,7 @@
+ 
+ def iter_extensions(self):
+ """Iterates over the extensions by priority."""
+- return iter(sorted(self.extensions.values(),
++ return iter(sorted(list(self.extensions.values()),
+ key=lambda x: x.priority))
+ 
+ def getitem(self, obj, argument):
+@@ -674,11 +674,11 @@
+ import imp
+ import marshal
+ py_header = imp.get_magic() + \
+- u'\xff\xff\xff\xff'.encode('iso-8859-15')
++ '\xff\xff\xff\xff'.encode('iso-8859-15')
+ 
+ # Python 3.3 added a source filesize to the header
+ if sys.version_info >= (3, 3):
+ py_header += 
'\x00\x00\x00\x00'.encode('iso-8859-15') + + def write_file(filename, data, mode): + if zip: +@@ -754,7 +754,7 @@ + filter_func = lambda x: '.' in x and \ + x.rsplit('.', 1)[1] in extensions + if filter_func is not None: +- x = list(ifilter(filter_func, x)) ++ x = list(filter(filter_func, x)) + return x + + def handle_exception(self, exc_info=None, rendered=False, source_hint=None): +@@ -842,8 +842,8 @@ + from the function unchanged. + """ + if not names: +- raise TemplatesNotFound(message=u'Tried to select from an empty list ' +- u'of templates.') ++ raise TemplatesNotFound(message='Tried to select from an empty list ' ++ 'of templates.') + globals = self.make_globals(globals) + for name in names: + if isinstance(name, Template): +@@ -1124,7 +1124,7 @@ + @property + def debug_info(self): + """The debug info mapping.""" +- return [tuple(imap(int, x.split('='))) for x in ++ return [tuple(map(int, x.split('='))) for x in + self._debug_info.split('&')] + + def __repr__(self): +--- a/src/3rdparty/chromium/third_party/jinja2/exceptions.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/jinja2/exceptions.py 2025-01-16 02:26:08.590262895 +0800 +@@ -28,7 +28,7 @@ + return message.decode('utf-8', 'replace') + + def __unicode__(self): +- return self.message or u'' ++ return self.message or '' + else: + def __init__(self, message=None): + Exception.__init__(self, message) +@@ -71,8 +71,8 @@ + + def __init__(self, names=(), message=None): + if message is None: +- message = u'none of the templates given were found: ' + \ +- u', '.join(imap(text_type, names)) ++ message = 'none of the templates given were found: ' + \ ++ ', '.join(map(text_type, names)) + TemplateNotFound.__init__(self, names and names[-1] or None, message) + self.templates = list(names) + +@@ -113,7 +113,7 @@ + if line: + lines.append(' ' + line.strip()) + +- return u'\n'.join(lines) ++ return '\n'.join(lines) + + + class TemplateAssertionError(TemplateSyntaxError): +--- a/src/3rdparty/chromium/third_party/jinja2/ext.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/jinja2/ext.py 2025-01-16 02:26:08.590262895 +0800 +@@ -398,7 +398,7 @@ + if variables: + node = nodes.Mod(node, nodes.Dict([ + nodes.Pair(nodes.Const(key), value) +- for key, value in variables.items() ++ for key, value in list(variables.items()) + ])) + return nodes.Output([node]) + +--- a/src/3rdparty/chromium/third_party/jinja2/filters.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/jinja2/filters.py 2025-01-16 02:26:08.590262895 +0800 +@@ -107,7 +107,7 @@ + pass + if itemiter is None: + return unicode_urlencode(value) +- return u'&'.join(unicode_urlencode(k) + '=' + ++ return '&'.join(unicode_urlencode(k) + '=' + + unicode_urlencode(v, for_qs=True) + for k, v in itemiter) + +@@ -174,13 +174,13 @@ + As you can see it automatically prepends a space in front of the item + if the filter returned something unless the second parameter is false. 
+ """ +- rv = u' '.join( +- u'%s="%s"' % (escape(key), escape(value)) ++ rv = ' '.join( ++ '%s="%s"' % (escape(key), escape(value)) + for key, value in iteritems(d) + if value is not None and not isinstance(value, Undefined) + ) + if autospace and rv: +- rv = u' ' + rv ++ rv = ' ' + rv + if _eval_ctx.autoescape: + rv = Markup(rv) + return rv +@@ -239,7 +239,7 @@ + + return value + +- return sorted(value.items(), key=sort_func, reverse=reverse) ++ return sorted(list(value.items()), key=sort_func, reverse=reverse) + + + @environmentfilter +@@ -352,7 +352,7 @@ + return _min_or_max(environment, value, max, case_sensitive, attribute) + + +-def do_default(value, default_value=u'', boolean=False): ++def do_default(value, default_value='', boolean=False): + """If the value is undefined it will return the passed default value, + otherwise the value of the variable: + +@@ -375,7 +375,7 @@ + + + @evalcontextfilter +-def do_join(eval_ctx, value, d=u'', attribute=None): ++def do_join(eval_ctx, value, d='', attribute=None): + """Return a string which is the concatenation of the strings in the + sequence. The separator between elements is an empty string per + default, you can define it with the optional parameter: +@@ -398,11 +398,11 @@ + The `attribute` parameter was added. + """ + if attribute is not None: +- value = imap(make_attrgetter(eval_ctx.environment, attribute), value) ++ value = map(make_attrgetter(eval_ctx.environment, attribute), value) + + # no automatic escaping? joining is a lot eaiser then + if not eval_ctx.autoescape: +- return text_type(d).join(imap(text_type, value)) ++ return text_type(d).join(map(text_type, value)) + + # if the delimiter doesn't have an html representation we check + # if any of the items has. If yes we do a coercion to Markup +@@ -421,7 +421,7 @@ + return d.join(value) + + # no html involved, to normal joining +- return soft_unicode(d).join(imap(soft_unicode, value)) ++ return soft_unicode(d).join(map(soft_unicode, value)) + + + def do_center(value, width=80): +@@ -554,17 +554,17 @@ + ), stacklevel=2) + first = indentfirst + +- s += u'\n' # this quirk is necessary for splitlines method +- indention = u' ' * width ++ s += '\n' # this quirk is necessary for splitlines method ++ indention = ' ' * width + + if blank: +- rv = (u'\n' + indention).join(s.splitlines()) ++ rv = ('\n' + indention).join(s.splitlines()) + else: + lines = s.splitlines() + rv = lines.pop(0) + + if lines: +- rv += u'\n' + u'\n'.join( ++ rv += '\n' + '\n'.join( + indention + line if line else line for line in lines + ) + +@@ -869,7 +869,7 @@ + attributes. Also the `start` parameter was moved on to the right. 
+ """ + if attribute is not None: +- iterable = imap(make_attrgetter(environment, attribute), iterable) ++ iterable = map(make_attrgetter(environment, attribute), iterable) + return sum(iterable, start) + + +--- a/src/3rdparty/chromium/third_party/jinja2/lexer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/jinja2/lexer.py 2025-01-16 02:26:08.590262895 +0800 +@@ -56,55 +56,55 @@ + newline_re = re.compile(r'(\r\n|\r|\n)') + + # internal the tokens and keep references to them +-TOKEN_ADD = intern('add') +-TOKEN_ASSIGN = intern('assign') +-TOKEN_COLON = intern('colon') +-TOKEN_COMMA = intern('comma') +-TOKEN_DIV = intern('div') +-TOKEN_DOT = intern('dot') +-TOKEN_EQ = intern('eq') +-TOKEN_FLOORDIV = intern('floordiv') +-TOKEN_GT = intern('gt') +-TOKEN_GTEQ = intern('gteq') +-TOKEN_LBRACE = intern('lbrace') +-TOKEN_LBRACKET = intern('lbracket') +-TOKEN_LPAREN = intern('lparen') +-TOKEN_LT = intern('lt') +-TOKEN_LTEQ = intern('lteq') +-TOKEN_MOD = intern('mod') +-TOKEN_MUL = intern('mul') +-TOKEN_NE = intern('ne') +-TOKEN_PIPE = intern('pipe') +-TOKEN_POW = intern('pow') +-TOKEN_RBRACE = intern('rbrace') +-TOKEN_RBRACKET = intern('rbracket') +-TOKEN_RPAREN = intern('rparen') +-TOKEN_SEMICOLON = intern('semicolon') +-TOKEN_SUB = intern('sub') +-TOKEN_TILDE = intern('tilde') +-TOKEN_WHITESPACE = intern('whitespace') +-TOKEN_FLOAT = intern('float') +-TOKEN_INTEGER = intern('integer') +-TOKEN_NAME = intern('name') +-TOKEN_STRING = intern('string') +-TOKEN_OPERATOR = intern('operator') +-TOKEN_BLOCK_BEGIN = intern('block_begin') +-TOKEN_BLOCK_END = intern('block_end') +-TOKEN_VARIABLE_BEGIN = intern('variable_begin') +-TOKEN_VARIABLE_END = intern('variable_end') +-TOKEN_RAW_BEGIN = intern('raw_begin') +-TOKEN_RAW_END = intern('raw_end') +-TOKEN_COMMENT_BEGIN = intern('comment_begin') +-TOKEN_COMMENT_END = intern('comment_end') +-TOKEN_COMMENT = intern('comment') +-TOKEN_LINESTATEMENT_BEGIN = intern('linestatement_begin') +-TOKEN_LINESTATEMENT_END = intern('linestatement_end') +-TOKEN_LINECOMMENT_BEGIN = intern('linecomment_begin') +-TOKEN_LINECOMMENT_END = intern('linecomment_end') +-TOKEN_LINECOMMENT = intern('linecomment') +-TOKEN_DATA = intern('data') +-TOKEN_INITIAL = intern('initial') +-TOKEN_EOF = intern('eof') ++TOKEN_ADD = sys.intern('add') ++TOKEN_ASSIGN = sys.intern('assign') ++TOKEN_COLON = sys.intern('colon') ++TOKEN_COMMA = sys.intern('comma') ++TOKEN_DIV = sys.intern('div') ++TOKEN_DOT = sys.intern('dot') ++TOKEN_EQ = sys.intern('eq') ++TOKEN_FLOORDIV = sys.intern('floordiv') ++TOKEN_GT = sys.intern('gt') ++TOKEN_GTEQ = sys.intern('gteq') ++TOKEN_LBRACE = sys.intern('lbrace') ++TOKEN_LBRACKET = sys.intern('lbracket') ++TOKEN_LPAREN = sys.intern('lparen') ++TOKEN_LT = sys.intern('lt') ++TOKEN_LTEQ = sys.intern('lteq') ++TOKEN_MOD = sys.intern('mod') ++TOKEN_MUL = sys.intern('mul') ++TOKEN_NE = sys.intern('ne') ++TOKEN_PIPE = sys.intern('pipe') ++TOKEN_POW = sys.intern('pow') ++TOKEN_RBRACE = sys.intern('rbrace') ++TOKEN_RBRACKET = sys.intern('rbracket') ++TOKEN_RPAREN = sys.intern('rparen') ++TOKEN_SEMICOLON = sys.intern('semicolon') ++TOKEN_SUB = sys.intern('sub') ++TOKEN_TILDE = sys.intern('tilde') ++TOKEN_WHITESPACE = sys.intern('whitespace') ++TOKEN_FLOAT = sys.intern('float') ++TOKEN_INTEGER = sys.intern('integer') ++TOKEN_NAME = sys.intern('name') ++TOKEN_STRING = sys.intern('string') ++TOKEN_OPERATOR = sys.intern('operator') ++TOKEN_BLOCK_BEGIN = sys.intern('block_begin') ++TOKEN_BLOCK_END = sys.intern('block_end') ++TOKEN_VARIABLE_BEGIN = 
sys.intern('variable_begin') ++TOKEN_VARIABLE_END = sys.intern('variable_end') ++TOKEN_RAW_BEGIN = sys.intern('raw_begin') ++TOKEN_RAW_END = sys.intern('raw_end') ++TOKEN_COMMENT_BEGIN = sys.intern('comment_begin') ++TOKEN_COMMENT_END = sys.intern('comment_end') ++TOKEN_COMMENT = sys.intern('comment') ++TOKEN_LINESTATEMENT_BEGIN = sys.intern('linestatement_begin') ++TOKEN_LINESTATEMENT_END = sys.intern('linestatement_end') ++TOKEN_LINECOMMENT_BEGIN = sys.intern('linecomment_begin') ++TOKEN_LINECOMMENT_END = sys.intern('linecomment_end') ++TOKEN_LINECOMMENT = sys.intern('linecomment') ++TOKEN_DATA = sys.intern('data') ++TOKEN_INITIAL = sys.intern('initial') ++TOKEN_EOF = sys.intern('eof') + + # bind operators to token types + operators = { +@@ -235,7 +235,7 @@ + lineno, type, value = (property(itemgetter(x)) for x in range(3)) + + def __new__(cls, lineno, type, value): +- return tuple.__new__(cls, (lineno, intern(str(type)), value)) ++ return tuple.__new__(cls, (lineno, sys.intern(str(type)), value)) + + def __str__(self): + if self.type in reverse_operators: +--- a/src/3rdparty/chromium/third_party/jinja2/nativetypes.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/jinja2/nativetypes.py 2025-01-16 02:26:08.590262895 +0800 +@@ -23,7 +23,7 @@ + if len(head) == 1: + out = head[0] + else: +- out = u''.join([text_type(v) for v in chain(head, nodes)]) ++ out = ''.join([text_type(v) for v in chain(head, nodes)]) + + try: + return literal_eval(out) +--- a/src/3rdparty/chromium/third_party/jinja2/nodes.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/jinja2/nodes.py 2025-01-16 02:26:08.590262895 +0800 +@@ -137,7 +137,7 @@ + len(self.fields), + len(self.fields) != 1 and 's' or '' + )) +- for name, arg in izip(self.fields, fields): ++ for name, arg in zip(self.fields, fields): + setattr(self, name, arg) + for attr in self.attributes: + setattr(self, attr, attributes.pop(attr, None)) +--- a/src/3rdparty/chromium/third_party/jinja2/parser.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/jinja2/parser.py 2025-01-16 02:26:08.590262895 +0800 +@@ -61,7 +61,7 @@ + def _fail_ut_eof(self, name, end_token_stack, lineno): + expected = [] + for exprs in end_token_stack: +- expected.extend(imap(describe_token_expr, exprs)) ++ expected.extend(map(describe_token_expr, exprs)) + if end_token_stack: + currently_looking = ' or '.join( + "'%s'" % describe_token_expr(expr) +--- a/src/3rdparty/chromium/third_party/jinja2/runtime.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/jinja2/runtime.py 2025-01-16 02:26:08.591346209 +0800 +@@ -43,17 +43,17 @@ + def markup_join(seq): + """Concatenation that escapes if necessary and converts to unicode.""" + buf = [] +- iterator = imap(soft_unicode, seq) ++ iterator = map(soft_unicode, seq) + for arg in iterator: + buf.append(arg) + if hasattr(arg, '__html__'): +- return Markup(u'').join(chain(buf, iterator)) ++ return Markup('').join(chain(buf, iterator)) + return concat(buf) + + + def unicode_join(seq): + """Simple args to unicode conversion and concatenation.""" +- return concat(imap(text_type, seq)) ++ return concat(map(text_type, seq)) + + + def new_context(environment, template_name, blocks, vars=None, +@@ -315,7 +315,7 @@ + + # register the context as mapping if possible + try: +- from collections import Mapping ++ from collections.abc import Mapping + Mapping.register(Context) + except ImportError: + pass +@@ -657,7 +657,7 @@ + return id(type(self)) + 
+ def __str__(self): +- return u'' ++ return '' + + def __len__(self): + return 0 +@@ -666,9 +666,9 @@ + if 0: + yield None + +- def __nonzero__(self): ++ def __bool__(self): + return False +- __bool__ = __nonzero__ ++# __bool__ = __nonzero__ + + def __repr__(self): + return 'Undefined' +@@ -774,12 +774,12 @@ + def __str__(self): + if self._undefined_hint is None: + if self._undefined_obj is missing: +- return u'{{ %s }}' % self._undefined_name ++ return '{{ %s }}' % self._undefined_name + return '{{ no such element: %s[%r] }}' % ( + object_type_repr(self._undefined_obj), + self._undefined_name + ) +- return u'{{ undefined value printed: %s }}' % self._undefined_hint ++ return '{{ undefined value printed: %s }}' % self._undefined_hint + + + @implements_to_string +--- a/src/3rdparty/chromium/third_party/jinja2/sandbox.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/jinja2/sandbox.py 2025-01-16 02:26:08.591346209 +0800 +@@ -14,7 +14,7 @@ + """ + import types + import operator +-from collections import Mapping ++from collections.abc import Mapping + from jinja2.environment import Environment + from jinja2.exceptions import SecurityError + from jinja2._compat import string_types, PY2 +@@ -65,7 +65,7 @@ + # on python 2.x we can register the user collection types + try: + from UserDict import UserDict, DictMixin +- from UserList import UserList ++ from collections.abc import UserList + _mutable_mapping_types += (UserDict, DictMixin) + _mutable_set_types += (UserList,) + except ImportError: +@@ -79,7 +79,7 @@ + pass + + #: register Python 2.6 abstract base classes +-from collections import MutableSet, MutableMapping, MutableSequence ++from collections.abc import MutableSet, MutableMapping, MutableSequence + _mutable_set_types += (MutableSet,) + _mutable_mapping_types += (MutableMapping,) + _mutable_sequence_types += (MutableSequence,) +@@ -148,7 +148,7 @@ + """A range that can't generate ranges with a length of more than + MAX_RANGE items. + """ +- rng = range(*args) ++ rng = list(range(*args)) + if len(rng) > MAX_RANGE: + raise OverflowError('range too big, maximum size for range is %d' % + MAX_RANGE) +--- a/src/3rdparty/chromium/third_party/jinja2/utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/jinja2/utils.py 2025-01-16 02:26:08.591346209 +0800 +@@ -36,7 +36,7 @@ + # internal code + internal_code = set() + +-concat = u''.join ++concat = ''.join + + _slash_escape = '\\/' not in json.dumps('/') + +@@ -232,7 +232,7 @@ + middle = '%s' % (middle, middle) + if lead + middle + trail != word: + words[i] = lead + middle + trail +- return u''.join(words) ++ return ''.join(words) + + + def generate_lorem_ipsum(n=5, html=True, min=20, max=100): +@@ -272,7 +272,7 @@ + p.append(word) + + # ensure that the paragraph ends with a dot. +- p = u' '.join(p) ++ p = ' '.join(p) + if p.endswith(','): + p = p[:-1] + '.' + elif not p.endswith('.'): +@@ -280,8 +280,8 @@ + result.append(p) + + if not html: +- return u'\n\n'.join(result) +- return Markup(u'\n'.join(u'
<p>%s</p>' % escape(x) for x in result))
++ return '\n\n'.join(result)
++ return Markup('\n'.join('<p>%s</p>
' % escape(x) for x in result)) + + + def unicode_urlencode(obj, charset='utf-8', for_qs=False): +@@ -449,15 +449,15 @@ + + def iteritems(self): + """Iterate over all items.""" +- return iter(self.items()) ++ return iter(list(self.items())) + + def values(self): + """Return a list of all values.""" +- return [x[1] for x in self.items()] ++ return [x[1] for x in list(self.items())] + + def itervalue(self): + """Iterate over all values.""" +- return iter(self.values()) ++ return iter(list(self.values())) + + def keys(self): + """Return a list of all keys ordered by most recent usage.""" +@@ -482,7 +482,7 @@ + + # register the LRU cache as mutable mapping if possible + try: +- from collections import MutableMapping ++ from collections.abc import MutableMapping + MutableMapping.register(LRUCache) + except ImportError: + pass +@@ -563,10 +563,10 @@ + if dumper is None: + dumper = json.dumps + rv = dumper(obj, **kwargs) \ +- .replace(u'<', u'\\u003c') \ +- .replace(u'>', u'\\u003e') \ +- .replace(u'&', u'\\u0026') \ +- .replace(u"'", u'\\u0027') ++ .replace('<', '\\u003c') \ ++ .replace('>', '\\u003e') \ ++ .replace('&', '\\u0026') \ ++ .replace("'", '\\u0027') + return Markup(rv) + + +@@ -589,7 +589,7 @@ + """Returns the current item.""" + return self.items[self.pos] + +- def next(self): ++ def __next__(self): + """Goes one item ahead and returns it.""" + rv = self.current + self.pos = (self.pos + 1) % len(self.items) +@@ -601,14 +601,14 @@ + class Joiner(object): + """A joining helper for templates.""" + +- def __init__(self, sep=u', '): ++ def __init__(self, sep=', '): + self.sep = sep + self.used = False + + def __call__(self): + if not self.used: + self.used = True +- return u'' ++ return '' + return self.sep + + +--- a/src/3rdparty/chromium/third_party/jsoncpp/source/amalgamate.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/jsoncpp/source/amalgamate.py 2025-01-16 02:26:08.591346209 +0800 +@@ -83,7 +83,7 @@ + header.add_text("#endif //ifndef JSON_AMALGAMATED_H_INCLUDED") + + target_header_path = os.path.join(os.path.dirname(target_source_path), header_include_path) +- print("Writing amalgamated header to %r" % target_header_path) ++ print(("Writing amalgamated header to %r" % target_header_path)) + header.write_to(target_header_path) + + base, ext = os.path.splitext(header_include_path) +@@ -107,7 +107,7 @@ + + target_forward_header_path = os.path.join(os.path.dirname(target_source_path), + forward_header_include_path) +- print("Writing amalgamated forward header to %r" % target_forward_header_path) ++ print(("Writing amalgamated forward header to %r" % target_forward_header_path)) + header.write_to(target_forward_header_path) + + print("Amalgamating source...") +@@ -129,7 +129,7 @@ + source.add_file(os.path.join(SRC_PATH, "json_value.cpp")) + source.add_file(os.path.join(SRC_PATH, "json_writer.cpp")) + +- print("Writing amalgamated source to %r" % target_source_path) ++ print(("Writing amalgamated source to %r" % target_source_path)) + source.write_to(target_source_path) + + def main(): +--- a/src/3rdparty/chromium/third_party/jsoncpp/source/doxybuild.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/jsoncpp/source/doxybuild.py 2025-01-16 02:26:08.591346209 +0800 +@@ -1,7 +1,7 @@ + """Script to generate doxygen documentation. 
+ """ +-from __future__ import print_function +-from __future__ import unicode_literals ++ ++ + from devtools import tarball + from contextlib import contextmanager + import subprocess +--- a/src/3rdparty/chromium/third_party/jsoncpp/source/devtools/antglob.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/jsoncpp/source/devtools/antglob.py 2025-01-16 02:26:08.591346209 +0800 +@@ -5,7 +5,7 @@ + # recognized in your jurisdiction. + # See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE + +-from __future__ import print_function ++ + from dircache import listdir + import re + import fnmatch +@@ -92,7 +92,7 @@ + return re.compile(''.join(rex)) + + def _as_list(l): +- if isinstance(l, basestring): ++ if isinstance(l, str): + return l.split() + return l + +--- a/src/3rdparty/chromium/third_party/jsoncpp/source/devtools/batchbuild.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/jsoncpp/source/devtools/batchbuild.py 2025-01-16 02:26:08.591346209 +0800 +@@ -1,4 +1,4 @@ +-from __future__ import print_function ++ + import collections + import itertools + import json +@@ -32,7 +32,7 @@ + for values_by_name in self.prepend_envs: + for var, value in list(values_by_name.items()): + var = var.upper() +- if type(value) is unicode: ++ if type(value) is str: + value = value.encode(sys.getdefaultencoding()) + if var in environ: + environ[var] = value + os.pathsep + environ[var] +--- a/src/3rdparty/chromium/third_party/jsoncpp/source/devtools/fixeol.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/jsoncpp/source/devtools/fixeol.py 2025-01-16 02:26:08.591346209 +0800 +@@ -3,7 +3,7 @@ + # recognized in your jurisdiction. + # See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE + +-from __future__ import print_function ++ + import os.path + import sys + +--- a/src/3rdparty/chromium/third_party/jsoncpp/source/devtools/licenseupdater.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/jsoncpp/source/devtools/licenseupdater.py 2025-01-16 02:26:08.591346209 +0800 +@@ -1,6 +1,6 @@ + """Updates the license text in source file. + """ +-from __future__ import print_function ++ + + # An existing license is found if the file starts with the string below, + # and ends with the first blank line. +--- a/src/3rdparty/chromium/third_party/jstemplate/compile.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/jstemplate/compile.py 2025-01-16 02:26:08.591346209 +0800 +@@ -5,9 +5,9 @@ + + """Combines the javascript files needed by jstemplate into a single file.""" + +-import httplib ++import http.client + import sys +-import urllib ++import urllib.request, urllib.parse, urllib.error + + + def main(): +@@ -21,8 +21,8 @@ + # Define the parameters for the POST request and encode them in a URL-safe + # format. See http://code.google.com/closure/compiler/docs/api-ref.html for + # API reference. +- params = urllib.urlencode( +- map(lambda src: ('js_code', file(src).read()), srcs) + ++ params = urllib.parse.urlencode( ++ [('js_code', file(src).read()) for src in srcs] + + [ + ('compilation_level', 'ADVANCED_OPTIMIZATIONS'), + ('output_format', 'text'), +@@ -31,7 +31,7 @@ + + # Always use the following value for the Content-type header. 
+ headers = {'Content-type': 'application/x-www-form-urlencoded'} +- conn = httplib.HTTPSConnection('closure-compiler.appspot.com') ++ conn = http.client.HTTPSConnection('closure-compiler.appspot.com') + conn.request('POST', '/compile', params, headers) + response = conn.getresponse() + out_file = file(out, 'w') +--- a/src/3rdparty/chromium/third_party/libaddressinput/chromium/tools/require_fields.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libaddressinput/chromium/tools/require_fields.py 2025-01-16 02:26:08.591346209 +0800 +@@ -4,7 +4,7 @@ + # found in the LICENSE file. + + import json +-import urllib ++import urllib.request, urllib.parse, urllib.error + from sys import exit as sys_exit + + +@@ -38,12 +38,12 @@ + for country in _COUNTRIES: + url = _I18N_URL % country + try: +- data = json.load(urllib.urlopen(url)) ++ data = json.load(urllib.request.urlopen(url)) + except Exception as e: +- print 'Error: could not load %s' % url ++ print('Error: could not load %s' % url) + return 1 + if 'require' in data: +- print '%s: %s' % (country, data['require']) ++ print('%s: %s' % (country, data['require'])) + return 0 + + +--- a/src/3rdparty/chromium/third_party/libaom/source/libaom/tools/cpplint.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libaom/source/libaom/tools/cpplint.py 2025-01-16 02:26:08.592429524 +0800 +@@ -408,7 +408,7 @@ + # False positives include C-style multi-line comments and multi-line strings + # but those have always been troublesome for cpplint. + _ALT_TOKEN_REPLACEMENT_PATTERN = re.compile( +- r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)') ++ r'[ =()](' + ('|'.join(list(_ALT_TOKEN_REPLACEMENT.keys()))) + r')(?=[ (]|$)') + + + # These constants define types of headers for use with +@@ -745,7 +745,7 @@ + + def PrintErrorCounts(self): + """Print a summary of errors by category, and the total.""" +- for category, count in self.errors_by_category.iteritems(): ++ for category, count in self.errors_by_category.items(): + sys.stderr.write('Category \'%s\' errors found: %d\n' % + (category, count)) + sys.stderr.write('Total errors found: %d\n' % self.error_count) +@@ -1230,7 +1230,7 @@ + On finding matching endchar: (index just after matching endchar, 0) + Otherwise: (-1, new depth at end of this line) + """ +- for i in xrange(startpos, len(line)): ++ for i in range(startpos, len(line)): + if line[i] == startchar: + depth += 1 + elif line[i] == endchar: +@@ -1303,7 +1303,7 @@ + On finding matching startchar: (index at matching startchar, 0) + Otherwise: (-1, new depth at beginning of this line) + """ +- for i in xrange(endpos, -1, -1): ++ for i in range(endpos, -1, -1): + if line[i] == endchar: + depth += 1 + elif line[i] == startchar: +@@ -1363,7 +1363,7 @@ + + # We'll say it should occur by line 10. Don't forget there's a + # dummy line at the front. +- for line in xrange(1, min(len(lines), 11)): ++ for line in range(1, min(len(lines), 11)): + if re.search(r'Copyright', lines[line], re.I): break + else: # means no copyright line was found + error(filename, 0, 'legal/copyright', 5, +@@ -1488,7 +1488,7 @@ + error: The function to call with any errors found. 
+ """ + for linenum, line in enumerate(lines): +- if u'\ufffd' in line: ++ if '\ufffd' in line: + error(filename, linenum, 'readability/utf8', 5, + 'Line contains invalid UTF-8 (or Unicode replacement character).') + if '\0' in line: +@@ -2312,7 +2312,7 @@ + + if starting_func: + body_found = False +- for start_linenum in xrange(linenum, clean_lines.NumLines()): ++ for start_linenum in range(linenum, clean_lines.NumLines()): + start_line = lines[start_linenum] + joined_line += ' ' + start_line.lstrip() + if Search(r'(;|})', start_line): # Declarations and trivial functions +@@ -2835,7 +2835,7 @@ + trailing_text = '' + if endpos > -1: + trailing_text = endline[endpos:] +- for offset in xrange(endlinenum + 1, ++ for offset in range(endlinenum + 1, + min(endlinenum + 3, clean_lines.NumLines() - 1)): + trailing_text += clean_lines.elided[offset] + if not Match(r'^[\s}]*[{.;,)<\]]', trailing_text): +@@ -3205,7 +3205,7 @@ + expression = lines[linenum][start_pos + 1:end_pos - 1] + else: + expression = lines[linenum][start_pos + 1:] +- for i in xrange(linenum + 1, end_line): ++ for i in range(linenum + 1, end_line): + expression += lines[i] + expression += last_line[0:end_pos - 1] + +@@ -3333,7 +3333,7 @@ + The width of the line in column positions, accounting for Unicode + combining characters and wide characters. + """ +- if isinstance(line, unicode): ++ if isinstance(line, str): + width = 0 + for uc in unicodedata.normalize('NFC', line): + if unicodedata.east_asian_width(uc) in ('W', 'F'): +@@ -3663,7 +3663,7 @@ + + # Give opening punctuations to get the matching close-punctuations. + matching_punctuation = {'(': ')', '{': '}', '[': ']'} +- closing_punctuation = set(matching_punctuation.itervalues()) ++ closing_punctuation = set(matching_punctuation.values()) + + # Find the position to start extracting text. + match = re.search(start_pattern, text, re.M) +@@ -4078,7 +4078,7 @@ + # Found the matching < on an earlier line, collect all + # pieces up to current line. + line = '' +- for i in xrange(startline, linenum + 1): ++ for i in range(startline, linenum + 1): + line += clean_lines.elided[i].strip() + + # Check for non-const references in function parameters. A single '&' may +@@ -4117,7 +4117,7 @@ + # Don't see a whitelisted function on this line. Actually we + # didn't see any function name on this line, so this is likely a + # multi-line parameter list. Try a bit harder to catch this case. +- for i in xrange(2): ++ for i in range(2): + if (linenum > i and + Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])): + check_params = False +@@ -4392,7 +4392,7 @@ + required = {} # A map of header name to linenumber and the template entity. + # Example of required: { '': (1219, 'less<>') } + +- for linenum in xrange(clean_lines.NumLines()): ++ for linenum in range(clean_lines.NumLines()): + line = clean_lines.elided[linenum] + if not line or line[0] == '#': + continue +@@ -4440,7 +4440,7 @@ + + # include_state is modified during iteration, so we iterate over a copy of + # the keys. 
+- header_keys = include_state.keys() ++ header_keys = list(include_state.keys()) + for header in header_keys: + (same_module, common_path) = FilesBelongToSameModule(abs_filename, header) + fullpath = common_path + header +@@ -4560,7 +4560,7 @@ + + RemoveMultiLineComments(filename, lines, error) + clean_lines = CleansedLines(lines) +- for line in xrange(clean_lines.NumLines()): ++ for line in range(clean_lines.NumLines()): + ProcessLine(filename, file_extension, clean_lines, line, + include_state, function_state, nesting_state, error, + extra_check_functions) +--- a/src/3rdparty/chromium/third_party/libaom/source/libaom/tools/gen_constrained_tokenset.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libaom/source/libaom/tools/gen_constrained_tokenset.py 2025-01-16 02:26:08.592429524 +0800 +@@ -108,7 +108,7 @@ + for q in range(1, 256): + parray = get_quantized_spareto(q / 256., beta, bits, first_token) + assert parray.sum() == 2**bits +- print '{', ', '.join('%d' % i for i in parray), '},' ++ print('{', ', '.join('%d' % i for i in parray), '},') + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/third_party/libaom/source/libaom/tools/intersect-diffs.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libaom/source/libaom/tools/intersect-diffs.py 2025-01-16 02:26:08.592429524 +0800 +@@ -71,7 +71,7 @@ + break + + if out_hunks: +- print FormatDiffHunks(out_hunks) ++ print(FormatDiffHunks(out_hunks)) + sys.exit(1) + + if __name__ == "__main__": +--- a/src/3rdparty/chromium/third_party/libaom/source/libaom/tools/lint-hunks.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libaom/source/libaom/tools/lint-hunks.py 2025-01-16 02:26:08.592429524 +0800 +@@ -12,7 +12,7 @@ + """Performs style checking on each diff hunk.""" + import getopt + import os +-import StringIO ++import io + import subprocess + import sys + +@@ -65,17 +65,17 @@ + try: + try: + opts, args = getopt.getopt(argv[1:], SHORT_OPTIONS, LONG_OPTIONS) +- except getopt.error, msg: ++ except getopt.error as msg: + raise Usage(msg) + + # process options + for o, _ in opts: + if o in ("-h", "--help"): +- print __doc__ ++ print(__doc__) + sys.exit(0) + + if args and len(args) > 1: +- print __doc__ ++ print(__doc__) + sys.exit(0) + + # Find the fully qualified path to the root of the tree +@@ -97,7 +97,7 @@ + file_affected_line_map = {} + p = Subprocess(diff_cmd, stdout=subprocess.PIPE) + stdout = p.communicate()[0] +- for hunk in diff.ParseDiffHunks(StringIO.StringIO(stdout)): ++ for hunk in diff.ParseDiffHunks(io.StringIO(stdout)): + filename = hunk.right.filename[2:] + if filename not in file_affected_line_map: + file_affected_line_map[filename] = set() +@@ -105,7 +105,7 @@ + + # Run each affected file through cpplint + lint_failed = False +- for filename, affected_lines in file_affected_line_map.iteritems(): ++ for filename, affected_lines in file_affected_line_map.items(): + if filename.split(".")[-1] not in ("c", "h", "cc"): + continue + +@@ -129,17 +129,17 @@ + continue + warning_line_num = int(fields[1]) + if warning_line_num in affected_lines: +- print "%s:%d:%s"%(filename, warning_line_num, +- ":".join(fields[2:])) ++ print("%s:%d:%s"%(filename, warning_line_num, ++ ":".join(fields[2:]))) + lint_failed = True + + # Set exit code if any relevant lint errors seen + if lint_failed: + return 1 + +- except Usage, err: +- print >>sys.stderr, err +- print >>sys.stderr, "for help use --help" ++ except Usage as err: ++ print(err, file=sys.stderr) 
++ print("for help use --help", file=sys.stderr) + return 2 + + if __name__ == "__main__": +--- a/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/cpplint.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/cpplint.py 2025-01-16 02:26:08.593512839 +0800 +@@ -408,7 +408,7 @@ + # False positives include C-style multi-line comments and multi-line strings + # but those have always been troublesome for cpplint. + _ALT_TOKEN_REPLACEMENT_PATTERN = re.compile( +- r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)') ++ r'[ =()](' + ('|'.join(list(_ALT_TOKEN_REPLACEMENT.keys()))) + r')(?=[ (]|$)') + + + # These constants define types of headers for use with +@@ -745,7 +745,7 @@ + + def PrintErrorCounts(self): + """Print a summary of errors by category, and the total.""" +- for category, count in self.errors_by_category.iteritems(): ++ for category, count in self.errors_by_category.items(): + sys.stderr.write('Category \'%s\' errors found: %d\n' % + (category, count)) + sys.stderr.write('Total errors found: %d\n' % self.error_count) +@@ -1230,7 +1230,7 @@ + On finding matching endchar: (index just after matching endchar, 0) + Otherwise: (-1, new depth at end of this line) + """ +- for i in xrange(startpos, len(line)): ++ for i in range(startpos, len(line)): + if line[i] == startchar: + depth += 1 + elif line[i] == endchar: +@@ -1303,7 +1303,7 @@ + On finding matching startchar: (index at matching startchar, 0) + Otherwise: (-1, new depth at beginning of this line) + """ +- for i in xrange(endpos, -1, -1): ++ for i in range(endpos, -1, -1): + if line[i] == endchar: + depth += 1 + elif line[i] == startchar: +@@ -1363,7 +1363,7 @@ + + # We'll say it should occur by line 10. Don't forget there's a + # dummy line at the front. +- for line in xrange(1, min(len(lines), 11)): ++ for line in range(1, min(len(lines), 11)): + if re.search(r'Copyright', lines[line], re.I): break + else: # means no copyright line was found + error(filename, 0, 'legal/copyright', 5, +@@ -1488,7 +1488,7 @@ + error: The function to call with any errors found. + """ + for linenum, line in enumerate(lines): +- if u'\ufffd' in line: ++ if '\ufffd' in line: + error(filename, linenum, 'readability/utf8', 5, + 'Line contains invalid UTF-8 (or Unicode replacement character).') + if '\0' in line: +@@ -2312,7 +2312,7 @@ + + if starting_func: + body_found = False +- for start_linenum in xrange(linenum, clean_lines.NumLines()): ++ for start_linenum in range(linenum, clean_lines.NumLines()): + start_line = lines[start_linenum] + joined_line += ' ' + start_line.lstrip() + if Search(r'(;|})', start_line): # Declarations and trivial functions +@@ -2835,7 +2835,7 @@ + trailing_text = '' + if endpos > -1: + trailing_text = endline[endpos:] +- for offset in xrange(endlinenum + 1, ++ for offset in range(endlinenum + 1, + min(endlinenum + 3, clean_lines.NumLines() - 1)): + trailing_text += clean_lines.elided[offset] + if not Match(r'^[\s}]*[{.;,)<\]]', trailing_text): +@@ -3205,7 +3205,7 @@ + expression = lines[linenum][start_pos + 1:end_pos - 1] + else: + expression = lines[linenum][start_pos + 1:] +- for i in xrange(linenum + 1, end_line): ++ for i in range(linenum + 1, end_line): + expression += lines[i] + expression += last_line[0:end_pos - 1] + +@@ -3333,7 +3333,7 @@ + The width of the line in column positions, accounting for Unicode + combining characters and wide characters. 
+ """ +- if isinstance(line, unicode): ++ if isinstance(line, str): + width = 0 + for uc in unicodedata.normalize('NFC', line): + if unicodedata.east_asian_width(uc) in ('W', 'F'): +@@ -3663,7 +3663,7 @@ + + # Give opening punctuations to get the matching close-punctuations. + matching_punctuation = {'(': ')', '{': '}', '[': ']'} +- closing_punctuation = set(matching_punctuation.itervalues()) ++ closing_punctuation = set(matching_punctuation.values()) + + # Find the position to start extracting text. + match = re.search(start_pattern, text, re.M) +@@ -4078,7 +4078,7 @@ + # Found the matching < on an earlier line, collect all + # pieces up to current line. + line = '' +- for i in xrange(startline, linenum + 1): ++ for i in range(startline, linenum + 1): + line += clean_lines.elided[i].strip() + + # Check for non-const references in function parameters. A single '&' may +@@ -4117,7 +4117,7 @@ + # Don't see a whitelisted function on this line. Actually we + # didn't see any function name on this line, so this is likely a + # multi-line parameter list. Try a bit harder to catch this case. +- for i in xrange(2): ++ for i in range(2): + if (linenum > i and + Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])): + check_params = False +@@ -4392,7 +4392,7 @@ + required = {} # A map of header name to linenumber and the template entity. + # Example of required: { '': (1219, 'less<>') } + +- for linenum in xrange(clean_lines.NumLines()): ++ for linenum in range(clean_lines.NumLines()): + line = clean_lines.elided[linenum] + if not line or line[0] == '#': + continue +@@ -4440,7 +4440,7 @@ + + # include_state is modified during iteration, so we iterate over a copy of + # the keys. +- header_keys = include_state.keys() ++ header_keys = list(include_state.keys()) + for header in header_keys: + (same_module, common_path) = FilesBelongToSameModule(abs_filename, header) + fullpath = common_path + header +@@ -4560,7 +4560,7 @@ + + RemoveMultiLineComments(filename, lines, error) + clean_lines = CleansedLines(lines) +- for line in xrange(clean_lines.NumLines()): ++ for line in range(clean_lines.NumLines()): + ProcessLine(filename, file_extension, clean_lines, line, + include_state, function_state, nesting_state, error, + extra_check_functions) +--- a/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/intersect-diffs.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/intersect-diffs.py 2025-01-16 02:26:08.593512839 +0800 +@@ -69,7 +69,7 @@ + break + + if out_hunks: +- print FormatDiffHunks(out_hunks) ++ print(FormatDiffHunks(out_hunks)) + sys.exit(1) + + if __name__ == "__main__": +--- a/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/lint-hunks.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/lint-hunks.py 2025-01-16 02:26:08.593512839 +0800 +@@ -10,7 +10,7 @@ + """Performs style checking on each diff hunk.""" + import getopt + import os +-import StringIO ++import io + import subprocess + import sys + +@@ -63,17 +63,17 @@ + try: + try: + opts, args = getopt.getopt(argv[1:], SHORT_OPTIONS, LONG_OPTIONS) +- except getopt.error, msg: ++ except getopt.error as msg: + raise Usage(msg) + + # process options + for o, _ in opts: + if o in ("-h", "--help"): +- print __doc__ ++ print(__doc__) + sys.exit(0) + + if args and len(args) > 1: +- print __doc__ ++ print(__doc__) + sys.exit(0) + + # Find the fully qualified path to the root of the tree +@@ -95,7 +95,7 @@ + 
file_affected_line_map = {} + p = Subprocess(diff_cmd, stdout=subprocess.PIPE) + stdout = p.communicate()[0] +- for hunk in diff.ParseDiffHunks(StringIO.StringIO(stdout)): ++ for hunk in diff.ParseDiffHunks(io.StringIO(stdout)): + filename = hunk.right.filename[2:] + if filename not in file_affected_line_map: + file_affected_line_map[filename] = set() +@@ -103,7 +103,7 @@ + + # Run each affected file through cpplint + lint_failed = False +- for filename, affected_lines in file_affected_line_map.iteritems(): ++ for filename, affected_lines in file_affected_line_map.items(): + if filename.split(".")[-1] not in ("c", "h", "cc"): + continue + if filename.startswith("third_party"): +@@ -129,17 +129,17 @@ + continue + warning_line_num = int(fields[1]) + if warning_line_num in affected_lines: +- print "%s:%d:%s"%(filename, warning_line_num, +- ":".join(fields[2:])) ++ print("%s:%d:%s"%(filename, warning_line_num, ++ ":".join(fields[2:]))) + lint_failed = True + + # Set exit code if any relevant lint errors seen + if lint_failed: + return 1 + +- except Usage, err: +- print >>sys.stderr, err +- print >>sys.stderr, "for help use --help" ++ except Usage as err: ++ print(err, file=sys.stderr) ++ print("for help use --help", file=sys.stderr) + return 2 + + if __name__ == "__main__": +--- a/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/3D-Reconstruction/MotionEST/Anandan.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/3D-Reconstruction/MotionEST/Anandan.py 2025-01-16 02:26:08.593512839 +0800 +@@ -36,7 +36,7 @@ + self.c_mins = [] + self.e_maxs = [] + self.e_mins = [] +- for l in xrange(self.levels + 1): ++ for l in range(self.levels + 1): + c_max, c_min, e_max, e_min = self.get_curvature(self.cur_Is[l]) + self.c_maxs.append(c_max) + self.c_mins.append(c_min) +@@ -71,11 +71,11 @@ + c_min = np.zeros((self.num_row, self.num_col)) + e_max = np.zeros((self.num_row, self.num_col, 2)) + e_min = np.zeros((self.num_row, self.num_col, 2)) +- for r in xrange(self.num_row): +- for c in xrange(self.num_col): ++ for r in range(self.num_row): ++ for c in range(self.num_col): + h11, h12, h21, h22 = 0, 0, 0, 0 +- for i in xrange(r * self.blk_sz, r * self.blk_sz + self.blk_sz): +- for j in xrange(c * self.blk_sz, c * self.blk_sz + self.blk_sz): ++ for i in range(r * self.blk_sz, r * self.blk_sz + self.blk_sz): ++ for j in range(c * self.blk_sz, c * self.blk_sz + self.blk_sz): + if 0 <= i < self.height - 1 and 0 <= j < self.width - 1: + Ix = I[i][j + 1] - I[i][j] + Iy = I[i + 1][j] - I[i][j] +@@ -99,8 +99,8 @@ + + def get_ssd(self, cur_I, ref_I, center, mv): + ssd = 0 +- for r in xrange(int(center[0]), int(center[0]) + self.blk_sz): +- for c in xrange(int(center[1]), int(center[1]) + self.blk_sz): ++ for r in range(int(center[0]), int(center[0]) + self.blk_sz): ++ for c in range(int(center[1]), int(center[1]) + self.blk_sz): + if 0 <= r < self.height and 0 <= c < self.width: + tr, tc = r + int(mv[0]), c + int(mv[1]) + if 0 <= tr < self.height and 0 <= tc < self.width: +@@ -119,8 +119,8 @@ + def region_match(self, l, last_mvs, radius): + mvs = np.zeros((self.num_row, self.num_col, 2)) + min_ssds = np.zeros((self.num_row, self.num_col)) +- for r in xrange(self.num_row): +- for c in xrange(self.num_col): ++ for r in range(self.num_row): ++ for c in range(self.num_col): + center = np.array([r * self.blk_sz, c * self.blk_sz]) + #use overlap hierarchy policy + init_mvs = [] +@@ -134,8 +134,8 @@ + min_ssd = None + min_mv = None + for init_mv in init_mvs: +- 
for i in xrange(-2, 3): +- for j in xrange(-2, 3): ++ for i in range(-2, 3): ++ for j in range(-2, 3): + mv = init_mv + np.array([i, j]) * radius + ssd = self.get_ssd(self.cur_Is[l], self.ref_Is[l], center, mv) + if min_ssd is None or ssd < min_ssd: +@@ -159,8 +159,8 @@ + c_min = self.c_mins[l] + e_max = self.e_maxs[l] + e_min = self.e_mins[l] +- for r in xrange(self.num_row): +- for c in xrange(self.num_col): ++ for r in range(self.num_row): ++ for c in range(self.num_col): + w_max = c_max[r, c] / ( + self.k1 + self.k2 * min_ssds[r, c] + self.k3 * c_max[r, c]) + w_min = c_min[r, c] / ( +@@ -182,12 +182,12 @@ + + def motion_field_estimation(self): + last_mvs = None +- for l in xrange(self.levels, -1, -1): ++ for l in range(self.levels, -1, -1): + mvs, min_ssds = self.region_match(l, last_mvs, 2**l) + uvs = np.zeros(mvs.shape) +- for _ in xrange(self.max_iter): ++ for _ in range(self.max_iter): + uvs = self.smooth(uvs, mvs, min_ssds, l) + last_mvs = uvs +- for r in xrange(self.num_row): +- for c in xrange(self.num_col): ++ for r in range(self.num_row): ++ for c in range(self.num_col): + self.mf[r, c] = uvs[r, c] +--- a/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/3D-Reconstruction/MotionEST/Exhaust.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/3D-Reconstruction/MotionEST/Exhaust.py 2025-01-16 02:26:08.593512839 +0800 +@@ -44,8 +44,8 @@ + ref_x = cur_x + ref_y = cur_y + #search all validate positions and select the one with minimum distortion +- for y in xrange(cur_y - self.wnd_sz, cur_y + self.wnd_sz): +- for x in xrange(cur_x - self.wnd_sz, cur_x + self.wnd_sz): ++ for y in range(cur_y - self.wnd_sz, cur_y + self.wnd_sz): ++ for x in range(cur_x - self.wnd_sz, cur_x + self.wnd_sz): + if 0 <= x < self.width - self.blk_sz and 0 <= y < self.height - self.blk_sz: + loss = self.block_dist(cur_r, cur_c, [y - cur_y, x - cur_x], + self.metric) +@@ -56,8 +56,8 @@ + return ref_x, ref_y + + def motion_field_estimation(self): +- for i in xrange(self.num_row): +- for j in xrange(self.num_col): ++ for i in range(self.num_row): ++ for j in range(self.num_col): + ref_x, ref_y = self.search(i, j) + self.mf[i, j] = np.array( + [ref_y - i * self.blk_sz, ref_x - j * self.blk_sz]) +@@ -119,8 +119,8 @@ + ref_y = cur_y + #search all validate positions and select the one with minimum distortion + # as well as weighted neighbor loss +- for y in xrange(cur_y - self.wnd_sz, cur_y + self.wnd_sz): +- for x in xrange(cur_x - self.wnd_sz, cur_x + self.wnd_sz): ++ for y in range(cur_y - self.wnd_sz, cur_y + self.wnd_sz): ++ for x in range(cur_x - self.wnd_sz, cur_x + self.wnd_sz): + if 0 <= x < self.width - self.blk_sz and 0 <= y < self.height - self.blk_sz: + dist_loss = self.block_dist(cur_r, cur_c, [y - cur_y, x - cur_x], + self.metric) +@@ -133,8 +133,8 @@ + return ref_x, ref_y + + def motion_field_estimation(self): +- for i in xrange(self.num_row): +- for j in xrange(self.num_col): ++ for i in range(self.num_row): ++ for j in range(self.num_col): + ref_x, ref_y = self.search(i, j) + self.mf[i, j] = np.array( + [ref_y - i * self.blk_sz, ref_x - j * self.blk_sz]) +@@ -178,14 +178,14 @@ + + def getFeatureScore(self): + fs = np.zeros((self.num_row, self.num_col)) +- for r in xrange(self.num_row): +- for c in xrange(self.num_col): ++ for r in range(self.num_row): ++ for c in range(self.num_col): + IxIx = 0 + IyIy = 0 + IxIy = 0 + #get ssd surface +- for x in xrange(self.blk_sz - 1): +- for y in xrange(self.blk_sz - 1): ++ for x in 
range(self.blk_sz - 1): ++ for y in range(self.blk_sz - 1): + ox = c * self.blk_sz + x + oy = r * self.blk_sz + y + Ix = self.cur_yuv[oy, ox + 1, 0] - self.cur_yuv[oy, ox, 0] +@@ -214,8 +214,8 @@ + ref_x = cur_x + ref_y = cur_y + #search all validate positions and select the one with minimum distortion +- for y in xrange(cur_y - self.wnd_sz, cur_y + self.wnd_sz): +- for x in xrange(cur_x - self.wnd_sz, cur_x + self.wnd_sz): ++ for y in range(cur_y - self.wnd_sz, cur_y + self.wnd_sz): ++ for x in range(cur_x - self.wnd_sz, cur_x + self.wnd_sz): + if 0 <= x < self.width - self.blk_sz and 0 <= y < self.height - self.blk_sz: + loss = self.block_dist(cur_r, cur_c, [y - cur_y, x - cur_x], + self.metric) +@@ -231,8 +231,8 @@ + + def smooth(self, uvs, mvs): + sm_uvs = np.zeros(uvs.shape) +- for r in xrange(self.num_row): +- for c in xrange(self.num_col): ++ for r in range(self.num_row): ++ for c in range(self.num_col): + avg_uv = np.array([0.0, 0.0]) + for i, j in {(r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)}: + if 0 <= i < self.num_row and 0 <= j < self.num_col: +@@ -248,12 +248,12 @@ + def motion_field_estimation(self): + #get matching results + mvs = np.zeros(self.mf.shape) +- for r in xrange(self.num_row): +- for c in xrange(self.num_col): ++ for r in range(self.num_row): ++ for c in range(self.num_col): + ref_x, ref_y = self.search(r, c) + mvs[r, c] = np.array([ref_y - r * self.blk_sz, ref_x - c * self.blk_sz]) + #add smoothness constraint + uvs = np.zeros(self.mf.shape) +- for _ in xrange(self.max_iter): ++ for _ in range(self.max_iter): + uvs = self.smooth(uvs, mvs) + self.mf = uvs +--- a/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/3D-Reconstruction/MotionEST/GroundTruth.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/3D-Reconstruction/MotionEST/GroundTruth.py 2025-01-16 02:26:08.593512839 +0800 +@@ -33,9 +33,9 @@ + if gt_path: + with open(gt_path) as gt_file: + lines = gt_file.readlines() +- for i in xrange(len(lines)): ++ for i in range(len(lines)): + info = lines[i].split(';') +- for j in xrange(len(info)): ++ for j in range(len(info)): + x, y = info[j].split(',') + #-, - stands for nothing + if x == '-' or y == '-': +--- a/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/3D-Reconstruction/MotionEST/HornSchunck.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/3D-Reconstruction/MotionEST/HornSchunck.py 2025-01-16 02:26:08.593512839 +0800 +@@ -45,8 +45,8 @@ + cur_I = np.zeros((self.num_row, self.num_col)) + ref_I = np.zeros((self.num_row, self.num_col)) + #use average intensity as block's intensity +- for i in xrange(self.num_row): +- for j in xrange(self.num_col): ++ for i in range(self.num_row): ++ for j in range(self.num_col): + r = i * self.blk_sz + c = j * self.blk_sz + cur_I[i, j] = np.mean(self.cur_yuv[r:r + self.blk_sz, c:c + self.blk_sz, +@@ -64,8 +64,8 @@ + Iy = np.zeros((self.num_row, self.num_col)) + It = np.zeros((self.num_row, self.num_col)) + sz = self.blk_sz +- for i in xrange(self.num_row - 1): +- for j in xrange(self.num_col - 1): ++ for i in range(self.num_row - 1): ++ for j in range(self.num_col - 1): + """ + Ix: + (i ,j) <--- (i ,j+1) +@@ -96,8 +96,8 @@ + Iy[i, j] /= count + count = 0 + #It: +- for r in xrange(i, i + 2): +- for c in xrange(j, j + 2): ++ for r in range(i, i + 2): ++ for c in range(j, j + 2): + if 0 <= r < self.num_row and 0 <= c < self.num_col: + It[i, j] += (self.ref_I[r, c] - self.cur_I[r, c]) + count += 
1 +@@ -118,8 +118,8 @@ + | | | + 1/12 --- 1/6 --- 1/12 + """ +- for i in xrange(self.num_row): +- for j in xrange(self.num_col): ++ for i in range(self.num_row): ++ for j in range(self.num_col): + for r, c in {(-1, 0), (1, 0), (0, -1), (0, 1)}: + if 0 <= i + r < self.num_row and 0 <= j + c < self.num_col: + avg[i, j] += self.mf[i + r, j + c] / 6.0 +@@ -151,8 +151,8 @@ + + N = 2 * self.num_row * self.num_col + b = np.zeros((N, 1)) +- for i in xrange(self.num_row): +- for j in xrange(self.num_col): ++ for i in range(self.num_row): ++ for j in range(self.num_col): + """(IxIx+alpha^2)u+IxIy.v-alpha^2~u IxIy.u+(IyIy+alpha^2)v-alpha^2~v""" + u_idx = i * 2 * self.num_col + 2 * j + v_idx = u_idx + 1 +@@ -206,7 +206,7 @@ + M_inv = inv(M) + uv = M_inv.dot(b) + +- for i in xrange(self.num_row): +- for j in xrange(self.num_col): ++ for i in range(self.num_row): ++ for j in range(self.num_col): + self.mf[i, j, 0] = uv[i * 2 * self.num_col + 2 * j + 1, 0] * self.blk_sz + self.mf[i, j, 1] = uv[i * 2 * self.num_col + 2 * j, 0] * self.blk_sz +--- a/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/3D-Reconstruction/MotionEST/MotionEST.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/3D-Reconstruction/MotionEST/MotionEST.py 2025-01-16 02:26:08.593512839 +0800 +@@ -73,8 +73,8 @@ + def distortion(self, mask=None, metric=MSE): + loss = 0 + count = 0 +- for i in xrange(self.num_row): +- for j in xrange(self.num_col): ++ for i in range(self.num_row): ++ for j in range(self.num_col): + if mask is not None and mask[i, j]: + continue + loss += self.block_dist(i, j, self.mf[i, j], metric) +@@ -88,8 +88,8 @@ + count = 0 + gt = ground_truth.mf + mask = ground_truth.mask +- for i in xrange(self.num_row): +- for j in xrange(self.num_col): ++ for i in range(self.num_row): ++ for j in range(self.num_col): + if mask is not None and mask[i][j]: + continue + loss += LA.norm(gt[i, j] - self.mf[i, j]) +--- a/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/3D-Reconstruction/MotionEST/SearchSmooth.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/3D-Reconstruction/MotionEST/SearchSmooth.py 2025-01-16 02:26:08.593512839 +0800 +@@ -38,10 +38,10 @@ + + def getRefLocalDiff(self, mvs): + m, n = self.num_row, self.num_col +- localDiff = [[] for _ in xrange(m)] ++ localDiff = [[] for _ in range(m)] + blk_sz = self.blk_sz +- for r in xrange(m): +- for c in xrange(n): ++ for r in range(m): ++ for c in range(n): + I_row = 0 + I_col = 0 + #get ssd surface +@@ -78,8 +78,8 @@ + def smooth(self, uvs, mvs): + sm_uvs = np.zeros(uvs.shape) + blk_sz = self.blk_sz +- for r in xrange(self.num_row): +- for c in xrange(self.num_col): ++ for r in range(self.num_row): ++ for c in range(self.num_col): + nb_uv = np.array([0.0, 0.0]) + for i, j in {(r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)}: + if 0 <= i < self.num_row and 0 <= j < self.num_col: +@@ -112,7 +112,7 @@ + mvs = self.search.mf + #add smoothness constraint + uvs = mvs / self.blk_sz +- for _ in xrange(self.max_iter): ++ for _ in range(self.max_iter): + uvs = self.smooth(uvs, mvs) + self.mf = uvs * self.blk_sz + +@@ -144,10 +144,10 @@ + + def getRefLocalDiff(self, mvs): + m, n = self.num_row, self.num_col +- localDiff = [[] for _ in xrange(m)] ++ localDiff = [[] for _ in range(m)] + blk_sz = self.blk_sz +- for r in xrange(m): +- for c in xrange(n): ++ for r in range(m): ++ for c in range(n): + I_row = 0 + I_col = 0 + #get ssd surface +@@ -184,8 +184,8 @@ + 
def smooth(self, uvs, mvs): + sm_uvs = np.zeros(uvs.shape) + blk_sz = self.blk_sz +- for r in xrange(self.num_row): +- for c in xrange(self.num_col): ++ for r in range(self.num_row): ++ for c in range(self.num_col): + nb_uv = np.array([0.0, 0.0]) + for i, j in {(r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)}: + if 0 <= i < self.num_row and 0 <= j < self.num_col: +@@ -216,6 +216,6 @@ + mvs = self.search.mf + #add smoothness constraint + uvs = mvs / self.blk_sz +- for _ in xrange(self.max_iter): ++ for _ in range(self.max_iter): + uvs = self.smooth(uvs, mvs) + self.mf = uvs * self.blk_sz +--- a/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/3D-Reconstruction/MotionEST/Util.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/3D-Reconstruction/MotionEST/Util.py 2025-01-16 02:26:08.593512839 +0800 +@@ -29,16 +29,16 @@ + height = img_rgba.size[1] + num_row = height // blk_sz + num_col = width // blk_sz +- for i in xrange(num_row): ++ for i in range(num_row): + left = (0, i * blk_sz) + right = (width, i * blk_sz) + draw.line([left, right], fill=(0, 0, 255, 255)) +- for j in xrange(num_col): ++ for j in range(num_col): + up = (j * blk_sz, 0) + down = (j * blk_sz, height) + draw.line([up, down], fill=(0, 0, 255, 255)) +- for i in xrange(num_row): +- for j in xrange(num_col): ++ for i in range(num_row): ++ for j in range(num_col): + center = (j * blk_sz + 0.5 * blk_sz, i * blk_sz + 0.5 * blk_sz) + """mf[i,j][0] is the row shift and mf[i,j][1] is the column shift In PIL coordinates, head[0] is x (column shift) and head[1] is y (row shift).""" + head = (center[0] + mf[i, j][1], center[1] + mf[i, j][0]) +--- a/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/3D-Reconstruction/genY4M/genY4M.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/3D-Reconstruction/genY4M/genY4M.py 2025-01-16 02:26:08.593512839 +0800 +@@ -44,9 +44,9 @@ + for f in frames: + y4m.write("FRAME\n") + px = f.load() +- for k in xrange(3): +- for i in xrange(0, f.height, r_step[k]): +- for j in xrange(0, f.width, c_step[k]): ++ for k in range(3): ++ for i in range(0, f.height, r_step[k]): ++ for j in range(0, f.width, c_step[k]): + yuv = px[j, i] + y4m.write(chr(yuv[k])) + +@@ -66,16 +66,16 @@ + else: + frames.append((idx, img)) + if len(frames) == 0: +- print("No frames in directory: " + args.frame_path) ++ print(("No frames in directory: " + args.frame_path)) + sys.exit() + print("----------------------Y4M Info----------------------") +- print("width: %d" % frames[0][1].width) +- print("height: %d" % frames[0][1].height) +- print("#frame: %d" % len(frames)) +- print("frame rate: %s" % args.frame_rate) +- print("interlacing: %s" % args.interlacing) +- print("pixel ratio: %s" % args.pix_ratio) +- print("color space: %s" % args.color_space) ++ print(("width: %d" % frames[0][1].width)) ++ print(("height: %d" % frames[0][1].height)) ++ print(("#frame: %d" % len(frames))) ++ print(("frame rate: %s" % args.frame_rate)) ++ print(("interlacing: %s" % args.interlacing)) ++ print(("pixel ratio: %s" % args.pix_ratio)) ++ print(("color space: %s" % args.color_space)) + print("----------------------------------------------------") + + print("Generating ...") +--- a/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/non_greedy_mv/non_greedy_mv.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libvpx/source/libvpx/tools/non_greedy_mv/non_greedy_mv.py 2025-01-16 
02:26:08.593512839 +0800
+@@ -185,11 +185,11 @@
+ im = axes[1][1].imshow(mv_mode_arr)
+ #axes[1][1].figure.colorbar(im, ax=axes[1][1])
+ 
+- print rf_idx, frame_idx, ref_frame_idx, gf_frame_offset, ref_gf_frame_offset, len(mv_ls)
++ print(rf_idx, frame_idx, ref_frame_idx, gf_frame_offset, ref_gf_frame_offset, len(mv_ls))
+ 
+ flatten_mv_mode = mv_mode_arr.flatten()
+ zero_mv_count = sum(flatten_mv_mode == 0);
+ new_mv_count = sum(flatten_mv_mode == 1);
+ ref_mv_count = sum(flatten_mv_mode == 2) + sum(flatten_mv_mode == 3);
+- print zero_mv_count, new_mv_count, ref_mv_count
++ print(zero_mv_count, new_mv_count, ref_mv_count)
+ plt.show()
+--- a/src/3rdparty/chromium/third_party/libxml/chromium/roll.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/libxml/chromium/roll.py 2025-01-16 02:26:08.593512839 +0800
+@@ -236,7 +236,7 @@
+ 
+ def __exit__(self, exc_type, exc_value, traceback):
+ if exc_value:
+- print('was in %s; %s before that' % (self.path, self.prev_path))
++ print(('was in %s; %s before that' % (self.path, self.prev_path)))
+ os.chdir(self.prev_path)
+ 
+ 
+@@ -330,14 +330,14 @@
+ with WorkingDir(temp_src_path):
+ os.remove('.gitignore')
+ for patch in PATCHES:
+- print('applying %s' % patch)
++ print(('applying %s' % patch))
+ subprocess.check_call(
+ 'patch -p1 --fuzz=0 < %s' % os.path.join(
+ src_path, THIRD_PARTY_LIBXML_SRC, '..', 'chromium', patch),
+ shell=True)
+ 
+ with WorkingDir(temp_config_path):
+- print('../src/autogen.sh %s' % XML_CONFIGURE_OPTIONS)
++ print(('../src/autogen.sh %s' % XML_CONFIGURE_OPTIONS))
+ subprocess.check_call(['../src/autogen.sh'] + XML_CONFIGURE_OPTIONS)
+ subprocess.check_call(['make', 'dist-all'])
+ 
+@@ -354,7 +354,7 @@
+ # Export the upstream git repo.
+ try:
+ temp_dir = tempfile.mkdtemp()
+- print('temporary directory: %s' % temp_dir)
++ print(('temporary directory: %s' % temp_dir))
+ 
+ commit, tar_file = prepare_libxml_distribution(
+ src_path, libxml2_repo_path, temp_dir)
+--- a/src/3rdparty/chromium/third_party/libxml/src/check-relaxng-test-suite.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/libxml/src/check-relaxng-test-suite.py 2025-01-16 02:26:08.593512839 +0800
+@@ -4,7 +4,7 @@
+ import os
+ try:
+     # Python 2
+-    from StringIO import StringIO
++    from io import StringIO
+ except ImportError:
+     # Python 3
+     from io import StringIO
+@@ -81,7 +81,7 @@
+ while child != None:
+ if child.type != 'text':
+ instance = instance + child.serialize()
+- child = child.next
++ child = child.next
+ 
+ try:
+ doc = libxml2.parseDoc(instance)
+@@ -122,7 +122,7 @@
+ while child != None:
+ if child.type != 'text':
+ instance = instance + child.serialize()
+- child = child.next
++ child = child.next
+ 
+ try:
+ doc = libxml2.parseDoc(instance)
+@@ -162,7 +162,7 @@
+ while child != None:
+ if child.type != 'text':
+ schema = schema + child.serialize()
+- child = child.next
++ child = child.next
+ 
+ try:
+ rngp = libxml2.relaxNGNewMemParserCtxt(schema, len(schema))
+@@ -188,7 +188,7 @@
+ while child != None:
+ if child.type != 'text':
+ schema = schema + child.serialize()
+- child = child.next
++ child = child.next
+ 
+ try:
+ rngp = libxml2.relaxNGNewMemParserCtxt(schema, len(schema))
+@@ -231,7 +231,7 @@
+ while child != None:
+ if child.type != 'text':
+ res = res + child.serialize()
+- child = child.next
++ child = child.next
+ resources[name] = res
+ 
+ #
+@@ -272,7 +272,7 @@
+ nb_schemas_tests, node.lineNo(), sections))
+ resources = {}
+ if debug:
+- print("test %d line %d" % 
(nb_schemas_tests, node.lineNo()))
++ print(("test %d line %d" % (nb_schemas_tests, node.lineNo())))
+ 
+ dirs = node.xpathEval('dir')
+ for dir in dirs:
+@@ -284,16 +284,16 @@
+ tsts = node.xpathEval('incorrect')
+ if tsts != []:
+ if len(tsts) != 1:
+- print("warning test line %d has more than one example" %(node.lineNo()))
++ print(("warning test line %d has more than one example" %(node.lineNo())))
+ schema = handle_incorrect(tsts[0])
+ else:
+ tsts = node.xpathEval('correct')
+ if tsts != []:
+ if len(tsts) != 1:
+- print("warning test line %d has more than one example"% (node.lineNo()))
++ print(("warning test line %d has more than one example"% (node.lineNo())))
+ schema = handle_correct(tsts[0])
+ else:
+- print("warning line %d has no <correct> nor <incorrect> child" % (node.lineNo()))
++ print(("warning line %d has no <correct> nor <incorrect> child" % (node.lineNo())))
+ 
+ nb_schemas_tests = nb_schemas_tests + 1;
+ 
+@@ -340,7 +340,7 @@
+ for section in sections:
+ msg = msg + section.content + " "
+ if quiet == 0:
+- print("Tests for section %s" % (msg))
++ print(("Tests for section %s" % (msg)))
+ for test in node.xpathEval('testCase'):
+ handle_testCase(test)
+ for test in node.xpathEval('testSuite'):
+@@ -351,17 +351,17 @@
+ msg = ""
+ for section in sections:
+ msg = msg + section.content + " "
+- print("Result of tests for section %s" % (msg))
++ print(("Result of tests for section %s" % (msg)))
+ if nb_schemas_tests != old_schemas_tests:
+- print("found %d test schemas: %d success %d failures" % (
++ print(("found %d test schemas: %d success %d failures" % (
+ nb_schemas_tests - old_schemas_tests,
+ nb_schemas_success - old_schemas_success,
+- nb_schemas_failed - old_schemas_failed))
++ nb_schemas_failed - old_schemas_failed)))
+ if nb_instances_tests != old_instances_tests:
+- print("found %d test instances: %d success %d failures" % (
++ print(("found %d test instances: %d success %d failures" % (
+ nb_instances_tests - old_instances_tests,
+ nb_instances_success - old_instances_success,
+- nb_instances_failed - old_instances_failed))
++ nb_instances_failed - old_instances_failed)))
+ #
+ # Parse the conf file
+ #
+@@ -370,7 +370,7 @@
+ libxml2.setEntityLoader(resolver)
+ root = testsuite.getRootElement()
+ if root.name != 'testSuite':
+- print("%s doesn't start with a testSuite element, aborting" % (CONF))
++ print(("%s doesn't start with a testSuite element, aborting" % (CONF)))
+ sys.exit(1)
+ if quiet == 0:
+ print("Running Relax NG testsuite")
+@@ -379,11 +379,11 @@
+ if quiet == 0:
+ print("\nTOTAL:\n")
+ if quiet == 0 or nb_schemas_failed != 0:
+- print("found %d test schemas: %d success %d failures" % (
+- nb_schemas_tests, nb_schemas_success, nb_schemas_failed))
++ print(("found %d test schemas: %d success %d failures" % (
++ nb_schemas_tests, nb_schemas_success, nb_schemas_failed)))
+ if quiet == 0 or nb_instances_failed != 0:
+- print("found %d test instances: %d success %d failures" % (
+- nb_instances_tests, nb_instances_success, nb_instances_failed))
++ print(("found %d test instances: %d success %d failures" % (
++ nb_instances_tests, nb_instances_success, nb_instances_failed)))
+ 
+ testsuite.freeDoc()
+ 
+@@ -394,5 +394,5 @@
+ if quiet == 0:
+ print("OK")
+ else:
+- print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
++ print(("Memory leak %d bytes" % (libxml2.debugMemory(1))))
+ libxml2.dumpMemory()
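A note on the sibling walks in the RELAX NG test drivers above and below: the `next` attribute these scripts step through is a plain property of the libxml2 Python bindings that returns the next sibling node (or None). It has nothing to do with the iterator protocol, so it needs no renaming on Python 3; `child.__next__` would raise AttributeError because xmlNode is not an iterator. A minimal sketch of the idiom, illustrative only and not part of the patch itself, assuming the Python 3 libxml2 bindings are installed (the XML snippet is made up):

    import libxml2

    doc = libxml2.parseDoc("<test><correct/>text<incorrect/></test>")
    child = doc.getRootElement().children
    while child != None:
        if child.type != 'text':
            # serialize() returns the XML text of this element node
            print(child.serialize())
        # 'next' is the next-sibling property; it works unchanged on Python 3
        child = child.next
    doc.freeDoc()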
+--- a/src/3rdparty/chromium/third_party/libxml/src/check-relaxng-test-suite2.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/libxml/src/check-relaxng-test-suite2.py 2025-01-16 02:26:08.593512839 +0800
+@@ -4,7 +4,7 @@
+ import os
+ try:
+     # Python 2
+-    from StringIO import StringIO
++    from io import StringIO
+ except ImportError:
+     # Python 3
+     from io import StringIO
+@@ -70,7 +70,7 @@
+ while child != None:
+ if child.type != 'text':
+ instance = instance + child.serialize()
+- child = child.next
++ child = child.next
+ 
+ # mem = libxml2.debugMemory(1);
+ try:
+@@ -86,7 +86,7 @@
+ return
+ 
+ if debug:
+- print("instance line %d" % (node.lineNo()))
++ print(("instance line %d" % (node.lineNo())))
+ 
+ try:
+ ctxt = schema.relaxNGNewValidCtxt()
+@@ -123,7 +123,7 @@
+ while child != None:
+ if child.type != 'text':
+ instance = instance + child.serialize()
+- child = child.next
++ child = child.next
+ 
+ # mem = libxml2.debugMemory(1);
+ 
+@@ -139,7 +139,7 @@
+ return
+ 
+ if debug:
+- print("instance line %d" % (node.lineNo()))
++ print(("instance line %d" % (node.lineNo())))
+ 
+ try:
+ ctxt = schema.relaxNGNewValidCtxt()
+@@ -176,7 +176,7 @@
+ while child != None:
+ if child.type != 'text':
+ schema = schema + child.serialize()
+- child = child.next
++ child = child.next
+ 
+ try:
+ rngp = libxml2.relaxNGNewMemParserCtxt(schema, len(schema))
+@@ -202,7 +202,7 @@
+ while child != None:
+ if child.type != 'text':
+ schema = schema + child.serialize()
+- child = child.next
++ child = child.next
+ 
+ try:
+ rngp = libxml2.relaxNGNewMemParserCtxt(schema, len(schema))
+@@ -245,7 +245,7 @@
+ while child != None:
+ if child.type != 'text':
+ res = res + child.serialize()
+- child = child.next
++ child = child.next
+ resources[name] = res
+ 
+ #
+@@ -286,7 +286,7 @@
+ nb_schemas_tests, node.lineNo(), sections))
+ resources = {}
+ if debug:
+- print("test %d line %d" % (nb_schemas_tests, node.lineNo()))
++ print(("test %d line %d" % (nb_schemas_tests, node.lineNo())))
+ 
+ dirs = node.xpathEval('dir')
+ for dir in dirs:
+@@ -298,16 +298,16 @@
+ tsts = node.xpathEval('incorrect')
+ if tsts != []:
+ if len(tsts) != 1:
+- print("warning test line %d has more than one example" %(node.lineNo()))
++ print(("warning test line %d has more than one example" %(node.lineNo())))
+ schema = handle_incorrect(tsts[0])
+ else:
+ tsts = node.xpathEval('correct')
+ if tsts != []:
+ if len(tsts) != 1:
+- print("warning test line %d has more than one example"% (node.lineNo()))
++ print(("warning test line %d has more than one example"% (node.lineNo())))
+ schema = handle_correct(tsts[0])
+ else:
+- print("warning line %d has no <correct> nor <incorrect> child" % (node.lineNo()))
++ print(("warning line %d has no <correct> nor <incorrect> child" % (node.lineNo())))
+ 
+ nb_schemas_tests = nb_schemas_tests + 1;
+ 
+@@ -353,7 +353,7 @@
+ for section in sections:
+ msg = msg + section.content + " "
+ if quiet == 0:
+- print("Tests for section %s" % (msg))
++ print(("Tests for section %s" % (msg)))
+ for test in node.xpathEval('testCase'):
+ handle_testCase(test)
+ for test in node.xpathEval('testSuite'):
+@@ -364,17 +364,17 @@
+ msg = ""
+ for section in sections:
+ msg = msg + section.content + " "
+- print("Result of tests for section %s" % (msg))
++ print(("Result of tests for section %s" % (msg)))
+ if nb_schemas_tests != old_schemas_tests:
+- print("found %d test schemas: %d success %d failures" % (
++ print(("found %d test schemas: %d success %d failures" % (
+ nb_schemas_tests - old_schemas_tests,
+ nb_schemas_success - old_schemas_success,
+- nb_schemas_failed - old_schemas_failed))
++ nb_schemas_failed - old_schemas_failed)))
+ if nb_instances_tests != old_instances_tests:
+- print("found %d test instances: 
%d success %d failures" % ( ++ print(("found %d test instances: %d success %d failures" % ( + nb_instances_tests - old_instances_tests, + nb_instances_success - old_instances_success, +- nb_instances_failed - old_instances_failed)) ++ nb_instances_failed - old_instances_failed))) + # + # Parse the conf file + # +@@ -393,7 +393,7 @@ + libxml2.setEntityLoader(resolver) + root = testsuite.getRootElement() + if root.name != 'testSuite': +- print("%s doesn't start with a testSuite element, aborting" % (CONF)) ++ print(("%s doesn't start with a testSuite element, aborting" % (CONF))) + sys.exit(1) + if quiet == 0: + print("Running Relax NG testsuite") +@@ -402,11 +402,11 @@ + if quiet == 0: + print("\nTOTAL:\n") + if quiet == 0 or nb_schemas_failed != 0: +- print("found %d test schemas: %d success %d failures" % ( +- nb_schemas_tests, nb_schemas_success, nb_schemas_failed)) ++ print(("found %d test schemas: %d success %d failures" % ( ++ nb_schemas_tests, nb_schemas_success, nb_schemas_failed))) + if quiet == 0 or nb_instances_failed != 0: +- print("found %d test instances: %d success %d failures" % ( +- nb_instances_tests, nb_instances_success, nb_instances_failed)) ++ print(("found %d test instances: %d success %d failures" % ( ++ nb_instances_tests, nb_instances_success, nb_instances_failed))) + + log.close() + testsuite.freeDoc() +@@ -418,5 +418,5 @@ + if quiet == 0: + print("OK") + else: +- print("Memory leak %d bytes" % (libxml2.debugMemory(1))) ++ print(("Memory leak %d bytes" % (libxml2.debugMemory(1)))) + libxml2.dumpMemory() +--- a/src/3rdparty/chromium/third_party/libxml/src/check-xinclude-test-suite.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libxml/src/check-xinclude-test-suite.py 2025-01-16 02:26:08.593512839 +0800 +@@ -48,7 +48,7 @@ + error_nr = 0 + error_msg = '' + +- print("testXInclude(%s, %s)" % (filename, id)) ++ print(("testXInclude(%s, %s)" % (filename, id))) + return 1 + + def runTest(test, basedir): +@@ -64,20 +64,20 @@ + id = test.prop('id') + type = test.prop('type') + if uri is None: +- print("Test without ID:", uri) ++ print(("Test without ID:", uri)) + return -1 + if id is None: +- print("Test without URI:", id) ++ print(("Test without URI:", id)) + return -1 + if type is None: +- print("Test without URI:", id) ++ print(("Test without URI:", id)) + return -1 + if basedir != None: + URI = basedir + "/" + uri + else: + URI = uri + if os.access(URI, os.R_OK) == 0: +- print("Test %s missing: base %s uri %s" % (URI, basedir, uri)) ++ print(("Test %s missing: base %s uri %s" % (URI, basedir, uri))) + return -1 + + expected = None +@@ -93,7 +93,7 @@ + if basedir != None: + output = basedir + "/" + output + if os.access(output, os.R_OK) == 0: +- print("Result for %s missing: %s" % (id, output)) ++ print(("Result for %s missing: %s" % (id, output))) + output = None + else: + try: +@@ -101,7 +101,7 @@ + expected = f.read() + outputfile = output + except: +- print("Result for %s unreadable: %s" % (id, output)) ++ print(("Result for %s unreadable: %s" % (id, output))) + + try: + # print("testing %s" % (URI)) +@@ -113,13 +113,13 @@ + if res >= 0 and expected != None: + result = doc.serialize() + if result != expected: +- print("Result for %s differs" % (id)) ++ print(("Result for %s differs" % (id))) + open("xinclude.res", "w").write(result) + diff = os.popen("diff %s xinclude.res" % outputfile).read() + + doc.freeDoc() + else: +- print("Failed to parse %s" % (URI)) ++ print(("Failed to parse %s" % (URI))) + res = -1 + + +@@ -130,24 +130,24 @@ 
+ test_succeed = test_succeed + 1
+ elif res == 0:
+ test_failed = test_failed + 1
+- print("Test %s: no substitution done ???" % (id))
++ print(("Test %s: no substitution done ???" % (id)))
+ elif res < 0:
+ test_error = test_error + 1
+- print("Test %s: failed valid XInclude processing" % (id))
++ print(("Test %s: failed valid XInclude processing" % (id)))
+ elif type == 'error':
+ if res > 0:
+ test_error = test_error + 1
+- print("Test %s: failed to detect invalid XInclude processing" % (id))
++ print(("Test %s: failed to detect invalid XInclude processing" % (id)))
+ elif res == 0:
+ test_failed = test_failed + 1
+- print("Test %s: Invalid but no substitution done" % (id))
++ print(("Test %s: Invalid but no substitution done" % (id)))
+ elif res < 0:
+ test_succeed = test_succeed + 1
+ elif type == 'optional':
+ if res > 0:
+ test_succeed = test_succeed + 1
+ else:
+- print("Test %s: failed optional test" % (id))
++ print(("Test %s: failed optional test" % (id)))
+ 
+ # Log the ontext
+ if res != 1:
+@@ -171,7 +171,7 @@
+ def runTestCases(case):
+ creator = case.prop('creator')
+ if creator != None:
+- print("=>", creator)
++ print(("=>", creator))
+ base = case.getBase(None)
+ basedir = case.prop('basedir')
+ if basedir != None:
+@@ -182,11 +182,11 @@
+ runTest(test, base)
+ if test.name == 'testcases':
+ runTestCases(test)
+- test = test.next
++ test = test.next
+ 
+ conf = libxml2.parseFile(CONF)
+ if conf is None:
+- print("Unable to load %s" % CONF)
++ print(("Unable to load %s" % CONF))
+ sys.exit(1)
+ 
+ testsuite = conf.getRootElement()
+@@ -208,13 +208,13 @@
+ old_test_failed = test_failed
+ old_test_error = test_error
+ runTestCases(case)
+- print(" Ran %d tests: %d succeeded, %d failed and %d generated an error" % (
++ print((" Ran %d tests: %d succeeded, %d failed and %d generated an error" % (
+ test_nr - old_test_nr, test_succeed - old_test_succeed,
+- test_failed - old_test_failed, test_error - old_test_error))
+- case = case.next
++ test_failed - old_test_failed, test_error - old_test_error)))
++ case = case.next
+ 
+ conf.freeDoc()
+ log.close()
+ 
+-print("Ran %d tests: %d succeeded, %d failed and %d generated an error in %.2f s." % (
+- test_nr, test_succeed, test_failed, test_error, time.time() - start))
++print(("Ran %d tests: %d succeeded, %d failed and %d generated an error in %.2f s." 
% ( ++ test_nr, test_succeed, test_failed, test_error, time.time() - start))) +--- a/src/3rdparty/chromium/third_party/libxml/src/check-xml-test-suite.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libxml/src/check-xml-test-suite.py 2025-01-16 02:26:08.594596154 +0800 +@@ -89,7 +89,7 @@ + if doc != None: + doc.freeDoc() + if ret == 0 or ctxt.wellFormed() != 0: +- print("%s: error: Well Formedness error not detected" % (id)) ++ print(("%s: error: Well Formedness error not detected" % (id))) + log.write("%s: error: Well Formedness error not detected\n" % (id)) + return 0 + return 1 +@@ -115,7 +115,7 @@ + if doc != None: + doc.freeDoc() + if ret == 0 or ctxt.wellFormed() != 0: +- print("%s: error: Well Formedness error not detected" % (id)) ++ print(("%s: error: Well Formedness error not detected" % (id))) + log.write("%s: error: Well Formedness error not detected\n" % (id)) + return 0 + return 1 +@@ -142,7 +142,7 @@ + if doc != None: + doc.freeDoc() + if ret == 0 or ctxt.wellFormed() != 0: +- print("%s: error: Well Formedness error not detected" % (id)) ++ print(("%s: error: Well Formedness error not detected" % (id))) + log.write("%s: error: Well Formedness error not detected\n" % (id)) + return 0 + return 1 +@@ -167,13 +167,13 @@ + except: + doc = None + if doc is None or ret != 0 or ctxt.wellFormed() == 0: +- print("%s: error: wrongly failed to parse the document" % (id)) ++ print(("%s: error: wrongly failed to parse the document" % (id))) + log.write("%s: error: wrongly failed to parse the document\n" % (id)) + if doc != None: + doc.freeDoc() + return 0 + if error_nr != 0: +- print("%s: warning: WF document generated an error msg" % (id)) ++ print(("%s: warning: WF document generated an error msg" % (id))) + log.write("%s: error: WF document generated an error msg\n" % (id)) + doc.freeDoc() + return 2 +@@ -202,11 +202,11 @@ + if doc != None: + doc.freeDoc() + if ctxt.wellFormed() == 0: +- print("%s: warning: failed to parse the document but accepted" % (id)) ++ print(("%s: warning: failed to parse the document but accepted" % (id))) + log.write("%s: warning: failed to parse the document but accepte\n" % (id)) + return 2 + if error_nr != 0: +- print("%s: warning: WF document generated an error msg" % (id)) ++ print(("%s: warning: WF document generated an error msg" % (id))) + log.write("%s: error: WF document generated an error msg\n" % (id)) + return 2 + return 1 +@@ -231,16 +231,16 @@ + doc = None + valid = ctxt.isValid() + if doc is None: +- print("%s: error: wrongly failed to parse the document" % (id)) ++ print(("%s: error: wrongly failed to parse the document" % (id))) + log.write("%s: error: wrongly failed to parse the document\n" % (id)) + return 0 + if valid == 1: +- print("%s: error: Validity error not detected" % (id)) ++ print(("%s: error: Validity error not detected" % (id))) + log.write("%s: error: Validity error not detected\n" % (id)) + doc.freeDoc() + return 0 + if error_nr == 0: +- print("%s: warning: Validity error not reported" % (id)) ++ print(("%s: warning: Validity error not reported" % (id))) + log.write("%s: warning: Validity error not reported\n" % (id)) + doc.freeDoc() + return 2 +@@ -267,16 +267,16 @@ + doc = None + valid = ctxt.isValid() + if doc is None: +- print("%s: error: wrongly failed to parse the document" % (id)) ++ print(("%s: error: wrongly failed to parse the document" % (id))) + log.write("%s: error: wrongly failed to parse the document\n" % (id)) + return 0 + if valid != 1: +- print("%s: error: Validity check 
failed" % (id)) ++ print(("%s: error: Validity check failed" % (id))) + log.write("%s: error: Validity check failed\n" % (id)) + doc.freeDoc() + return 0 + if error_nr != 0 or valid != 1: +- print("%s: warning: valid document reported an error" % (id)) ++ print(("%s: warning: valid document reported an error" % (id))) + log.write("%s: warning: valid document reported an error\n" % (id)) + doc.freeDoc() + return 2 +@@ -293,19 +293,19 @@ + uri = test.prop('URI') + id = test.prop('ID') + if uri is None: +- print("Test without ID:", uri) ++ print(("Test without ID:", uri)) + return -1 + if id is None: +- print("Test without URI:", id) ++ print(("Test without URI:", id)) + return -1 + base = test.getBase(None) + URI = libxml2.buildURI(uri, base) + if os.access(URI, os.R_OK) == 0: +- print("Test %s missing: base %s uri %s" % (URI, base, uri)) ++ print(("Test %s missing: base %s uri %s" % (URI, base, uri))) + return -1 + type = test.prop('TYPE') + if type is None: +- print("Test %s missing TYPE" % (id)) ++ print(("Test %s missing TYPE" % (id))) + return -1 + + extra = None +@@ -363,18 +363,18 @@ + profile = case.prop('PROFILE') + if profile != None and \ + profile.find("IBM XML Conformance Test Suite - Production") < 0: +- print("=>", profile) ++ print(("=>", profile)) + test = case.children + while test != None: + if test.name == 'TEST': + runTest(test) + if test.name == 'TESTCASES': + runTestCases(test) +- test = test.next ++ test = test.__next__ + + conf = loadNoentDoc(CONF) + if conf is None: +- print("Unable to load %s" % CONF) ++ print(("Unable to load %s" % CONF)) + sys.exit(1) + + testsuite = conf.getRootElement() +@@ -396,13 +396,13 @@ + old_test_failed = test_failed + old_test_error = test_error + runTestCases(case) +- print(" Ran %d tests: %d succeeded, %d failed and %d generated an error" % ( ++ print((" Ran %d tests: %d succeeded, %d failed and %d generated an error" % ( + test_nr - old_test_nr, test_succeed - old_test_succeed, +- test_failed - old_test_failed, test_error - old_test_error)) +- case = case.next ++ test_failed - old_test_failed, test_error - old_test_error))) ++ case = case.__next__ + + conf.freeDoc() + log.close() + +-print("Ran %d tests: %d succeeded, %d failed and %d generated an error in %.2f s." % ( +- test_nr, test_succeed, test_failed, test_error, time.time() - start)) ++print(("Ran %d tests: %d succeeded, %d failed and %d generated an error in %.2f s." 
+--- a/src/3rdparty/chromium/third_party/libxml/src/check-xsddata-test-suite.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/libxml/src/check-xsddata-test-suite.py 2025-01-16 02:26:08.594596154 +0800
+@@ -4,7 +4,7 @@
+ import os
+ try:
+     # Python 2
+-    from StringIO import StringIO
++    from io import StringIO
+ except ImportError:
+     # Python 3
+     from io import StringIO
+@@ -69,7 +69,7 @@
+ while child != None:
+ if child.type != 'text':
+ instance = instance + child.serialize()
+- child = child.next
++ child = child.next
+ 
+ mem = libxml2.debugMemory(1);
+ try:
+@@ -85,7 +85,7 @@
+ return
+ 
+ if debug:
+- print("instance line %d" % (node.lineNo()))
++ print(("instance line %d" % (node.lineNo())))
+ 
+ try:
+ ctxt = schema.relaxNGNewValidCtxt()
+@@ -96,8 +96,8 @@
+ 
+ doc.freeDoc()
+ if mem != libxml2.debugMemory(1):
+- print("validating instance %d line %d leaks" % (
+- nb_instances_tests, node.lineNo()))
++ print(("validating instance %d line %d leaks" % (
++ nb_instances_tests, node.lineNo())))
+ 
+ if ret != 0:
+ log.write("\nFailed to validate correct instance:\n-----\n")
+@@ -122,7 +122,7 @@
+ while child != None:
+ if child.type != 'text':
+ instance = instance + child.serialize()
+- child = child.next
++ child = child.next
+ 
+ # mem = libxml2.debugMemory(1);
+ 
+@@ -138,7 +138,7 @@
+ return
+ 
+ if debug:
+- print("instance line %d" % (node.lineNo()))
++ print(("instance line %d" % (node.lineNo())))
+ 
+ try:
+ ctxt = schema.relaxNGNewValidCtxt()
+@@ -174,7 +174,7 @@
+ while child != None:
+ if child.type != 'text':
+ schema = schema + child.serialize()
+- child = child.next
++ child = child.next
+ 
+ try:
+ rngp = libxml2.relaxNGNewMemParserCtxt(schema, len(schema))
+@@ -200,7 +200,7 @@
+ while child != None:
+ if child.type != 'text':
+ schema = schema + child.serialize()
+- child = child.next
++ child = child.next
+ 
+ try:
+ rngp = libxml2.relaxNGNewMemParserCtxt(schema, len(schema))
+@@ -243,7 +243,7 @@
+ while child != None:
+ if child.type != 'text':
+ res = res + child.serialize()
+- child = child.next
++ child = child.next
+ resources[name] = res
+ 
+ #
+@@ -284,7 +284,7 @@
+ nb_schemas_tests, node.lineNo(), sections))
+ resources = {}
+ if debug:
+- print("test %d line %d" % (nb_schemas_tests, node.lineNo()))
++ print(("test %d line %d" % (nb_schemas_tests, node.lineNo())))
+ 
+ dirs = node.xpathEval('dir')
+ for dir in dirs:
+@@ -296,16 +296,16 @@
+ tsts = node.xpathEval('incorrect')
+ if tsts != []:
+ if len(tsts) != 1:
+- print("warning test line %d has more than one example" %(node.lineNo()))
++ print(("warning test line %d has more than one example" %(node.lineNo())))
+ schema = handle_incorrect(tsts[0])
+ else:
+ tsts = node.xpathEval('correct')
+ if tsts != []:
+ if len(tsts) != 1:
+- print("warning test line %d has more than one example"% (node.lineNo()))
++ print(("warning test line %d has more than one example"% (node.lineNo())))
+ schema = handle_correct(tsts[0])
+ else:
+- print("warning line %d has no <correct> nor <incorrect> child" % (node.lineNo()))
++ print(("warning line %d has no <correct> nor <incorrect> child" % (node.lineNo())))
+ 
+ nb_schemas_tests = nb_schemas_tests + 1;
+ 
+@@ -351,7 +351,7 @@
+ for section in sections:
+ msg = msg + section.content + " "
+ if quiet == 0:
+- print("Tests for section %s" % (msg))
++ print(("Tests for section %s" % (msg)))
+ for test in node.xpathEval('testCase'):
+ handle_testCase(test)
+ for test in node.xpathEval('testSuite'):
+@@ -363,23 
+363,23 @@ + msg = "" + for section in sections: + msg = msg + section.content + " " +- print("Result of tests for section %s" % (msg)) ++ print(("Result of tests for section %s" % (msg))) + elif docs != []: + msg = "" + for doc in docs: + msg = msg + doc.content + " " +- print("Result of tests for %s" % (msg)) ++ print(("Result of tests for %s" % (msg))) + + if nb_schemas_tests != old_schemas_tests: +- print("found %d test schemas: %d success %d failures" % ( ++ print(("found %d test schemas: %d success %d failures" % ( + nb_schemas_tests - old_schemas_tests, + nb_schemas_success - old_schemas_success, +- nb_schemas_failed - old_schemas_failed)) ++ nb_schemas_failed - old_schemas_failed))) + if nb_instances_tests != old_instances_tests: +- print("found %d test instances: %d success %d failures" % ( ++ print(("found %d test instances: %d success %d failures" % ( + nb_instances_tests - old_instances_tests, + nb_instances_success - old_instances_success, +- nb_instances_failed - old_instances_failed)) ++ nb_instances_failed - old_instances_failed))) + # + # Parse the conf file + # +@@ -398,18 +398,18 @@ + libxml2.setEntityLoader(resolver) + root = testsuite.getRootElement() + if root.name != 'testSuite': +- print("%s doesn't start with a testSuite element, aborting" % (CONF)) ++ print(("%s doesn't start with a testSuite element, aborting" % (CONF))) + sys.exit(1) + if quiet == 0: + print("Running Relax NG testsuite") + handle_testSuite(root) + + if quiet == 0 or nb_schemas_failed != 0: +- print("\nTOTAL:\nfound %d test schemas: %d success %d failures" % ( +- nb_schemas_tests, nb_schemas_success, nb_schemas_failed)) ++ print(("\nTOTAL:\nfound %d test schemas: %d success %d failures" % ( ++ nb_schemas_tests, nb_schemas_success, nb_schemas_failed))) + if quiet == 0 or nb_instances_failed != 0: +- print("found %d test instances: %d success %d failures" % ( +- nb_instances_tests, nb_instances_success, nb_instances_failed)) ++ print(("found %d test instances: %d success %d failures" % ( ++ nb_instances_tests, nb_instances_success, nb_instances_failed))) + + testsuite.freeDoc() + +@@ -420,5 +420,5 @@ + if quiet == 0: + print("OK") + else: +- print("Memory leak %d bytes" % (libxml2.debugMemory(1))) ++ print(("Memory leak %d bytes" % (libxml2.debugMemory(1)))) + libxml2.dumpMemory() +--- a/src/3rdparty/chromium/third_party/libxml/src/genUnicode.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libxml/src/genUnicode.py 2025-01-16 02:26:08.594596154 +0800 +@@ -43,7 +43,7 @@ + try: + blocks = open(blockfile, "r") + except: +- print("Missing %s, aborting ..." % blockfile) ++ print(("Missing %s, aborting ..." 
% blockfile)) + sys.exit(1) + + for line in blocks.readlines(): +@@ -59,7 +59,7 @@ + name = fields[1].strip() + name = name.replace(' ', '') + except: +- print("Failed to process line: %s" % (line)) ++ print(("Failed to process line: %s" % (line))) + continue + start = "0x" + start + end = "0x" + end +@@ -68,7 +68,7 @@ + except: + BlockNames[name] = [(start, end)] + blocks.close() +-print("Parsed %d blocks descriptions" % (len(BlockNames.keys()))) ++print(("Parsed %d blocks descriptions" % (len(list(BlockNames.keys()))))) + + for block in blockAliases: + alias = block.split(':') +@@ -80,7 +80,7 @@ + for r in BlockNames[comp]: + BlockNames[alias[0]].append(r) + else: +- print("Alias %s: %s not in Blocks" % (alias[0], comp)) ++ print(("Alias %s: %s not in Blocks" % (alias[0], comp))) + continue + + # +@@ -96,7 +96,7 @@ + try: + data = open(catfile, "r") + except: +- print("Missing %s, aborting ..." % catfile) ++ print(("Missing %s, aborting ..." % catfile)) + sys.exit(1) + + nbchar = 0; +@@ -122,7 +122,7 @@ + point = point[1:] + name = fields[2] + except: +- print("Failed to process line: %s" % (line)) ++ print(("Failed to process line: %s" % (line))) + continue + + nbchar = nbchar + 1 +@@ -133,7 +133,7 @@ + try: + Categories[name] = [value] + except: +- print("Failed to process line: %s" % (line)) ++ print(("Failed to process line: %s" % (line))) + # update "general category" name + try: + Categories[name[0]].append(value) +@@ -141,16 +141,16 @@ + try: + Categories[name[0]] = [value] + except: +- print("Failed to process line: %s" % (line)) ++ print(("Failed to process line: %s" % (line))) + + blocks.close() +-print("Parsed %d char generating %d categories" % (nbchar, len(Categories.keys()))) ++print(("Parsed %d char generating %d categories" % (nbchar, len(list(Categories.keys()))))) + + # + # The data is now all read. Time to process it into a more useful form. 
+ #
+ # reduce the number list into ranges
+-for cat in Categories.keys():
++for cat in list(Categories.keys()):
+ list = Categories[cat]
+ start = -1
+ prev = -1
+--- a/src/3rdparty/chromium/third_party/libxml/src/gentest.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/libxml/src/gentest.py 2025-01-16 02:26:08.594596154 +0800
+@@ -492,8 +492,8 @@
+ test.close()
+ sys.exit(0)
+
+-print("Scanned testapi.c: found %d parameters types and %d return types\n" % (
+- len(known_param_types), len(known_return_types)))
++print(("Scanned testapi.c: found %d parameters types and %d return types\n" % (
++ len(known_param_types), len(known_return_types))))
+ test.write("/* CUT HERE: everything below that line is generated */\n")
+
+
+@@ -559,7 +559,7 @@
+ break;
+ vals.append(vname)
+ if vals == []:
+- print("Didn't find any value for enum %s" % (name))
++ print(("Didn't find any value for enum %s" % (name)))
+ continue
+ if module in modules_defines:
+ test.write("#ifdef %s\n" % (modules_defines[module]))
+@@ -613,7 +613,7 @@
+ #
+ desc = file.xpathEval('string(description)')
+ if desc.find('DEPRECATED') != -1:
+- print("Skipping deprecated interface %s" % name)
++ print(("Skipping deprecated interface %s" % name))
+ continue;
+
+ test.write("#include <libxml/%s.h>\n" % name)
+@@ -899,7 +899,7 @@
+ try:
+ functions = ctxt.xpathEval("/api/symbols/function[@file='%s']" % (module))
+ except:
+- print("Failed to gather functions from module %s" % (module))
++ print(("Failed to gather functions from module %s" % (module)))
+ continue;
+
+ # iterate over all functions in the module generating the test
+@@ -945,12 +945,12 @@
+ }
+ """);
+
+-print("Generated test for %d modules and %d functions" %(len(modules), nb_tests))
++print(("Generated test for %d modules and %d functions" %(len(modules), nb_tests)))
+
+ compare_and_save()
+
+ missing_list = []
+-for missing in missing_types.keys():
++for missing in list(missing_types.keys()):
+ if missing == 'va_list' or missing == '...':
+ continue;
+
+@@ -958,7 +958,7 @@
+ missing_list.append((n, missing))
+
+ missing_list.sort(key=lambda a: a[0])
+-print("Missing support for %d functions and %d types see missing.lst" % (missing_functions_nr, len(missing_list)))
++print(("Missing support for %d functions and %d types see missing.lst" % (missing_functions_nr, len(missing_list))))
+ lst = open("missing.lst", "w")
+ lst.write("Missing support for %d types" % (len(missing_list)))
+ lst.write("\n")
+@@ -975,7 +975,7 @@
+ lst.write("\n")
+ lst.write("\n")
+ lst.write("Missing support per module");
+-for module in missing_functions.keys():
++for module in list(missing_functions.keys()):
+ lst.write("module %s:\n %s\n" % (module, missing_functions[module]))
+
+ lst.close()
+--- a/src/3rdparty/chromium/third_party/libxml/src/regressions.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/libxml/src/regressions.py 2025-01-16 02:26:08.594596154 +0800
+@@ -1,5 +1,5 @@
+ #!/usr/bin/python -u
+-import glob, os, string, sys, thread, time
++import glob, os, string, sys, _thread, time
+ # import difflib
+ import libxml2
+
+@@ -74,19 +74,19 @@
+ rl = len(res)
+ el = len(exp)
+ if el != rl:
+- print 'Length of expected is %d, result is %d' % (el, rl)
++ print('Length of expected is %d, result is %d' % (el, rl))
+ ret = -1
+ for i in range(min(el, rl)):
+ if string.strip(res[i]) != string.strip(exp[i]):
+- print '+:%s-:%s' % (res[i], exp[i])
++ print('+:%s-:%s' % (res[i], exp[i]))
+ ret = -1
+ if el > rl:
+ for i in range(rl, el):
+- print
'-:%s' % exp[i] ++ print('-:%s' % exp[i]) + ret = -1 + elif rl > el: + for i in range (el, rl): +- print '+:%s' % res[i] ++ print('+:%s' % res[i]) + ret = -1 + return ret + +@@ -127,7 +127,7 @@ + fname = errbase + filename + ext + expout = open(fname, 'rt') + except: +- print "Can't open result file %s - bypassing test" % fname ++ print("Can't open result file %s - bypassing test" % fname) + return + + noErrors = 0 +@@ -169,29 +169,29 @@ + th2Flag = [] + outfile = [] # lists to contain the pipe data + errfile = [] +- th1 = thread.start_new_thread(readPfile, (pout, outfile, th1Flag)) +- th2 = thread.start_new_thread(readPfile, (perr, errfile, th2Flag)) ++ th1 = _thread.start_new_thread(readPfile, (pout, outfile, th1Flag)) ++ th2 = _thread.start_new_thread(readPfile, (perr, errfile, th2Flag)) + while (len(th1Flag)==0) or (len(th2Flag)==0): + time.sleep(0.001) + if not noResult: + ret = compFiles(outfile, expout, inbase, 'test/') + if ret != 0: +- print 'trouble with %s' % cmd ++ print('trouble with %s' % cmd) + else: + if len(outfile) != 0: + for l in outfile: +- print l +- print 'trouble with %s' % cmd ++ print(l) ++ print('trouble with %s' % cmd) + if experr != None: + ret = compFiles(errfile, experr, inbase, 'test/') + if ret != 0: +- print 'trouble with %s' % cmd ++ print('trouble with %s' % cmd) + else: + if not noErrors: + if len(errfile) != 0: + for l in errfile: +- print l +- print 'trouble with %s' % cmd ++ print(l) ++ print('trouble with %s' % cmd) + + if 'stdin' not in testDescription: + pin.close() +@@ -203,9 +203,9 @@ + testDescription = defaultParams.copy() # set defaults + testDescription.update(description) # override with current ent + if 'testname' in testDescription: +- print "## %s" % testDescription['testname'] ++ print("## %s" % testDescription['testname']) + if not 'file' in testDescription: +- print "No file specified - can't run this test!" ++ print("No file specified - can't run this test!") + return + # Set up the source and results directory paths from the decoded params + dir = '' +@@ -222,7 +222,7 @@ + + testFiles = glob.glob(os.path.abspath(dir + testDescription['file'])) + if testFiles == []: +- print "No files result from '%s'" % testDescription['file'] ++ print("No files result from '%s'" % testDescription['file']) + return + + # Some test programs just don't work (yet). For now we exclude them. 
+@@ -271,9 +271,9 @@ + self.curText += reader.Value() + + elif reader.NodeType() == 15: # end of element +- print "Defaults have been set to:" +- for k in defaultParams.keys(): +- print " %s : '%s'" % (k, defaultParams[k]) ++ print("Defaults have been set to:") ++ for k in list(defaultParams.keys()): ++ print(" %s : '%s'" % (k, defaultParams[k])) + curClass = rootClass() + return curClass + +@@ -316,8 +316,8 @@ + if reader.Depth() == 0: + return curClass + if reader.Depth() != 1: +- print "Unexpected junk: Level %d, type %d, name %s" % ( +- reader.Depth(), reader.NodeType(), reader.Name()) ++ print("Unexpected junk: Level %d, type %d, name %s" % ( ++ reader.Depth(), reader.NodeType(), reader.Name())) + return curClass + if reader.Name() == 'test': + curClass = testClass() +@@ -330,7 +330,7 @@ + try: + reader = libxml2.newTextReaderFilename(filename) + except: +- print "unable to open %s" % (filename) ++ print("unable to open %s" % (filename)) + return + + curClass = rootClass() +@@ -340,11 +340,11 @@ + ret = reader.Read() + + if ret != 0: +- print "%s : failed to parse" % (filename) ++ print("%s : failed to parse" % (filename)) + + # OK, we're finished with all the routines. Now for the main program:- + if len(sys.argv) != 2: +- print "Usage: maketest {filename}" ++ print("Usage: maketest {filename}") + sys.exit(-1) + + streamFile(sys.argv[1]) +--- a/src/3rdparty/chromium/third_party/libxslt/chromium/roll.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libxslt/chromium/roll.py 2025-01-16 02:26:08.594596154 +0800 +@@ -170,7 +170,7 @@ + + def __exit__(self, exc_type, exc_value, traceback): + if exc_value: +- print('was in %s; %s before that' % (self.path, self.prev_path)) ++ print(('was in %s; %s before that' % (self.path, self.prev_path))) + os.chdir(self.prev_path) + + +@@ -292,7 +292,7 @@ + with WorkingDir(src_path): + try: + temp_dir = tempfile.mkdtemp() +- print('temporary directory is: %s' % temp_dir) ++ print(('temporary directory is: %s' % temp_dir)) + commit, tar_file = prepare_libxslt_distribution( + src_path, repo_path, temp_dir) + +--- a/src/3rdparty/chromium/third_party/libyuv/cleanup_links.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libyuv/cleanup_links.py 2025-01-16 02:26:08.594596154 +0800 +@@ -39,7 +39,7 @@ + + def CleanupLinks(self): + logging.debug('CleanupLinks') +- for source, link_path in self._links_db.iteritems(): ++ for source, link_path in self._links_db.items(): + if source == 'SCHEMA_VERSION': + continue + if os.path.islink(link_path) or sys.platform.startswith('win'): +--- a/src/3rdparty/chromium/third_party/libyuv/tools_libyuv/get_landmines.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libyuv/tools_libyuv/get_landmines.py 2025-01-16 02:26:08.594596154 +0800 +@@ -25,8 +25,8 @@ + # dependency problems, fix the dependency problems instead of adding a + # landmine. + # See the Chromium version in src/build/get_landmines.py for usage examples. +- print 'Clobber to remove GYP artifacts after switching bots to GN.' +- print 'Another try to remove GYP artifacts after switching bots to GN.' 
++ print('Clobber to remove GYP artifacts after switching bots to GN.') ++ print('Another try to remove GYP artifacts after switching bots to GN.') + + + def main(): +--- a/src/3rdparty/chromium/third_party/libyuv/tools_libyuv/autoroller/roll_deps.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libyuv/tools_libyuv/autoroller/roll_deps.py 2025-01-16 02:26:08.594596154 +0800 +@@ -22,7 +22,7 @@ + import re + import subprocess + import sys +-import urllib2 ++import urllib.request, urllib.error, urllib.parse + + + # Skip these dependencies (list without solution name prefix). +@@ -110,7 +110,7 @@ + logging.debug('CMD: %s CWD: %s', ' '.join(command), working_dir) + env = os.environ.copy() + if extra_env: +- assert all(isinstance(value, str) for value in extra_env.values()) ++ assert all(isinstance(value, str) for value in list(extra_env.values())) + logging.debug('extra env: %s', extra_env) + env.update(extra_env) + p = subprocess.Popen(command, stdout=subprocess.PIPE, +@@ -170,7 +170,7 @@ + + def ReadUrlContent(url): + """Connect to a remote host and read the contents. Returns a list of lines.""" +- conn = urllib2.urlopen(url) ++ conn = urllib.request.urlopen(url) + try: + return conn.readlines() + except IOError as e: +@@ -193,7 +193,7 @@ + A list of DepsEntry objects. + """ + result = [] +- for path, depsentry in depsentry_dict.iteritems(): ++ for path, depsentry in depsentry_dict.items(): + if path == dir_path: + result.append(depsentry) + else: +@@ -208,7 +208,7 @@ + """Builds a dict of paths to DepsEntry objects from a raw parsed deps dict.""" + result = {} + def AddDepsEntries(deps_subdict): +- for path, deps_url_spec in deps_subdict.iteritems(): ++ for path, deps_url_spec in deps_subdict.items(): + # The deps url is either an URL and a condition, or just the URL. 
+ if isinstance(deps_url_spec, dict): + if deps_url_spec.get('dep_type') == 'cipd': +@@ -217,7 +217,7 @@ + else: + deps_url = deps_url_spec + +- if not result.has_key(path): ++ if path not in result: + url, revision = deps_url.split('@') if deps_url else (None, None) + result[path] = DepsEntry(path, url, revision) + +@@ -245,7 +245,7 @@ + result = [] + libyuv_entries = BuildDepsentryDict(libyuv_deps) + new_cr_entries = BuildDepsentryDict(new_cr_deps) +- for path, libyuv_deps_entry in libyuv_entries.iteritems(): ++ for path, libyuv_deps_entry in libyuv_entries.items(): + if path in DONT_AUTOROLL_THESE: + continue + cr_deps_entry = new_cr_entries.get(path) +--- a/src/3rdparty/chromium/third_party/libyuv/tools_libyuv/autoroller/unittests/roll_deps_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libyuv/tools_libyuv/autoroller/unittests/roll_deps_test.py 2025-01-16 02:26:08.594596154 +0800 +@@ -85,7 +85,7 @@ + def testVarLookup(self): + local_scope = {'foo': 'wrong', 'vars': {'foo': 'bar'}} + lookup = roll_deps.VarLookup(local_scope) +- self.assertEquals(lookup('foo'), 'bar') ++ self.assertEqual(lookup('foo'), 'bar') + + def testUpdateDepsFile(self): + new_rev = 'aaaaabbbbbcccccdddddeeeeefffff0000011111' +@@ -104,24 +104,24 @@ + vars_dict = local_scope['vars'] + + def assertVar(variable_name): +- self.assertEquals(vars_dict[variable_name], TEST_DATA_VARS[variable_name]) ++ self.assertEqual(vars_dict[variable_name], TEST_DATA_VARS[variable_name]) + assertVar('chromium_git') + assertVar('chromium_revision') +- self.assertEquals(len(local_scope['deps']), 3) ++ self.assertEqual(len(local_scope['deps']), 3) + + def testGetMatchingDepsEntriesReturnsPathInSimpleCase(self): + entries = GetMatchingDepsEntries(DEPS_ENTRIES, 'src/testing/gtest') +- self.assertEquals(len(entries), 1) +- self.assertEquals(entries[0], DEPS_ENTRIES['src/testing/gtest']) ++ self.assertEqual(len(entries), 1) ++ self.assertEqual(entries[0], DEPS_ENTRIES['src/testing/gtest']) + + def testGetMatchingDepsEntriesHandlesSimilarStartingPaths(self): + entries = GetMatchingDepsEntries(DEPS_ENTRIES, 'src/testing') +- self.assertEquals(len(entries), 2) ++ self.assertEqual(len(entries), 2) + + def testGetMatchingDepsEntriesHandlesTwoPathsWithIdenticalFirstParts(self): + entries = GetMatchingDepsEntries(DEPS_ENTRIES, 'src/build') +- self.assertEquals(len(entries), 1) +- self.assertEquals(entries[0], DEPS_ENTRIES['src/build']) ++ self.assertEqual(len(entries), 1) ++ self.assertEqual(entries[0], DEPS_ENTRIES['src/build']) + + def testCalculateChangedDeps(self): + _SetupGitLsRemoteCall(self.fake, +@@ -129,14 +129,14 @@ + libyuv_deps = ParseLocalDepsFile(self._libyuv_depsfile) + new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile) + changed_deps = CalculateChangedDeps(libyuv_deps, new_cr_deps) +- self.assertEquals(len(changed_deps), 2) +- self.assertEquals(changed_deps[0].path, 'src/build') +- self.assertEquals(changed_deps[0].current_rev, BUILD_OLD_REV) +- self.assertEquals(changed_deps[0].new_rev, BUILD_NEW_REV) +- +- self.assertEquals(changed_deps[1].path, 'src/buildtools') +- self.assertEquals(changed_deps[1].current_rev, BUILDTOOLS_OLD_REV) +- self.assertEquals(changed_deps[1].new_rev, BUILDTOOLS_NEW_REV) ++ self.assertEqual(len(changed_deps), 2) ++ self.assertEqual(changed_deps[0].path, 'src/build') ++ self.assertEqual(changed_deps[0].current_rev, BUILD_OLD_REV) ++ self.assertEqual(changed_deps[0].new_rev, BUILD_NEW_REV) ++ ++ self.assertEqual(changed_deps[1].path, 'src/buildtools') ++ 
self.assertEqual(changed_deps[1].current_rev, BUILDTOOLS_OLD_REV)
++ self.assertEqual(changed_deps[1].new_rev, BUILDTOOLS_NEW_REV)
+
+
+ def _SetupGitLsRemoteCall(cmd_fake, url, revision):
+--- a/src/3rdparty/chromium/third_party/libyuv/tools_libyuv/valgrind/chrome_tests.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/libyuv/tools_libyuv/valgrind/chrome_tests.py 2025-01-16 02:26:08.594596154 +0800
+@@ -248,22 +248,22 @@
+ @staticmethod
+ def ShowTests():
+ test_to_names = {}
+- for name, test_function in ChromeTests._test_list.iteritems():
++ for name, test_function in ChromeTests._test_list.items():
+ test_to_names.setdefault(test_function, []).append(name)
+
+ name_to_aliases = {}
+- for names in test_to_names.itervalues():
++ for names in test_to_names.values():
+ names.sort(key=lambda name: len(name))
+ name_to_aliases[names[0]] = names[1:]
+
+- print
+- print "Available tests:"
+- print "----------------"
+- for name, aliases in sorted(name_to_aliases.iteritems()):
++ print()
++ print("Available tests:")
++ print("----------------")
++ for name, aliases in sorted(name_to_aliases.items()):
+ if aliases:
+- print " {} (aka {})".format(name, ', '.join(aliases))
++ print(" {} (aka {})".format(name, ', '.join(aliases)))
+ else:
+- print " {}".format(name)
++ print(" {}".format(name))
+
+ def SetupLdPath(self, requires_build_dir):
+ if requires_build_dir:
+@@ -629,7 +629,8 @@
+ if chunk_num > 10000:
+ chunk_num = 0
+ f.close()
+- except IOError, (errno, strerror):
++ except IOError as e:
++ (errno, strerror) = e.args
+ logging.error("error reading from file %s (%d, %s)" % (chunk_file,
+ errno, strerror))
+ # Save the new chunk size before running the tests. Otherwise if a
+@@ -641,7 +642,8 @@
+ chunk_num += 1
+ f.write("%d" % chunk_num)
+ f.close()
+- except IOError, (errno, strerror):
++ except IOError as e:
++ (errno, strerror) = e.args
+ logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
+ strerror))
+ # Since we're running small chunks of the layout tests, it's important to
+--- a/src/3rdparty/chromium/third_party/libyuv/tools_libyuv/valgrind/common.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/libyuv/tools_libyuv/valgrind/common.py 2025-01-16 02:26:08.594596154 +0800
+@@ -246,11 +246,11 @@
+ if not suppcounts:
+ return False
+
+- print "-----------------------------------------------------"
+- print "Suppressions used:"
+- print " count name"
+- for (name, count) in sorted(suppcounts.items(), key=lambda (k,v): (v,k)):
+- print "%7d %s" % (count, name)
+- print "-----------------------------------------------------"
++ print("-----------------------------------------------------")
++ print("Suppressions used:")
++ print(" count name")
++ for (name, count) in sorted(list(suppcounts.items()), key=lambda k_v: (k_v[1],k_v[0])):
++ print("%7d %s" % (count, name))
++ print("-----------------------------------------------------")
+ sys.stdout.flush()
+ return True
+--- a/src/3rdparty/chromium/third_party/libyuv/tools_libyuv/valgrind/gdb_helper.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/libyuv/tools_libyuv/valgrind/gdb_helper.py 2025-01-16 02:26:08.594596154 +0800
+@@ -72,7 +72,7 @@
+ def ResolveAll(self):
+ ''' Carry out all lookup requests.
''' + self._translation = {} +- for binary in self._binaries.keys(): ++ for binary in list(self._binaries.keys()): + if binary != '' and binary in self._load_addresses: + load_address = self._load_addresses[binary] + addr = ResolveAddressesWithinABinary( +--- a/src/3rdparty/chromium/third_party/libyuv/tools_libyuv/valgrind/memcheck_analyze.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libyuv/tools_libyuv/valgrind/memcheck_analyze.py 2025-01-16 02:26:08.595679469 +0800 +@@ -517,7 +517,7 @@ + file) + try: + parsed_file = parse(file); +- except ExpatError, e: ++ except ExpatError as e: + parse_failed = True + logging.warn("could not parse %s: %s" % (file, e)) + lineno = e.lineno - 1 +@@ -605,7 +605,7 @@ + # Report tool's insanity even if there were errors. + if check_sanity: + remaining_sanity_supp = MemcheckAnalyzer.SANITY_TEST_SUPPRESSIONS +- for (name, count) in suppcounts.iteritems(): ++ for (name, count) in suppcounts.items(): + # Workaround for http://crbug.com/334074 + if (name in remaining_sanity_supp and + remaining_sanity_supp[name] <= count): +@@ -613,7 +613,7 @@ + if remaining_sanity_supp: + logging.error("FAIL! Sanity check failed!") + logging.info("The following test errors were not handled: ") +- for (name, count) in remaining_sanity_supp.iteritems(): ++ for (name, count) in remaining_sanity_supp.items(): + logging.info(" * %dx %s" % (count, name)) + retcode = -3 + +--- a/src/3rdparty/chromium/third_party/libyuv/tools_libyuv/valgrind/valgrind_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libyuv/tools_libyuv/valgrind/valgrind_test.py 2025-01-16 02:26:08.595679469 +0800 +@@ -64,12 +64,12 @@ + } + + def ToolName(self): +- raise NotImplementedError, "This method should be implemented " \ +- "in the tool-specific subclass" ++ raise NotImplementedError("This method should be implemented " \ ++ "in the tool-specific subclass") + + def Analyze(self, check_sanity=False): +- raise NotImplementedError, "This method should be implemented " \ +- "in the tool-specific subclass" ++ raise NotImplementedError("This method should be implemented " \ ++ "in the tool-specific subclass") + + def RegisterOptionParserHook(self, hook): + # Frameworks and tools can add their own flags to the parser. 
+@@ -164,8 +164,8 @@ + return self.ParseArgv(args) + + def ToolCommand(self): +- raise NotImplementedError, "This method should be implemented " \ +- "in the tool-specific subclass" ++ raise NotImplementedError("This method should be implemented " \ ++ "in the tool-specific subclass") + + def Cleanup(self): + # You may override it in the tool-specific subclass +@@ -337,8 +337,8 @@ + return proc + + def ToolSpecificFlags(self): +- raise NotImplementedError, "This method should be implemented " \ +- "in the tool-specific subclass" ++ raise NotImplementedError("This method should be implemented " \ ++ "in the tool-specific subclass") + + def CreateBrowserWrapper(self, proc, webkit=False): + """The program being run invokes Python or something else that can't stand +@@ -383,8 +383,8 @@ + return indirect_fname + + def CreateAnalyzer(self): +- raise NotImplementedError, "This method should be implemented " \ +- "in the tool-specific subclass" ++ raise NotImplementedError("This method should be implemented " \ ++ "in the tool-specific subclass") + + def GetAnalyzeResults(self, check_sanity=False): + # Glob all the files in the log directory +@@ -414,13 +414,13 @@ + testcase_name = testcase_name[wk_prefix_at + len(wk_layout_prefix):] + except IOError: + pass +- print "=====================================================" +- print " Below is the report for valgrind wrapper PID=%d." % ppid ++ print("=====================================================") ++ print(" Below is the report for valgrind wrapper PID=%d." % ppid) + if testcase_name: +- print " It was used while running the `%s` test." % testcase_name ++ print(" It was used while running the `%s` test." % testcase_name) + else: +- print " You can find the corresponding test" +- print " by searching the above log for 'PID=%d'" % ppid ++ print(" You can find the corresponding test") ++ print(" by searching the above log for 'PID=%d'" % ppid) + sys.stdout.flush() + + ppid_filenames = [f for f in filenames \ +@@ -428,15 +428,15 @@ + # check_sanity won't work with browser wrappers + assert check_sanity == False + ret |= analyzer.Report(ppid_filenames, testcase_name) +- print "=====================================================" ++ print("=====================================================") + sys.stdout.flush() + + if ret != 0: +- print "" +- print "The Valgrind reports are grouped by test names." +- print "Each test has its PID printed in the log when the test was run" +- print "and at the beginning of its Valgrind report." 
+- print "Hint: you can search for the reports by Ctrl+F -> `=#`" ++ print("") ++ print("The Valgrind reports are grouped by test names.") ++ print("Each test has its PID printed in the log when the test was run") ++ print("and at the beginning of its Valgrind report.") ++ print("Hint: you can search for the reports by Ctrl+F -> `=#`") + sys.stdout.flush() + + return ret +@@ -510,8 +510,8 @@ + platform_name = common.PlatformNames()[0] + except common.NotImplementedError: + platform_name = sys.platform + "(Unknown)" +- raise RuntimeError, "Unknown tool (tool=%s, platform=%s)" % (tool_name, +- platform_name) ++ raise RuntimeError("Unknown tool (tool=%s, platform=%s)" % (tool_name, ++ platform_name)) + + def CreateTool(tool): + return ToolFactory().Create(tool) +--- a/src/3rdparty/chromium/third_party/libyuv/tools_libyuv/valgrind/memcheck/PRESUBMIT.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/libyuv/tools_libyuv/valgrind/memcheck/PRESUBMIT.py 2025-01-16 02:26:08.595679469 +0800 +@@ -36,10 +36,9 @@ + # - 'skip_suppression_name': the next line is a suppression name, skip. + # - 'skip_param': the next line is a system call parameter error, skip. + skip_next_line = False +- for f in filter(lambda x: sup_regex.search(x.LocalPath()), +- input_api.AffectedFiles()): ++ for f in [x for x in input_api.AffectedFiles() if sup_regex.search(x.LocalPath())]: + for line, line_num in zip(f.NewContents(), +- xrange(1, len(f.NewContents()) + 1)): ++ range(1, len(f.NewContents()) + 1)): + line = line.lstrip() + if line.startswith('#') or not line: + continue +@@ -49,7 +48,7 @@ + if 'insert_a_suppression_name_here' in line: + errors.append('"insert_a_suppression_name_here" is not a valid ' + 'suppression name') +- if suppressions.has_key(line): ++ if line in suppressions: + if f.LocalPath() == suppressions[line][1]: + errors.append('suppression with name "%s" at %s line %s ' + 'has already been defined at line %s' % +--- a/src/3rdparty/chromium/third_party/mako/doc/build/conf.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/mako/doc/build/conf.py 2025-01-16 02:26:08.595679469 +0800 +@@ -60,8 +60,8 @@ + master_doc = 'index' + + # General information about the project. +-project = u'Mako' +-copyright = u'the Mako authors and contributors' ++project = 'Mako' ++copyright = 'the Mako authors and contributors' + + # The version info for the project you're documenting, acts as replacement for + # |version| and |release|, also used in various other places throughout the +@@ -209,8 +209,8 @@ + # Grouping the document tree into LaTeX files. List of tuples + # (source start file, target name, title, author, documentclass [howto/manual]). + latex_documents = [ +- ('index', 'mako_%s.tex' % release.replace('.', '_'), u'Mako Documentation', +- u'Mike Bayer', 'manual'), ++ ('index', 'mako_%s.tex' % release.replace('.', '_'), 'Mako Documentation', ++ 'Mike Bayer', 'manual'), + ] + + # The name of an image file (relative to this directory) to place at the top of +@@ -247,18 +247,18 @@ + # One entry per manual page. List of tuples + # (source start file, name, description, authors, manual section). + man_pages = [ +- ('index', 'mako', u'Mako Documentation', +- [u'Mako authors'], 1) ++ ('index', 'mako', 'Mako Documentation', ++ ['Mako authors'], 1) + ] + + + # -- Options for Epub output --------------------------------------------------- + + # Bibliographic Dublin Core info. 
+-epub_title = u'Mako' +-epub_author = u'Mako authors' +-epub_publisher = u'Mako authors' +-epub_copyright = u'Mako authors' ++epub_title = 'Mako' ++epub_author = 'Mako authors' ++epub_publisher = 'Mako authors' ++epub_copyright = 'Mako authors' + + # The language of the text. It defaults to the language option + # or en if the language is not set. +--- a/src/3rdparty/chromium/third_party/mako/examples/bench/basic.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/mako/examples/bench/basic.py 2025-01-16 02:26:08.595679469 +0800 +@@ -31,7 +31,7 @@ + from cgi import escape + import os + try: +- from StringIO import StringIO ++ from io import StringIO + except ImportError: + from io import StringIO + import sys +@@ -61,7 +61,7 @@ + return template.generate(**data).render('xhtml') + + if verbose: +- print(render()) ++ print((render())) + return render + + def myghty(dirname, verbose=False): +@@ -73,7 +73,7 @@ + interpreter.execute("template.myt", request_args=data, out_buffer=buffer) + return buffer.getvalue() + if verbose: +- print(render()) ++ print((render())) + return render + + def mako(dirname, verbose=False): +@@ -85,7 +85,7 @@ + def render(): + return template.render(title=TITLE, user=USER, list_items=U_ITEMS) + if verbose: +- print(template.code + " " + render()) ++ print((template.code + " " + render())) + return render + mako_inheritance = mako + +@@ -96,7 +96,7 @@ + def render(): + return template.render(title=TITLE, user=USER, list_items=U_ITEMS) + if verbose: +- print(render()) ++ print((render())) + return render + jinja2_inheritance = jinja2 + +@@ -110,9 +110,9 @@ + return template.respond() + + if verbose: +- print(dir(template)) +- print(template.generatedModuleCode()) +- print(render()) ++ print((dir(template))) ++ print((template.generatedModuleCode())) ++ print((render())) + return render + + def django(dirname, verbose=False): +@@ -128,7 +128,7 @@ + return tmpl.render(template.Context(data)) + + if verbose: +- print(render()) ++ print((render())) + return render + + def kid(dirname, verbose=False): +@@ -141,7 +141,7 @@ + return template.serialize(output='xhtml') + + if verbose: +- print(render()) ++ print((render())) + return render + + +@@ -150,7 +150,7 @@ + for engine in engines: + dirname = os.path.join(basepath, engine) + if verbose: +- print('%s:' % engine.capitalize()) ++ print(('%s:' % engine.capitalize())) + print('--------------------------------------------------------') + else: + sys.stdout.write('%s:' % engine.capitalize()) +@@ -161,7 +161,7 @@ + time = t.timeit(number=number) / number + if verbose: + print('--------------------------------------------------------') +- print('%.2f ms' % (1000 * time)) ++ print(('%.2f ms' % (1000 * time))) + if verbose: + print('--------------------------------------------------------') + +--- a/src/3rdparty/chromium/third_party/mako/examples/wsgi/run_wsgi.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/mako/examples/wsgi/run_wsgi.py 2025-01-16 02:26:08.595679469 +0800 +@@ -1,9 +1,9 @@ + #!/usr/bin/python + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function +-from __future__ import unicode_literals ++ ++ ++ ++ + + import cgi, re, os, posixpath, mimetypes + from mako.lookup import TemplateLookup +--- a/src/3rdparty/chromium/third_party/mako/mako/codegen.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/mako/mako/codegen.py 2025-01-16 02:26:08.595679469 +0800 +@@ -278,7 +278,7 
@@ + self.compiler.identifiers = module_identifiers + self.printer.writeline( + "_exports = %r" +- % [n.name for n in main_identifiers.topleveldefs.values()] ++ % [n.name for n in list(main_identifiers.topleveldefs.values())] + ) + self.printer.write_blanks(2) + +@@ -381,7 +381,7 @@ + ) + self.printer.writeline("def _mako_generate_namespaces(context):") + +- for node in namespaces.values(): ++ for node in list(namespaces.values()): + if "import" in node.attributes: + self.compiler.has_ns_imports = True + self.printer.start_source(node.lineno) +@@ -490,7 +490,7 @@ + # write closure functions for closures that we define + # right here + to_write = to_write.union( +- [c.funcname for c in identifiers.closuredefs.values()] ++ [c.funcname for c in list(identifiers.closuredefs.values())] + ) + + # remove identifiers that are declared in the argument +@@ -518,7 +518,7 @@ + if toplevel and getattr(self.compiler, "has_ns_imports", False): + self.printer.writeline("_import_ns = {}") + self.compiler.has_imports = True +- for ident, ns in self.compiler.namespaces.items(): ++ for ident, ns in list(self.compiler.namespaces.items()): + if "import" in ns.attributes: + self.printer.writeline( + "_mako_get_namespace(context, %r)." +@@ -757,7 +757,7 @@ + name, + ",".join(pass_args), + "".join( +- ["%s=%s, " % (k, v) for k, v in cache_args.items()] ++ ["%s=%s, " % (k, v) for k, v in list(cache_args.items())] + ), + name, + ) +@@ -777,7 +777,7 @@ + name, + ",".join(pass_args), + "".join( +- ["%s=%s, " % (k, v) for k, v in cache_args.items()] ++ ["%s=%s, " % (k, v) for k, v in list(cache_args.items())] + ), + name, + ), +@@ -1040,7 +1040,7 @@ + # in an enclosing namespace (i.e. names we can just use) + self.declared = ( + set(parent.declared) +- .union([c.name for c in parent.closuredefs.values()]) ++ .union([c.name for c in list(parent.closuredefs.values())]) + .union(parent.locally_declared) + .union(parent.argument_declared) + ) +@@ -1114,8 +1114,8 @@ + list(self.declared), + list(self.locally_declared), + list(self.undeclared), +- [c.name for c in self.topleveldefs.values()], +- [c.name for c in self.closuredefs.values()], ++ [c.name for c in list(self.topleveldefs.values())], ++ [c.name for c in list(self.closuredefs.values())], + self.argument_declared, + ) + ) +--- a/src/3rdparty/chromium/third_party/mako/mako/compat.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/mako/mako/compat.py 2025-01-16 02:26:08.595679469 +0800 +@@ -61,24 +61,23 @@ + + + else: +- import __builtin__ as compat_builtins # noqa ++ import builtins as compat_builtins # noqa + + try: +- from cStringIO import StringIO ++ from io import StringIO + except: +- from StringIO import StringIO ++ from io import StringIO + + byte_buffer = StringIO ++ from urllib.parse import quote_plus, unquote_plus # noqa ++ from html.entities import codepoint2name, name2codepoint # noqa + +- from urllib import quote_plus, unquote_plus # noqa +- from htmlentitydefs import codepoint2name, name2codepoint # noqa +- +- string_types = (basestring,) # noqa ++ string_types = (str,) # noqa + binary_type = str +- text_type = unicode # noqa ++ text_type = str # noqa + + def u(s): +- return unicode(s, "utf-8") # noqa ++ return str(s, "utf-8") # noqa + + def b(s): + return s +@@ -132,14 +131,14 @@ + if py3k: + import _thread as thread + else: +- import thread ++ import _thread + except ImportError: + import dummy_threading as threading # noqa + + if py3k: + import _dummy_thread as thread + else: +- import dummy_thread as thread # noqa ++ 
import _dummy_thread as thread # noqa
+
+ if win32 or jython:
+ time_func = time.clock
+@@ -173,7 +172,7 @@
+ if py3k:
+ co = fn.__code__
+ else:
+- co = fn.func_code
++ co = fn.__code__
+
+ nargs = co.co_argcount
+ names = co.co_varnames
+@@ -190,7 +189,7 @@
+ if py3k:
+ return args, varargs, varkw, fn.__defaults__
+ else:
+- return args, varargs, varkw, fn.func_defaults
++ return args, varargs, varkw, fn.__defaults__
+
+
+ except ImportError:
+--- a/src/3rdparty/chromium/third_party/mako/mako/filters.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/mako/mako/filters.py 2025-01-16 02:26:08.595679469 +0800
+@@ -101,7 +101,7 @@
+ self.codepoint2entity = dict(
+ [
+ (c, compat.text_type("&%s;" % n))
+- for c, n in codepoint2name.items()
++ for c, n in list(codepoint2name.items())
+ ]
+ )
+ self.name2codepoint = name2codepoint
+@@ -183,7 +183,7 @@
+ characters with HTML entities, or, if no HTML entity exists for
+ the character, XML character references::
+
+- >>> u'The cost was \u20ac12.'.encode('latin1', 'htmlentityreplace')
++ >>> u'The cost was \\u20ac12.'.encode('latin1', 'htmlentityreplace')
+ 'The cost was &euro;12.'
+ """
+ if isinstance(ex, UnicodeEncodeError):
+--- a/src/3rdparty/chromium/third_party/mako/mako/parsetree.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/mako/mako/parsetree.py 2025-01-16 02:26:08.595679469 +0800
+@@ -446,7 +446,7 @@
+
+ def undeclared_identifiers(self):
+ return self.filter_args.undeclared_identifiers.difference(
+- filters.DEFAULT_ESCAPES.keys()
++ list(filters.DEFAULT_ESCAPES.keys())
+ ).union(self.expression_undeclared_identifiers)
+
+
+@@ -505,7 +505,7 @@
+ set(res)
+ .union(
+ self.filter_args.undeclared_identifiers.difference(
+- filters.DEFAULT_ESCAPES.keys()
++ list(filters.DEFAULT_ESCAPES.keys())
+ )
+ )
+ .union(self.expression_undeclared_identifiers)
+@@ -568,7 +568,7 @@
+ def undeclared_identifiers(self):
+ return (
+ self.filter_args.undeclared_identifiers.difference(
+- filters.DEFAULT_ESCAPES.keys()
++ list(filters.DEFAULT_ESCAPES.keys())
+ )
+ ).union(self.expression_undeclared_identifiers)
+
+@@ -612,7 +612,7 @@
+ ",".join(
+ [
+ "%s=%s" % (k, v)
+- for k, v in self.parsed_attributes.items()
++ for k, v in list(self.parsed_attributes.items())
+ if k != "args"
+ ]
+ ),
+--- a/src/3rdparty/chromium/third_party/mako/mako/runtime.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/mako/mako/runtime.py 2025-01-16 02:26:08.595679469 +0800
+@@ -191,7 +191,7 @@
+ def __init__(self):
+ self.nextcaller = None
+
+- def __nonzero__(self):
++ def __bool__(self):
+ return self.__bool__()
+
+ def __bool__(self):
+@@ -228,7 +228,7 @@
+ def __str__(self):
+ raise NameError("Undefined")
+
+- def __nonzero__(self):
++ def __bool__(self):
+ return self.__bool__()
+
+ def __bool__(self):
+--- a/src/3rdparty/chromium/third_party/mako/mako/template.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/mako/mako/template.py 2025-01-16 02:26:08.595679469 +0800
+@@ -661,7 +661,7 @@
+ ).group(1)
+ source_map = compat.json.loads(source_map)
+ source_map["line_map"] = dict(
+- (int(k), int(v)) for k, v in source_map["line_map"].items()
++ (int(k), int(v)) for k, v in list(source_map["line_map"].items())
+ )
+ if full_line_map:
+ f_line_map = source_map["full_line_map"] = []
+@@ -772,7 +772,7 @@
+ if compat.py3k:
+ return _get_module_info(callable_.__globals__["__name__"])
+ else:
+- return _get_module_info(callable_.func_globals["__name__"])
++ return
_get_module_info(callable_.__globals__["__name__"])
+
+
+ def _get_module_info(filename):
+--- a/src/3rdparty/chromium/third_party/mako/mako/ext/babelplugin.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/mako/mako/ext/babelplugin.py 2025-01-16 02:26:08.595679469 +0800
+@@ -15,7 +15,7 @@
+ self.keywords = keywords
+ self.options = options
+ self.config = {
+- "comment-tags": u" ".join(comment_tags),
++ "comment-tags": " ".join(comment_tags),
+ "encoding": options.get(
+ "input_encoding", options.get("encoding", None)
+ ),
+--- a/src/3rdparty/chromium/third_party/mako/mako/ext/extract.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/mako/mako/ext/extract.py 2025-01-16 02:26:08.595679469 +0800
+@@ -24,7 +24,7 @@
+ in_translator_comments = False
+ input_encoding = self.config["encoding"] or "ascii"
+ comment_tags = list(
+- filter(None, re.split(r"\s+", self.config["comment-tags"]))
++ [_f for _f in re.split(r"\s+", self.config["comment-tags"]) if _f]
+ )
+
+ for node in nodes:
+--- a/src/3rdparty/chromium/third_party/mako/mako/ext/turbogears.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/mako/mako/ext/turbogears.py 2025-01-16 02:26:08.595679469 +0800
+@@ -21,7 +21,7 @@
+
+ # Pull the options out and initialize the lookup
+ lookup_options = {}
+- for k, v in options.items():
++ for k, v in list(options.items()):
+ if k.startswith("mako."):
+ lookup_options[k[5:]] = v
+ elif k in ["directories", "filesystem_checks", "module_directory"]:
+--- a/src/3rdparty/chromium/third_party/markupsafe/__init__.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/markupsafe/__init__.py 2025-01-16 02:26:08.595679469 +0800
+@@ -10,7 +10,7 @@
+ """
+ import re
+ from markupsafe._compat import text_type, string_types, int_types, \
+- unichr, PY2
++ chr, PY2
+
+
+ __all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent']
+@@ -65,7 +65,7 @@
+ """
+ __slots__ = ()
+
+- def __new__(cls, base=u'', encoding=None, errors='strict'):
++ def __new__(cls, base='', encoding=None, errors='strict'):
+ if hasattr(base, '__html__'):
+ base = base.__html__()
+ if encoding is None:
+@@ -105,7 +105,7 @@
+ )
+
+ def join(self, seq):
+- return self.__class__(text_type.join(self, map(self.escape, seq)))
++ return self.__class__(text_type.join(self, list(map(self.escape, seq))))
+ join.__doc__ = text_type.join.__doc__
+
+ def split(self, *args, **kwargs):
+@@ -131,15 +131,15 @@
+ def handle_match(m):
+ name = m.group(1)
+ if name in HTML_ENTITIES:
+- return unichr(HTML_ENTITIES[name])
++ return chr(HTML_ENTITIES[name])
+ try:
+ if name[:2] in ('#x', '#X'):
+- return unichr(int(name[2:], 16))
++ return chr(int(name[2:], 16))
+ elif name.startswith('#'):
+- return unichr(int(name[1:]))
++ return chr(int(name[1:]))
+ except ValueError:
+ pass
+- return u''
++ return ''
+ return _entity_re.sub(handle_match, text_type(self))
+
+ def striptags(self):
+@@ -150,7 +150,7 @@
+ >>> Markup("Main &raquo;  <em>About</em>").striptags()
+ u'Main \xbb About'
+ """
+- stripped = u' '.join(_striptags_re.sub('', self).split())
++ stripped = ' '.join(_striptags_re.sub('', self).split())
+ return Markup(stripped).unescape()
+
+ @classmethod
+--- a/src/3rdparty/chromium/third_party/markupsafe/_compat.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/markupsafe/_compat.py 2025-01-16 02:26:08.595679469 +0800
+@@ -15,10 +15,10 @@
+ if not PY2:
+ text_type = str
+ string_types = (str,)
+- unichr = chr
++ chr = chr
+
int_types = (int,)
+ else:
+- text_type = unicode
+- string_types = (str, unicode)
+- unichr = unichr
+- int_types = (int, long)
++ text_type = str
++ string_types = (str, str)
++ chr = chr
++ int_types = (int, int)
+--- a/src/3rdparty/chromium/third_party/motemplate/motemplate.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/motemplate/motemplate.py 2025-01-16 02:26:08.596762784 +0800
+@@ -76,7 +76,7 @@
+ return len(self._buf[0])
+
+ def Append(self, string):
+- if not isinstance(string, basestring):
++ if not isinstance(string, str):
+ string = str(string)
+ self._buf.append(string)
+
+@@ -85,7 +85,7 @@
+ return self._buf[0]
+
+ def _Collapse(self):
+- self._buf = [u''.join(self._buf)]
++ self._buf = [''.join(self._buf)]
+
+ def __repr__(self):
+ return self.ToString()
+@@ -108,7 +108,7 @@
+ def GetKeys(self):
+ '''Returns the list of keys that |_value| contains.
+ '''
+- return self._found.keys()
++ return list(self._found.keys())
+
+ def Get(self, key):
+ '''Returns the value for |key|, or None if not found (including if
+@@ -134,7 +134,7 @@
+ '''Initializes with the initial global contexts, listed in order from most
+ to least important.
+ '''
+- self._nodes = map(_Contexts._Node, globals_)
++ self._nodes = list(map(_Contexts._Node, globals_))
+ self._first_local = len(self._nodes)
+ self._value_info = {}
+
+@@ -496,7 +496,7 @@
+ if value is None:
+ render_state.AddResolutionError(self._id)
+ return
+- string = value if isinstance(value, basestring) else str(value)
++ string = value if isinstance(value, str) else str(value)
+ render_state.text.Append(string.replace('&', '&amp;')
+ .replace('<', '&lt;')
+ .replace('>', '&gt;'))
+@@ -516,7 +516,7 @@
+ if value is None:
+ render_state.AddResolutionError(self._id)
+ return
+- string = value if isinstance(value, basestring) else str(value)
++ string = value if isinstance(value, str) else str(value)
+ render_state.text.Append(string)
+
+ def __repr__(self):
+@@ -723,10 +723,10 @@
+ if self._args is not None:
+ def resolve_args(args):
+ resolved = {}
+- for key, value in args.iteritems():
++ for key, value in args.items():
+ if isinstance(value, dict):
+- assert len(value.keys()) == 1
+- id_of_partial, partial_args = value.items()[0]
++ assert len(list(value.keys())) == 1
++ id_of_partial, partial_args = list(value.items())[0]
+ partial = render_state.contexts.Resolve(id_of_partial.name)
+ if partial is not None:
+ resolved[key] = _PartialNodeWithArguments(
+--- a/src/3rdparty/chromium/third_party/nasm/find_patches.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/nasm/find_patches.py 2025-01-16 02:26:08.596762784 +0800
+@@ -18,7 +18,7 @@
+ find_patches.py origin/merge-m68 > patches.68
+ """
+
+-from __future__ import print_function
++
+ import collections
+ import os
+ import re
+@@ -207,7 +207,7 @@
+
+ # For all files that have deleted lines, look for the sha1 that deleted them.
+ # This is heuristic only; we're looking for "commits that contain some text".
+- for filename, deleted_lines in files_to_deleted_lines.items():
++ for filename, deleted_lines in list(files_to_deleted_lines.items()):
+ for deleted_line in deleted_lines:
+ # Make sure that the deleted line is long enough to provide context.
+ if len(deleted_line) < 4: +@@ -246,7 +246,7 @@ + file=output_file) + print("\n", file=output_file) + wd = os.getcwd() +- for sha1, date in sorted(sha1_to_date.iteritems(), key=lambda (k, v): v): ++ for sha1, date in sorted(iter(sha1_to_date.items()), key=lambda k_v: k_v[1]): + print( + "------------------------------------------------------------------", + file=output_file) +--- a/src/3rdparty/chromium/third_party/nasm/generate_nasm_sources.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/nasm/generate_nasm_sources.py 2025-01-16 02:26:08.596762784 +0800 +@@ -43,24 +43,24 @@ + + def PrintFileList(out, name, files): + if len(files) == 0: +- print >>out, "%s = []" % (name,) ++ print("%s = []" % (name,), file=out) + elif len(files) == 1: +- print >>out, "%s = [ \"%s\" ]" % (name, files[0]) ++ print("%s = [ \"%s\" ]" % (name, files[0]), file=out) + else: +- print >>out, "%s = [" % (name,) ++ print("%s = [" % (name,), file=out) + for f in files: +- print >>out, " \"%s\"," % (f,) +- print >>out, "]" ++ print(" \"%s\"," % (f,), file=out) ++ print("]", file=out) + + def main(): + file_lists = ParseFileLists("Makefile.in") + with open("nasm_sources.gni", "w") as out: +- print >>out, """# Copyright (c) 2018 The Chromium Authors. All rights reserved. ++ print("""# Copyright (c) 2018 The Chromium Authors. All rights reserved. + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + + # This file is created by generate_nasm_sources.py. Do not edit manually. +-""" ++""", file=out) + # Results in duplicated symbols in nasm.c + file_lists['LIBOBJ'].remove('nasmlib/errfile.c') + +--- a/src/3rdparty/chromium/third_party/nasm/travis/nasm-t.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/nasm/travis/nasm-t.py 2025-01-16 02:26:08.596762784 +0800 +@@ -79,7 +79,7 @@ + ref = desc_ids[d['ref']] + own = d.copy() + desc_array[i] = ref.copy() +- for k, v in own.items(): ++ for k, v in list(own.items()): + desc_array[i][k] = v + del desc_array[i]['id'] + return desc_array +@@ -164,36 +164,36 @@ + if args.cmd == 'list': + fmt_entry = '%-32s %s' + desc_array = collect_test_desc_from_dir(args.dir) +- print(fmt_entry % ('Name', 'Description')) ++ print((fmt_entry % ('Name', 'Description'))) + for desc in desc_array: +- print(fmt_entry % (desc['_test-name'], desc['description'])) ++ print((fmt_entry % (desc['_test-name'], desc['description']))) + + def test_abort(test, message): +- print("\t%s: %s" % (test, message)) +- print("=== Test %s ABORT ===" % (test)) ++ print(("\t%s: %s" % (test, message))) ++ print(("=== Test %s ABORT ===" % (test))) + sys.exit(1) + return False + + def test_fail(test, message): +- print("\t%s: %s" % (test, message)) +- print("=== Test %s FAIL ===" % (test)) ++ print(("\t%s: %s" % (test, message))) ++ print(("=== Test %s FAIL ===" % (test))) + return False + + def test_skip(test, message): +- print("\t%s: %s" % (test, message)) +- print("=== Test %s SKIP ===" % (test)) ++ print(("\t%s: %s" % (test, message))) ++ print(("=== Test %s SKIP ===" % (test))) + return True + + def test_over(test): +- print("=== Test %s ERROR OVER ===" % (test)) ++ print(("=== Test %s ERROR OVER ===" % (test))) + return True + + def test_pass(test): +- print("=== Test %s PASS ===" % (test)) ++ print(("=== Test %s PASS ===" % (test))) + return True + + def test_updated(test): +- print("=== Test %s UPDATED ===" % (test)) ++ print(("=== Test %s UPDATED ===" % (test))) + return True + + def run_hexdump(path): 
+@@ -205,24 +205,24 @@ + return None + + def show_std(stdname, data): +- print("\t--- %s" % (stdname)) ++ print(("\t--- %s" % (stdname))) + for i in data.split("\n"): +- print("\t%s" % i) ++ print(("\t%s" % i)) + print("\t---") + + def cmp_std(from_name, from_data, match_name, match_data): + if from_data != match_data: +- print("\t--- %s" % (from_name)) ++ print(("\t--- %s" % (from_name))) + for i in from_data.split("\n"): +- print("\t%s" % i) +- print("\t--- %s" % (match_name)) ++ print(("\t%s" % i)) ++ print(("\t--- %s" % (match_name))) + for i in match_data.split("\n"): +- print("\t%s" % i) ++ print(("\t%s" % i)) + + diff = difflib.unified_diff(from_data.split("\n"), match_data.split("\n"), + fromfile = from_name, tofile = match_name) + for i in diff: +- print("\t%s" % i.strip("\n")) ++ print(("\t%s" % i.strip("\n"))) + print("\t---") + return False + return True +@@ -234,19 +234,19 @@ + return test_fail(test, "Can't create dumps") + sa = pa.stdout.read().decode("utf-8").strip("\n") + sb = pb.stdout.read().decode("utf-8").strip("\n") +- print("\t--- hexdump %s" % (patha)) ++ print(("\t--- hexdump %s" % (patha))) + for i in sa.split("\n"): +- print("\t%s" % i) +- print("\t--- hexdump %s" % (pathb)) ++ print(("\t%s" % i)) ++ print(("\t--- hexdump %s" % (pathb))) + for i in sb.split("\n"): +- print("\t%s" % i) ++ print(("\t%s" % i)) + pa.stdout.close() + pb.stdout.close() + + diff = difflib.unified_diff(sa.split("\n"), sb.split("\n"), + fromfile = patha, tofile = pathb) + for i in diff: +- print("\t%s" % i.strip("\n")) ++ print(("\t%s" % i.strip("\n"))) + print("\t---") + return True + +@@ -271,7 +271,7 @@ + return opts + + def exec_nasm(desc): +- print("\tProcessing %s" % (desc['_test-name'])) ++ print(("\tProcessing %s" % (desc['_test-name']))) + opts = [args.nasm] + prepare_run_opts(desc) + + nasm_env = os.environ.copy() +@@ -286,7 +286,7 @@ + else: + nasm_env[v[0]] = None + +- print("\tExecuting %s" % (" ".join(opts))) ++ print(("\tExecuting %s" % (" ".join(opts)))) + pnasm = subprocess.Popen(opts, + stdout = subprocess.PIPE, + stderr = subprocess.PIPE, +@@ -315,7 +315,7 @@ + return pnasm, stdout, stderr + + def test_run(desc): +- print("=== Running %s ===" % (desc['_test-name'])) ++ print(("=== Running %s ===" % (desc['_test-name']))) + + pnasm, stdout, stderr = exec_nasm(desc) + if pnasm == None: +@@ -327,7 +327,7 @@ + match = desc['_base-dir'] + os.sep + t['match'] + if desc['_wait'] == 1: + continue +- print("\tComparing %s %s" % (output, match)) ++ print(("\tComparing %s %s" % (output, match))) + if filecmp.cmp(match, output) == False: + show_diff(desc['_test-name'], match, output) + return test_fail(desc['_test-name'], match + " and " + output + " files are different") +@@ -365,7 +365,7 @@ + # + # Compile sources and generate new targets + def test_update(desc): +- print("=== Updating %s ===" % (desc['_test-name'])) ++ print(("=== Updating %s ===" % (desc['_test-name']))) + + if 'update' in desc and desc['update'] == 'false': + return test_skip(desc['_test-name'], "No output provided") +@@ -378,17 +378,17 @@ + if 'output' in t: + output = desc['_base-dir'] + os.sep + t['output'] + match = desc['_base-dir'] + os.sep + t['match'] +- print("\tMoving %s to %s" % (output, match)) ++ print(("\tMoving %s to %s" % (output, match))) + os.rename(output, match) + if 'stdout' in t: + match = desc['_base-dir'] + os.sep + t['stdout'] +- print("\tMoving %s to %s" % ('stdout', match)) ++ print(("\tMoving %s to %s" % ('stdout', match))) + with open(match, "wb") as f: + 
f.write(stdout.encode("utf-8")) + f.close() + if 'stderr' in t: + match = desc['_base-dir'] + os.sep + t['stderr'] +- print("\tMoving %s to %s" % ('stderr', match)) ++ print(("\tMoving %s to %s" % ('stderr', match))) + with open(match, "wb") as f: + f.write(stderr.encode("utf-8")) + f.close() +--- a/src/3rdparty/chromium/third_party/node/clean_json_attrs.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/node/clean_json_attrs.py 2025-01-16 02:26:08.596762784 +0800 +@@ -13,7 +13,7 @@ + + removed = False + +- for key, val in json_dict.items(): ++ for key, val in list(json_dict.items()): + if isinstance(val, dict): + if _remove_attrs(val, attr_pattern): + removed = True +--- a/src/3rdparty/chromium/third_party/node/clean_json_attrs_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/node/clean_json_attrs_test.py 2025-01-16 02:26:08.596762784 +0800 +@@ -37,7 +37,7 @@ + args['attr_pattern'] = '^delete' + self.assertTrue(clean_json_attrs.Clean(**args)) + json_dict = self._read_temp_file('package.json') +- self.assertEquals(['ignore_me', 'version'], sorted(json_dict.keys())) ++ self.assertEqual(['ignore_me', 'version'], sorted(json_dict.keys())) + + def testFilePattern(self): + self._write_temp_file('clean_me.json', {'_where': '/a/b/c'}) +@@ -45,8 +45,8 @@ + args = self._kwargs.copy() + args['file_pattern'] = '^clean_' + self.assertTrue(clean_json_attrs.Clean(**args)) +- self.assertEquals([], self._read_temp_file('clean_me.json').keys()) +- self.assertEquals(['_args'], self._read_temp_file('ignore_me.json').keys()) ++ self.assertEqual([], list(self._read_temp_file('clean_me.json').keys())) ++ self.assertEqual(['_args'], list(self._read_temp_file('ignore_me.json').keys())) + + def testNestedKeys(self): + self._write_temp_file('package.json', { +@@ -62,14 +62,14 @@ + }) + self.assertTrue(clean_json_attrs.Clean(**self._kwargs)) + json_dict = self._read_temp_file('package.json') +- self.assertEquals(['nested', 'version'], sorted(json_dict.keys())) +- self.assertEquals(['also'], json_dict['nested'].keys()) +- self.assertEquals([], json_dict['nested']['also'].keys()) ++ self.assertEqual(['nested', 'version'], sorted(json_dict.keys())) ++ self.assertEqual(['also'], list(json_dict['nested'].keys())) ++ self.assertEqual([], list(json_dict['nested']['also'].keys())) + + def testNothingToRemove(self): + self._write_temp_file('package.json', {'version': '2.0.0'}) + self.assertFalse(clean_json_attrs.Clean(**self._kwargs)) +- self.assertEquals(['version'], self._read_temp_file('package.json').keys()) ++ self.assertEqual(['version'], list(self._read_temp_file('package.json').keys())) + + def testSimple(self): + self._write_temp_file('package.json', { +@@ -78,7 +78,7 @@ + '_where': '/some/path' + }) + self.assertTrue(clean_json_attrs.Clean(**self._kwargs)) +- self.assertEquals(['version'], self._read_temp_file('package.json').keys()) ++ self.assertEqual(['version'], list(self._read_temp_file('package.json').keys())) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/third_party/openscreen/src/build/scripts/dir_exists.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/openscreen/src/build/scripts/dir_exists.py 2025-01-16 02:26:08.596762784 +0800 +@@ -8,7 +8,7 @@ + Writes True if the argument is a directory. 
+ """ + +-from __future__ import print_function ++ + + import os.path + import sys +--- a/src/3rdparty/chromium/third_party/openscreen/src/build/scripts/install-sysroot.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/openscreen/src/build/scripts/install-sysroot.py 2025-01-16 02:26:08.596762784 +0800 +@@ -20,7 +20,7 @@ + # data storage, and the sysroots.json file should be kept in sync with Chrome's + # copy of it. + +-from __future__ import print_function ++ + + import hashlib + import json +@@ -36,7 +36,7 @@ + from urllib.request import urlopen + except ImportError: + # Fall back to Python 2's urllib2 +- from urllib2 import urlopen ++ from urllib.request import urlopen + + SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) + PARENT_DIR = os.path.dirname(SCRIPT_DIR) +--- a/src/3rdparty/chromium/third_party/openscreen/src/build/scripts/sysroot_ld_path.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/openscreen/src/build/scripts/sysroot_ld_path.py 2025-01-16 02:26:08.596762784 +0800 +@@ -10,7 +10,7 @@ + appropriate linker flags. + """ + +-from __future__ import print_function ++ + import argparse + import glob + import os +--- a/src/3rdparty/chromium/third_party/openscreen/src/testing/libfuzzer/archive_corpus.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/openscreen/src/testing/libfuzzer/archive_corpus.py 2025-01-16 02:26:08.596762784 +0800 +@@ -9,7 +9,7 @@ + Invoked by GN from fuzzer_test.gni. + """ + +-from __future__ import print_function ++ + import argparse + import os + import sys +--- a/src/3rdparty/chromium/third_party/openscreen/src/testing/libfuzzer/gen_fuzzer_config.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/openscreen/src/testing/libfuzzer/gen_fuzzer_config.py 2025-01-16 02:26:08.596762784 +0800 +@@ -8,7 +8,7 @@ + Invoked by GN from fuzzer_test.gni. + """ + +-import ConfigParser ++import configparser + import argparse + import os + import sys +@@ -52,7 +52,7 @@ + args.asan_options or args.msan_options or args.ubsan_options): + return + +- config = ConfigParser.ConfigParser() ++ config = configparser.ConfigParser() + libfuzzer_options = [] + if args.dict: + libfuzzer_options.append(('dict', os.path.basename(args.dict))) +--- a/src/3rdparty/chromium/third_party/openscreen/src/third_party/protobuf/protoc_wrapper.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/openscreen/src/third_party/protobuf/protoc_wrapper.py 2025-01-16 02:26:08.596762784 +0800 +@@ -11,7 +11,7 @@ + - Prevents bad proto names. 
+ """ + +-from __future__ import print_function ++ + import argparse + import os.path + import subprocess +--- a/src/3rdparty/chromium/third_party/openscreen/src/tools/download-clang-update-script.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/openscreen/src/tools/download-clang-update-script.py 2025-01-16 02:26:08.596762784 +0800 +@@ -20,7 +20,8 @@ + import sys + + try: +- from urllib2 import HTTPError, URLError, urlopen ++ from urllib.error import HTTPError, URLError ++ from urllib.request import urlopen + except ImportError: # For Py3 compatibility + from urllib.error import HTTPError, URLError + from urllib.request import urlopen +@@ -36,8 +37,8 @@ + args = parser.parse_args() + + if not args.output: +- print('usage: download-clang-update-script.py ' + +- '--output=tools/clang/scripts/update.py'); ++ print(('usage: download-clang-update-script.py ' + ++ '--output=tools/clang/scripts/update.py')); + return 1 + + script_contents = '' +@@ -45,11 +46,11 @@ + response = urlopen(SCRIPT_DOWNLOAD_URL) + script_contents = response.read() + except HTTPError as e: +- print e.code +- print e.read() ++ print(e.code) ++ print(e.read()) + return 1 + except URLError as e: +- print 'Download failed. Reason: ', e.reason ++ print('Download failed. Reason: ', e.reason) + return 1 + + directory = os.path.dirname(args.output) +--- a/src/3rdparty/chromium/third_party/openscreen/src/tools/cddl/cddl.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/openscreen/src/tools/cddl/cddl.py 2025-01-16 02:26:08.596762784 +0800 +@@ -26,7 +26,7 @@ + log = open(logPath, "a") + + if (args.verbose): +- print("Logging to %s" % logPath) ++ print(("Logging to %s" % logPath)) + else: + log = None + +@@ -79,7 +79,7 @@ + def echoAndRunCommand(commandArray, allowFailure, + logfile = None, verbose = False): + if verbose: +- print("\tExecuting Command: '%s'" % " ".join(commandArray)) ++ print(("\tExecuting Command: '%s'" % " ".join(commandArray))) + + if logfile != None: + process = subprocess.Popen(commandArray, stdout=logfile, stderr=logfile) +@@ -94,7 +94,7 @@ + if not allowFailure: + sys.exit("\t\tERROR: Command failed with error code: '%i'!" % returncode) + elif verbose: +- print("\t\tWARNING: Command failed with error code: '%i'!" % returncode) ++ print(("\t\tWARNING: Command failed with error code: '%i'!" % returncode)) + + def findClangFormat(): + executable = "clang-format" +--- a/src/3rdparty/chromium/third_party/opus/convert_rtcd_assembler.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/opus/convert_rtcd_assembler.py 2025-01-16 02:26:08.597846099 +0800 +@@ -16,22 +16,22 @@ + + def main(argv): + if len(argv) != 3: +- print >> sys.stderr, ('Error: You must pass the following arguments:\n' ++ print(('Error: You must pass the following arguments:\n' + ' * arm2gnu_script_path\n' + ' * input_file\n' +- ' * output_file') +- print USAGE ++ ' * output_file'), file=sys.stderr) ++ print(USAGE) + return 1 + + arm2gnu_script = os.path.abspath(argv[0]) + if not os.path.exists(arm2gnu_script): +- print >> sys.stderr, ('Error: Cannot find arm2gnu.pl script at: %s.' % +- arm2gnu_script) ++ print(('Error: Cannot find arm2gnu.pl script at: %s.' % ++ arm2gnu_script), file=sys.stderr) + return 2 + + input_file = os.path.abspath(argv[1]) + if not os.path.exists(input_file): +- print >> sys.stderr, 'Error: Cannot find input file at: %s.' % input_file ++ print('Error: Cannot find input file at: %s.' 
% input_file, file=sys.stderr) + return 3 + + output_file = argv[2] +--- a/src/3rdparty/chromium/third_party/opus/src/scripts/dump_rnn.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/opus/src/scripts/dump_rnn.py 2025-01-16 02:26:08.597846099 +0800 +@@ -1,6 +1,6 @@ + #!/usr/bin/python + +-from __future__ import print_function ++ + + from keras.models import Sequential + from keras.layers import Dense +--- a/src/3rdparty/chromium/third_party/opus/src/scripts/rnn_train.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/opus/src/scripts/rnn_train.py 2025-01-16 02:26:08.597846099 +0800 +@@ -1,6 +1,6 @@ + #!/usr/bin/python + +-from __future__ import print_function ++ + + from keras.models import Sequential + from keras.models import Model +--- a/src/3rdparty/chromium/third_party/opus/src/training/rnn_dump.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/opus/src/training/rnn_dump.py 2025-01-16 02:26:08.597846099 +0800 +@@ -1,6 +1,6 @@ + #!/usr/bin/python + +-from __future__ import print_function ++ + + from keras.models import Sequential + from keras.models import Model +--- a/src/3rdparty/chromium/third_party/opus/src/training/rnn_train.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/opus/src/training/rnn_train.py 2025-01-16 02:26:08.597846099 +0800 +@@ -1,6 +1,6 @@ + #!/usr/bin/python3 + +-from __future__ import print_function ++ + + from keras.models import Sequential + from keras.models import Model +--- a/src/3rdparty/chromium/third_party/opus/src/training/txt2hdf5.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/opus/src/training/txt2hdf5.py 2025-01-16 02:26:08.597846099 +0800 +@@ -1,6 +1,6 @@ + #!/usr/bin/python + +-from __future__ import print_function ++ + + import numpy as np + import h5py +--- a/src/3rdparty/chromium/third_party/pdfium/PRESUBMIT.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pdfium/PRESUBMIT.py 2025-01-16 02:26:08.597846099 +0800 +@@ -121,7 +121,7 @@ + cpp_system_include_pattern = input_api.re.compile(r'\s*#include <.*>') + custom_include_pattern = input_api.re.compile(r'\s*#include ".*') + +- C_SYSTEM_INCLUDES, CPP_SYSTEM_INCLUDES, CUSTOM_INCLUDES = range(3) ++ C_SYSTEM_INCLUDES, CPP_SYSTEM_INCLUDES, CUSTOM_INCLUDES = list(range(3)) + + state = C_SYSTEM_INCLUDES + +--- a/src/3rdparty/chromium/third_party/pdfium/PRESUBMIT_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pdfium/PRESUBMIT_test.py 2025-01-16 02:26:08.597846099 +0800 +@@ -31,9 +31,9 @@ + ] + mock_input_api = MockInputApi() + mock_output_api = MockOutputApi() +- mock_input_api.files = map(MockFile, correct_paths + wrong_paths) +- errors = map(str, PRESUBMIT._CheckPNGFormat(mock_input_api, +- mock_output_api)) ++ mock_input_api.files = list(map(MockFile, correct_paths + wrong_paths)) ++ errors = list(map(str, PRESUBMIT._CheckPNGFormat(mock_input_api, ++ mock_output_api))) + + self.assertEqual(len(wrong_paths), len(errors)) + self.assertFalse('notpng.cc' in errors[0]) +--- a/src/3rdparty/chromium/third_party/pdfium/testing/tools/api_check.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pdfium/testing/tools/api_check.py 2025-01-16 02:26:08.597846099 +0800 +@@ -106,9 +106,9 @@ + if not failure_list: + return True + +- print '%s:' % failure_message ++ print('%s:' % failure_message) + for f in sorted(failure_list): +- print f ++ print(f) + return False + + +@@ 
-154,8 +154,8 @@ + result = result and check + + if not result: +- print('Some checks failed. Make sure %s is in sync with the public API ' +- 'headers.' % api_test_relative_path) ++ print(('Some checks failed. Make sure %s is in sync with the public API ' ++ 'headers.' % api_test_relative_path)) + return 1 + + return 0 +--- a/src/3rdparty/chromium/third_party/pdfium/testing/tools/common.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pdfium/testing/tools/common.py 2025-01-16 02:26:08.597846099 +0800 +@@ -143,16 +143,16 @@ + os.chdir(cwd) + arg_match_output = re.search('%s = (.*)' % arg_name, gn_args_output).group(1) + if verbose: +- print >> sys.stderr, "Found '%s' for value of %s" % (arg_match_output, +- arg_name) ++ print("Found '%s' for value of %s" % (arg_match_output, ++ arg_name), file=sys.stderr) + return arg_match_output == 'true' + + + def PrintWithTime(s): + """Prints s prepended by a timestamp.""" +- print '[%s] %s' % (datetime.datetime.now().strftime("%Y%m%d %H:%M:%S"), s) ++ print('[%s] %s' % (datetime.datetime.now().strftime("%Y%m%d %H:%M:%S"), s)) + + + def PrintErr(s): + """Prints s to stderr.""" +- print >> sys.stderr, s ++ print(s, file=sys.stderr) +--- a/src/3rdparty/chromium/third_party/pdfium/testing/tools/encode_pdf_filter.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pdfium/testing/tools/encode_pdf_filter.py 2025-01-16 02:26:08.597846099 +0800 +@@ -350,7 +350,7 @@ + if not raw: + out_buffer.write(b'<<\n') + entries['Length'] = len(data) +- for k, v in entries.items(): ++ for k, v in list(entries.items()): + v = _EncodePdfValue(v) + if k == 'Length' and use_streamlen: + out_buffer.write(b' {{streamlen}}\n') +--- a/src/3rdparty/chromium/third_party/pdfium/testing/tools/fixup_pdf_template.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pdfium/testing/tools/fixup_pdf_template.py 2025-01-16 02:26:08.597846099 +0800 +@@ -17,7 +17,7 @@ + {{streamlen}} - expands to |/Length n|. 
+ """ + +-import cStringIO ++import io + import optparse + import os + import re +@@ -138,7 +138,7 @@ + processor = TemplateProcessor() + try: + with open(output_path, 'wb') as outfile: +- preprocessed = cStringIO.StringIO() ++ preprocessed = io.StringIO() + for line in infile: + preprocessed.write(line) + processor.preprocess_line(line) +@@ -146,13 +146,13 @@ + for line in preprocessed: + outfile.write(processor.process_line(line)) + except IOError: +- print >> sys.stderr, 'failed to process %s' % input_path ++ print('failed to process %s' % input_path, file=sys.stderr) + + + def insert_includes(input_path, output_file, visited_set): + input_path = os.path.normpath(input_path) + if input_path in visited_set: +- print >> sys.stderr, 'Circular inclusion %s, ignoring' % input_path ++ print('Circular inclusion %s, ignoring' % input_path, file=sys.stderr) + return + visited_set.add(input_path) + try: +@@ -170,7 +170,7 @@ + line = line.replace(WINDOWS_LINE_ENDING, UNIX_LINE_ENDING) + output_file.write(line) + except IOError: +- print >> sys.stderr, 'failed to include %s' % input_path ++ print('failed to include %s' % input_path, file=sys.stderr) + raise + visited_set.discard(input_path) + +@@ -185,7 +185,7 @@ + output_dir = os.path.dirname(testcase_path) + if options.output_dir: + output_dir = options.output_dir +- intermediate_stream = cStringIO.StringIO() ++ intermediate_stream = io.StringIO() + insert_includes(testcase_path, intermediate_stream, set()) + intermediate_stream.seek(0) + output_path = os.path.join(output_dir, testcase_root + '.pdf') +--- a/src/3rdparty/chromium/third_party/pdfium/testing/tools/githelper.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pdfium/testing/tools/githelper.py 2025-01-16 02:26:08.597846099 +0800 +@@ -6,7 +6,7 @@ + import subprocess + + # pylint: disable=relative-import +-from common import RunCommandPropagateErr ++from .common import RunCommandPropagateErr + + + class GitHelper(object): +--- a/src/3rdparty/chromium/third_party/pdfium/testing/tools/gold.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pdfium/testing/tools/gold.py 2025-01-16 02:26:08.597846099 +0800 +@@ -7,7 +7,7 @@ + import shlex + import shutil + import ssl +-import urllib2 ++import urllib.request, urllib.error, urllib.parse + + + def _ParseKeyValuePairs(kv_str): +@@ -17,7 +17,7 @@ + kv_pairs = shlex.split(kv_str) + if len(kv_pairs) % 2: + raise ValueError('Uneven number of key/value pairs. Got %s' % kv_str) +- return {kv_pairs[i]: kv_pairs[i + 1] for i in xrange(0, len(kv_pairs), 2)} ++ return {kv_pairs[i]: kv_pairs[i + 1] for i in range(0, len(kv_pairs), 2)} + + + # This module downloads a json provided by Skia Gold with the expected baselines +@@ -76,7 +76,7 @@ + timeout = 2 + while True: + try: +- response = urllib2.urlopen(url, timeout=timeout) ++ response = urllib.request.urlopen(url, timeout=timeout) + c_type = response.headers.get('Content-type', '') + EXPECTED_CONTENT_TYPE = 'application/json' + if c_type != EXPECTED_CONTENT_TYPE: +@@ -84,17 +84,17 @@ + (c_type, EXPECTED_CONTENT_TYPE)) + json_data = response.read() + break # If this line is reached, then no exception occurred. 
+- except (ssl.SSLError, urllib2.HTTPError, urllib2.URLError) as e: ++ except (ssl.SSLError, urllib.error.HTTPError, urllib.error.URLError) as e: + timeout *= 2 + if timeout < MAX_TIMEOUT: + continue +- print('Error: Unable to read skia gold json from %s: %s' % (url, e)) ++ print(('Error: Unable to read skia gold json from %s: %s' % (url, e))) + return None + + try: + data = json.loads(json_data) + except ValueError as e: +- print 'Error: Malformed json read from %s: %s' % (url, e) ++ print('Error: Malformed json read from %s: %s' % (url, e)) + return None + + return data.get('master', {}) +--- a/src/3rdparty/chromium/third_party/pdfium/testing/tools/pngdiffer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pdfium/testing/tools/pngdiffer.py 2025-01-16 02:26:08.597846099 +0800 +@@ -10,7 +10,7 @@ + import sys + + # pylint: disable=relative-import +-import common ++from . import common + + + class PathMode: +@@ -70,7 +70,7 @@ + for page in itertools.count(): + actual_path = path_templates.GetActualPath(page) + expected_paths = path_templates.GetExpectedPaths(page) +- if any(itertools.imap(os.path.exists, expected_paths)): ++ if any(map(os.path.exists, expected_paths)): + actual_paths.append(actual_path) + else: + break +@@ -92,15 +92,15 @@ + for page in itertools.count(): + actual_path = path_templates.GetActualPath(page) + expected_paths = path_templates.GetExpectedPaths(page) +- if not any(itertools.imap(os.path.exists, expected_paths)): ++ if not any(map(os.path.exists, expected_paths)): + if page == 0: +- print "WARNING: no expected results files for " + input_filename ++ print("WARNING: no expected results files for " + input_filename) + if os.path.exists(actual_path): +- print('FAILURE: Missing expected result for 0-based page %d of %s' % +- (page, input_filename)) ++ print(('FAILURE: Missing expected result for 0-based page %d of %s' % ++ (page, input_filename))) + return True + break +- print "Checking " + actual_path ++ print("Checking " + actual_path) + sys.stdout.flush() + + error = None +@@ -115,7 +115,7 @@ + break + + if error: +- print "FAILURE: " + input_filename + "; " + str(error) ++ print("FAILURE: " + input_filename + "; " + str(error)) + return True + + return False +--- a/src/3rdparty/chromium/third_party/pdfium/testing/tools/run_corpus_tests.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pdfium/testing/tools/run_corpus_tests.py 2025-01-16 02:26:08.597846099 +0800 +@@ -6,7 +6,7 @@ + import sys + + # pylint: disable=relative-import +-import test_runner ++from . import test_runner + + + def main(): +--- a/src/3rdparty/chromium/third_party/pdfium/testing/tools/run_javascript_tests.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pdfium/testing/tools/run_javascript_tests.py 2025-01-16 02:26:08.597846099 +0800 +@@ -6,7 +6,7 @@ + import sys + + # pylint: disable=relative-import +-import test_runner ++from . import test_runner + + + def main(): +--- a/src/3rdparty/chromium/third_party/pdfium/testing/tools/run_pixel_tests.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pdfium/testing/tools/run_pixel_tests.py 2025-01-16 02:26:08.597846099 +0800 +@@ -6,7 +6,7 @@ + import sys + + # pylint: disable=relative-import +-import test_runner ++from . 
import test_runner + + + def main(): +--- a/src/3rdparty/chromium/third_party/pdfium/testing/tools/safetynet_compare.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pdfium/testing/tools/safetynet_compare.py 2025-01-16 02:26:08.597846099 +0800 +@@ -17,15 +17,15 @@ + import tempfile + + # pylint: disable=relative-import +-from common import GetBooleanGnArg +-from common import PrintErr +-from common import RunCommandPropagateErr +-from githelper import GitHelper +-from safetynet_conclusions import ComparisonConclusions +-from safetynet_conclusions import PrintConclusionsDictHumanReadable +-from safetynet_conclusions import RATING_IMPROVEMENT +-from safetynet_conclusions import RATING_REGRESSION +-from safetynet_image import ImageComparison ++from .common import GetBooleanGnArg ++from .common import PrintErr ++from .common import RunCommandPropagateErr ++from .githelper import GitHelper ++from .safetynet_conclusions import ComparisonConclusions ++from .safetynet_conclusions import PrintConclusionsDictHumanReadable ++from .safetynet_conclusions import RATING_IMPROVEMENT ++from .safetynet_conclusions import RATING_REGRESSION ++from .safetynet_image import ImageComparison + + + def RunSingleTestCaseParallel(this, run_label, build_dir, test_case): +@@ -564,7 +564,7 @@ + ComparisonConclusions.GetOutputDict(). + """ + if self.args.machine_readable: +- print json.dumps(conclusions_dict) ++ print(json.dumps(conclusions_dict)) + else: + PrintConclusionsDictHumanReadable( + conclusions_dict, colored=True, key=self.args.case_order) +@@ -584,7 +584,7 @@ + if self.args.profiler != 'callgrind': + return + +- for case_result in conclusions.GetCaseResults().values(): ++ for case_result in list(conclusions.GetCaseResults().values()): + if case_result.rating not in [RATING_REGRESSION, RATING_IMPROVEMENT]: + self._CleanUpOutputFile('before', case_result.case_name) + self._CleanUpOutputFile('after', case_result.case_name) +--- a/src/3rdparty/chromium/third_party/pdfium/testing/tools/safetynet_conclusions.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pdfium/testing/tools/safetynet_conclusions.py 2025-01-16 02:26:08.597846099 +0800 +@@ -177,7 +177,7 @@ + output_dict['summary'] = self.summary.GetOutputDict() + output_dict['comparison_by_case'] = { + cr.case_name.decode('utf-8'): cr.GetOutputDict() +- for cr in self.GetCaseResults().values() ++ for cr in list(self.GetCaseResults().values()) + } + return output_dict + +@@ -245,46 +245,46 @@ + key: String with the CaseResult dictionary key to sort the cases. 
+ """ + # Print header +- print '=' * 80 +- print '{0:>11s} {1:>15s} {2}'.format('% Change', 'Time after', 'Test case') +- print '-' * 80 ++ print('=' * 80) ++ print('{0:>11s} {1:>15s} {2}'.format('% Change', 'Time after', 'Test case')) ++ print('-' * 80) + + color = FORMAT_NORMAL + + # Print cases + if key is not None: + case_pairs = sorted( +- conclusions_dict['comparison_by_case'].iteritems(), ++ iter(conclusions_dict['comparison_by_case'].items()), + key=lambda kv: kv[1][key]) + else: +- case_pairs = sorted(conclusions_dict['comparison_by_case'].iteritems()) ++ case_pairs = sorted(conclusions_dict['comparison_by_case'].items()) + + for case_name, case_dict in case_pairs: + if colored: + color = RATING_TO_COLOR[case_dict['rating']] + + if case_dict['rating'] == RATING_FAILURE: +- print u'{} to measure time for {}'.format( +- color.format('Failed'), case_name).encode('utf-8') ++ print('{} to measure time for {}'.format( ++ color.format('Failed'), case_name).encode('utf-8')) + continue + +- print u'{0} {1:15,d} {2}'.format( ++ print('{0} {1:15,d} {2}'.format( + color.format('{:+11.4%}'.format(case_dict['ratio'])), +- case_dict['after'], case_name).encode('utf-8') ++ case_dict['after'], case_name).encode('utf-8')) + + # Print totals + totals = conclusions_dict['summary'] +- print '=' * 80 +- print 'Test cases run: %d' % totals['total'] ++ print('=' * 80) ++ print('Test cases run: %d' % totals['total']) + + if colored: + color = FORMAT_MAGENTA if totals[RATING_FAILURE] else FORMAT_GREEN +- print('Failed to measure: %s' % color.format(totals[RATING_FAILURE])) ++ print(('Failed to measure: %s' % color.format(totals[RATING_FAILURE]))) + + if colored: + color = FORMAT_RED if totals[RATING_REGRESSION] else FORMAT_GREEN +- print('Regressions: %s' % color.format(totals[RATING_REGRESSION])) ++ print(('Regressions: %s' % color.format(totals[RATING_REGRESSION]))) + + if colored: + color = FORMAT_CYAN if totals[RATING_IMPROVEMENT] else FORMAT_GREEN +- print('Improvements: %s' % color.format(totals[RATING_IMPROVEMENT])) ++ print(('Improvements: %s' % color.format(totals[RATING_IMPROVEMENT]))) +--- a/src/3rdparty/chromium/third_party/pdfium/testing/tools/safetynet_image.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pdfium/testing/tools/safetynet_image.py 2025-01-16 02:26:08.597846099 +0800 +@@ -14,7 +14,7 @@ + import webbrowser + + # pylint: disable=relative-import +-from common import DirectoryFinder ++from .common import DirectoryFinder + + + def GenerateOneDiffParallel(image_comparison, image): +@@ -63,7 +63,7 @@ + # pylint: disable=attribute-defined-outside-init + + if len(self.two_labels) != 2: +- print >> sys.stderr, 'two_labels must be a tuple of length 2' ++ print('two_labels must be a tuple of length 2', file=sys.stderr) + return 1 + + finder = DirectoryFinder(self.build_dir) +@@ -88,7 +88,7 @@ + for image in self.image_locations.Images(): + diff = difference[image] + if diff is None: +- print >> sys.stderr, 'Failed to compare image %s' % image ++ print('Failed to compare image %s' % image, file=sys.stderr) + elif diff > self.threshold: + self._WriteImageRows(f, image, diff) + else: +@@ -170,7 +170,7 @@ + except subprocess.CalledProcessError as e: + return image, percentage_change + else: +- print >> sys.stderr, 'Warning: Should have failed the previous diff.' 
++ print('Warning: Should have failed the previous diff.', file=sys.stderr) + return image, 0 + + def _GetRelativePath(self, absolute_path): +@@ -259,7 +259,7 @@ + self.left = self._FindImages(self.two_labels[0]) + self.right = self._FindImages(self.two_labels[1]) + +- self.images = list(self.left.viewkeys() & self.right.viewkeys()) ++ self.images = list(self.left.keys() & self.right.keys()) + + # Sort by pdf filename, then page number + def KeyFn(s): +--- a/src/3rdparty/chromium/third_party/pdfium/testing/tools/safetynet_job.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pdfium/testing/tools/safetynet_job.py 2025-01-16 02:26:08.597846099 +0800 +@@ -16,10 +16,10 @@ + import sys + + # pylint: disable=relative-import +-from common import PrintWithTime +-from common import RunCommandPropagateErr +-from githelper import GitHelper +-from safetynet_conclusions import PrintConclusionsDictHumanReadable ++from .common import PrintWithTime ++from .common import RunCommandPropagateErr ++from .githelper import GitHelper ++from .safetynet_conclusions import PrintConclusionsDictHumanReadable + + + class JobContext(object): +--- a/src/3rdparty/chromium/third_party/pdfium/testing/tools/safetynet_measure.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pdfium/testing/tools/safetynet_measure.py 2025-01-16 02:26:08.597846099 +0800 +@@ -14,7 +14,7 @@ + import sys + + # pylint: disable=relative-import +-from common import PrintErr ++from .common import PrintErr + + CALLGRIND_PROFILER = 'callgrind' + PERFSTAT_PROFILER = 'perfstat' +@@ -65,7 +65,7 @@ + if time is None: + return 1 + +- print time ++ print(time) + return 0 + + def _RunCallgrind(self): +--- a/src/3rdparty/chromium/third_party/pdfium/testing/tools/suppressor.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pdfium/testing/tools/suppressor.py 2025-01-16 02:26:08.597846099 +0800 +@@ -6,7 +6,7 @@ + import os + + # pylint: disable=relative-import +-import common ++from . import common + + + class Suppressor: +@@ -49,18 +49,18 @@ + + def IsResultSuppressed(self, input_filename): + if input_filename in self.suppression_set: +- print "%s result is suppressed" % input_filename ++ print("%s result is suppressed" % input_filename) + return True + return False + + def IsExecutionSuppressed(self, input_filepath): + if "xfa_specific" in input_filepath and not self.has_xfa: +- print "%s execution is suppressed" % input_filepath ++ print("%s execution is suppressed" % input_filepath) + return True + return False + + def IsImageDiffSuppressed(self, input_filename): + if input_filename in self.image_suppression_set: +- print "%s image diff comparison is suppressed" % input_filename ++ print("%s image diff comparison is suppressed" % input_filename) + return True + return False +--- a/src/3rdparty/chromium/third_party/pdfium/testing/tools/test_runner.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pdfium/testing/tools/test_runner.py 2025-01-16 02:26:08.598929414 +0800 +@@ -13,10 +13,10 @@ + import sys + + # pylint: disable=relative-import +-import common +-import gold +-import pngdiffer +-import suppressor ++from . import common ++from . import gold ++from . import pngdiffer ++from . import suppressor + + # Arbitrary timestamp, expressed in seconds since the epoch, used to make sure + # that tests that depend on the current time are stable. 
Happens to be the +@@ -87,7 +87,7 @@ + pdf_path) + + if raised_exception is not None: +- print 'FAILURE: %s; %s' % (input_filename, raised_exception) ++ print('FAILURE: %s; %s' % (input_filename, raised_exception)) + return False, [] + + results = [] +@@ -100,7 +100,7 @@ + raised_exception, results = self.TestPixel(pdf_path, use_ahem) + + if raised_exception is not None: +- print 'FAILURE: %s; %s' % (input_filename, raised_exception) ++ print('FAILURE: %s; %s' % (input_filename, raised_exception)) + return False, results + + if actual_images: +@@ -112,7 +112,7 @@ + if (self.enforce_expected_images and + not self.test_suppressor.IsImageDiffSuppressed(input_filename)): + self.RegenerateIfNeeded_(input_filename, source_dir) +- print 'FAILURE: %s; Missing expected images' % input_filename ++ print('FAILURE: %s; Missing expected images' % input_filename) + return False, results + + if self.delete_output_on_success: +@@ -234,9 +234,9 @@ + if not self.test_suppressor.IsResultSuppressed(input_filename): + matched = self.gold_baseline.MatchLocalResult(test_name, md5_hash) + if matched == gold.GoldBaseline.MISMATCH: +- print 'Skia Gold hash mismatch for test case: %s' % test_name ++ print('Skia Gold hash mismatch for test case: %s' % test_name) + elif matched == gold.GoldBaseline.NO_BASELINE: +- print 'No Skia Gold baseline found for test case: %s' % test_name ++ print('No Skia Gold baseline found for test case: %s' % test_name) + + if self.gold_results: + self.gold_results.AddTestResult(test_name, md5_hash, img_path, +@@ -333,7 +333,7 @@ + + if (self.options.regenerate_expected and + self.options.regenerate_expected not in ['all', 'platform']): +- print 'FAILURE: --regenerate_expected must be "all" or "platform"' ++ print('FAILURE: --regenerate_expected must be "all" or "platform"') + return 1 + + finder = common.DirectoryFinder(self.options.build_dir) +@@ -349,8 +349,8 @@ + + self.pdfium_test_path = finder.ExecutablePath('pdfium_test') + if not os.path.exists(self.pdfium_test_path): +- print "FAILURE: Can't find test executable '%s'" % self.pdfium_test_path +- print 'Use --build-dir to specify its location.' 
++ print("FAILURE: Can't find test executable '%s'" % self.pdfium_test_path) ++ print('Use --build-dir to specify its location.') + return 1 + + self.working_dir = finder.WorkingDir(os.path.join('testing', self.test_dir)) +@@ -367,7 +367,7 @@ + error_message = self.image_differ.CheckMissingTools( + self.options.regenerate_expected) + if error_message: +- print "FAILURE: %s" % error_message ++ print("FAILURE: %s" % error_message) + return 1 + + self.gold_baseline = gold.GoldBaseline(self.options.gold_properties) +@@ -382,7 +382,7 @@ + file_name.replace('.pdf', '.in') + input_path = os.path.join(walk_from_dir, file_name) + if not os.path.isfile(input_path): +- print "Can't find test file '%s'" % file_name ++ print("Can't find test file '%s'" % file_name) + return 1 + + self.test_cases.append((os.path.basename(input_path), +@@ -440,15 +440,15 @@ + + if self.surprises: + self.surprises.sort() +- print '\n\nUnexpected Successes:' ++ print('\n\nUnexpected Successes:') + for surprise in self.surprises: +- print surprise ++ print(surprise) + + if self.failures: + self.failures.sort() +- print '\n\nSummary of Failures:' ++ print('\n\nSummary of Failures:') + for failure in self.failures: +- print failure ++ print(failure) + + self._PrintSummary() + +@@ -464,14 +464,14 @@ + number_suppressed = len(self.result_suppressed_cases) + number_successes = number_test_cases - number_failures - number_suppressed + number_surprises = len(self.surprises) +- print +- print 'Test cases executed: %d' % number_test_cases +- print ' Successes: %d' % number_successes +- print ' Suppressed: %d' % number_suppressed +- print ' Surprises: %d' % number_surprises +- print ' Failures: %d' % number_failures +- print +- print 'Test cases not executed: %d' % len(self.execution_suppressed_cases) ++ print() ++ print('Test cases executed: %d' % number_test_cases) ++ print(' Successes: %d' % number_successes) ++ print(' Suppressed: %d' % number_suppressed) ++ print(' Surprises: %d' % number_surprises) ++ print(' Failures: %d' % number_failures) ++ print() ++ print('Test cases not executed: %d' % len(self.execution_suppressed_cases)) + + def SetDeleteOutputOnSuccess(self, new_value): + """Set whether to delete generated output if the test passes.""" +--- a/src/3rdparty/chromium/third_party/pdfium/testing/tools/text_diff.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pdfium/testing/tools/text_diff.py 2025-01-16 02:26:08.598929414 +0800 +@@ -9,7 +9,7 @@ + + def main(argv): + if len(argv) != 3: +- print '%s: invalid arguments' % argv[0] ++ print('%s: invalid arguments' % argv[0]) + return 2 + filename1 = argv[1] + filename2 = argv[2] +@@ -21,7 +21,7 @@ + diffs = difflib.unified_diff( + str1, str2, fromfile=filename1, tofile=filename2) + except Exception as e: +- print "something went astray: %s" % e ++ print("something went astray: %s" % e) + return 1 + status_code = 0 + for diff in diffs: +--- a/src/3rdparty/chromium/third_party/pdfium/testing/tools/coverage/coverage_report.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pdfium/testing/tools/coverage/coverage_report.py 2025-01-16 02:26:08.598929414 +0800 +@@ -115,45 +115,45 @@ + def check_output(self, args, dry_run=False, env=None): + """Dry run aware wrapper of subprocess.check_output()""" + if dry_run: +- print "Would have run '%s'" % ' '.join(args) ++ print("Would have run '%s'" % ' '.join(args)) + return '' + + output = subprocess.check_output(args, env=env) + + if self.verbose: +- print "check_output(%s) returned 
'%s'" % (args, output) ++ print("check_output(%s) returned '%s'" % (args, output)) + return output + + def call(self, args, dry_run=False, env=None): + """Dry run aware wrapper of subprocess.call()""" + if dry_run: +- print "Would have run '%s'" % ' '.join(args) ++ print("Would have run '%s'" % ' '.join(args)) + return 0 + + output = subprocess.call(args, env=env) + + if self.verbose: +- print 'call(%s) returned %s' % (args, output) ++ print('call(%s) returned %s' % (args, output)) + return output + + def call_silent(self, args, dry_run=False, env=None): + """Dry run aware wrapper of subprocess.call() that eats output from call""" + if dry_run: +- print "Would have run '%s'" % ' '.join(args) ++ print("Would have run '%s'" % ' '.join(args)) + return 0 + + with open(os.devnull, 'w') as f: + output = subprocess.call(args, env=env, stdout=f) + + if self.verbose: +- print 'call_silent(%s) returned %s' % (args, output) ++ print('call_silent(%s) returned %s' % (args, output)) + return output + + def calculate_coverage_tests(self, args): + """Determine which tests should be run.""" + testing_tools_directory = os.path.join(self.source_directory, 'testing', + 'tools') +- tests = args['tests'] if args['tests'] else COVERAGE_TESTS.keys() ++ tests = args['tests'] if args['tests'] else list(COVERAGE_TESTS.keys()) + coverage_tests = {} + build_targets = set() + for name in tests: +@@ -192,10 +192,10 @@ + spec: Tuple containing the TestSpec. + """ + if self.verbose: +- print "Generating coverage for test '%s', using data '%s'" % (name, spec) ++ print("Generating coverage for test '%s', using data '%s'" % (name, spec)) + if not os.path.exists(spec.binary): +- print('Unable to generate coverage for %s, since it appears to not exist' +- ' @ %s') % (name, spec.binary) ++ print(('Unable to generate coverage for %s, since it appears to not exist' ++ ' @ %s') % (name, spec.binary)) + return False + + binary_args = [spec.binary] +@@ -217,8 +217,8 @@ + # to the max value in LLVM_PROFILE_FILE, which is 8. + binary_args.extend(['-j', '8', '--build-dir', self.build_directory]) + if self.call(binary_args, dry_run=self.dry_run, env=env) and self.verbose: +- print('Running %s appears to have failed, which might affect ' +- 'results') % spec.binary ++ print(('Running %s appears to have failed, which might affect ' ++ 'results') % spec.binary) + + return True + +@@ -267,24 +267,24 @@ + def run(self): + """Setup environment, execute the tests and generate coverage report""" + if not self.fetch_profiling_tools(): +- print 'Unable to fetch profiling tools' ++ print('Unable to fetch profiling tools') + return False + + if not self.build_binaries(): +- print 'Failed to successfully build binaries' ++ print('Failed to successfully build binaries') + return False + +- for name in self.coverage_tests.keys(): ++ for name in list(self.coverage_tests.keys()): + if not self.generate_coverage(name, self.coverage_tests[name]): +- print 'Failed to successfully generate coverage data' ++ print('Failed to successfully generate coverage data') + return False + + if not self.merge_raw_coverage_results(): +- print 'Failed to successfully merge raw coverage results' ++ print('Failed to successfully merge raw coverage results') + return False + + if not self.generate_html_report(): +- print 'Failed to successfully generate HTML report' ++ print('Failed to successfully generate HTML report') + return False + + return True +@@ -334,7 +334,7 @@ + parser.add_argument( + 'tests', + help='Tests to be run, defaults to all. 
Valid entries are %s' % +- COVERAGE_TESTS.keys(), ++ list(COVERAGE_TESTS.keys()), + nargs='*') + + args = vars(parser.parse_args()) +--- a/src/3rdparty/chromium/third_party/pdfium/third_party/pymock/mock.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pdfium/third_party/pymock/mock.py 2025-01-16 02:26:08.598929414 +0800 +@@ -67,13 +67,13 @@ + return inner + + try: +- unicode ++ str + except NameError: + # Python 3 +- basestring = unicode = str ++ str = str = str + + try: +- long ++ int + except NameError: + # Python 3 + long = int +@@ -88,7 +88,7 @@ + next + except NameError: + def next(obj): +- return obj.next() ++ return obj.__next__() + + + BaseExceptions = (BaseException,) +@@ -220,7 +220,7 @@ + #funcopy.__dict__.update(func.__dict__) + funcopy.__module__ = func.__module__ + if not inPy3k: +- funcopy.func_defaults = func.func_defaults ++ funcopy.__defaults__ = func.__defaults__ + return + funcopy.__defaults__ = func.__defaults__ + funcopy.__kwdefaults__ = func.__kwdefaults__ +@@ -618,7 +618,7 @@ + self.call_args_list = _CallList() + self.method_calls = _CallList() + +- for child in self._mock_children.values(): ++ for child in list(self._mock_children.values()): + if isinstance(child, _SpecState): + continue + child.reset_mock() +@@ -637,7 +637,7 @@ + + >>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError} + >>> mock.configure_mock(**attrs)""" +- for arg, val in sorted(kwargs.items(), ++ for arg, val in sorted(list(kwargs.items()), + # we sort on the number of dots so that + # attributes are set before we set attributes on + # attributes +@@ -1218,7 +1218,7 @@ + # not in Python 3 + patched.compat_co_firstlineno = getattr( + func, "compat_co_firstlineno", +- func.func_code.co_firstlineno ++ func.__code__.co_firstlineno + ) + return patched + +@@ -1465,7 +1465,7 @@ + When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX` + for choosing which methods to wrap. + """ +- if type(target) in (unicode, str): ++ if type(target) in (str, str): + getter = lambda: _importer(target) + else: + getter = lambda: target +@@ -1598,7 +1598,7 @@ + """ + + def __init__(self, in_dict, values=(), clear=False, **kwargs): +- if isinstance(in_dict, basestring): ++ if isinstance(in_dict, str): + in_dict = _importer(in_dict) + self.in_dict = in_dict + # support any argument supported by dict(...) 
constructor +@@ -1766,7 +1766,7 @@ + '__hash__': lambda self: object.__hash__(self), + '__str__': lambda self: object.__str__(self), + '__sizeof__': lambda self: object.__sizeof__(self), +- '__unicode__': lambda self: unicode(object.__str__(self)), ++ '__unicode__': lambda self: str(object.__str__(self)), + } + + _return_values = { +@@ -1784,7 +1784,7 @@ + '__nonzero__': True, + '__oct__': '1', + '__hex__': '0x1', +- '__long__': long(1), ++ '__long__': int(1), + '__index__': 1, + } + +@@ -1953,7 +1953,7 @@ + formatted_args = '' + args_string = ', '.join([repr(arg) for arg in args]) + kwargs_string = ', '.join([ +- '%s=%r' % (key, value) for key, value in kwargs.items() ++ '%s=%r' % (key, value) for key, value in list(kwargs.items()) + ]) + if args_string: + formatted_args = args_string +@@ -1995,7 +1995,7 @@ + name, args, kwargs = value + elif _len == 2: + first, second = value +- if isinstance(first, basestring): ++ if isinstance(first, str): + name = first + if isinstance(second, tuple): + args = second +@@ -2005,7 +2005,7 @@ + args, kwargs = first, second + elif _len == 1: + value, = value +- if isinstance(value, basestring): ++ if isinstance(value, str): + name = value + elif isinstance(value, tuple): + args = value +@@ -2049,7 +2049,7 @@ + if isinstance(value, tuple): + other_args = value + other_kwargs = {} +- elif isinstance(value, basestring): ++ elif isinstance(value, str): + other_name = value + other_args, other_kwargs = (), {} + else: +@@ -2059,7 +2059,7 @@ + # len 2 + # could be (name, args) or (name, kwargs) or (args, kwargs) + first, second = other +- if isinstance(first, basestring): ++ if isinstance(first, str): + other_name = first + if isinstance(second, tuple): + other_args, other_kwargs = second, {} +--- a/src/3rdparty/chromium/third_party/perfetto/PRESUBMIT.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/perfetto/PRESUBMIT.py 2025-01-16 02:26:08.598929414 +0800 +@@ -201,7 +201,7 @@ + if f.LocalPath() != 'tools/ftrace_proto_gen/event_list': + continue + if any((not new_line.startswith('removed')) and new_line != old_line +- for old_line, new_line in itertools.izip(f.OldContents(), ++ for old_line, new_line in zip(f.OldContents(), + f.NewContents())): + return [ + output_api.PresubmitError( +--- a/src/3rdparty/chromium/third_party/perfetto/gn/standalone/build_tool_wrapper.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/perfetto/gn/standalone/build_tool_wrapper.py 2025-01-16 02:26:08.598929414 +0800 +@@ -18,7 +18,7 @@ + python sources. It is used to invoke tools like the protoc compiler. + """ + +-from __future__ import print_function ++ + + import argparse + import os +--- a/src/3rdparty/chromium/third_party/perfetto/gn/standalone/gen_git_revision.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/perfetto/gn/standalone/gen_git_revision.py 2025-01-16 02:26:08.598929414 +0800 +@@ -20,7 +20,7 @@ + + def main(argv): + if len(argv) != 2: +- print('Usage: %s output_file.h' % argv[0]) ++ print(('Usage: %s output_file.h' % argv[0])) + return 1 + script_dir = os.path.dirname(os.path.realpath(__file__)) + revision = subprocess.check_output( +--- a/src/3rdparty/chromium/third_party/perfetto/gn/standalone/glob.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/perfetto/gn/standalone/glob.py 2025-01-16 02:26:08.598929414 +0800 +@@ -20,7 +20,7 @@ + output of the build but just cause spurious re-runs (e.g. as input section of + an "action" target). 
+ """ +-from __future__ import print_function ++ + import argparse + import fnmatch + import os +--- a/src/3rdparty/chromium/third_party/perfetto/gn/standalone/protoc.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/perfetto/gn/standalone/protoc.py 2025-01-16 02:26:08.598929414 +0800 +@@ -17,7 +17,7 @@ + This script exists to work-around the bad depfile generation by protoc when + generating descriptors.""" + +-from __future__ import print_function ++ + import argparse + import os + import sys +--- a/src/3rdparty/chromium/third_party/perfetto/gn/standalone/write_ui_dist_file_map.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/perfetto/gn/standalone/write_ui_dist_file_map.py 2025-01-16 02:26:08.598929414 +0800 +@@ -25,7 +25,7 @@ + } + """ + +-from __future__ import print_function ++ + + import argparse + import base64 +@@ -60,13 +60,13 @@ + args = parser.parse_args() + + # Compute the hash of each file. +- digests = dict(map(hash_file, args.file_list)) ++ digests = dict(list(map(hash_file, args.file_list))) + + contents = '// __generated_by %s\n' % __file__ + contents += 'export const UI_DIST_MAP = {\n' + contents += ' files: {\n' + strip = args.strip + ('' if args.strip[-1] == os.path.sep else os.path.sep) +- for fname, digest in digests.items(): ++ for fname, digest in list(digests.items()): + if not fname.startswith(strip): + raise Exception('%s must start with %s (--strip arg)' % (fname, strip)) + fname = fname[len(strip):] +@@ -77,7 +77,7 @@ + contents += ' },\n' + + # Compute the hash of the all resources' hashes. +- contents += ' hex_digest: \'%s\',\n' % hash_list_hex(digests.values()) ++ contents += ' hex_digest: \'%s\',\n' % hash_list_hex(list(digests.values())) + contents += '};\n' + + with open(args.out + '.tmp', 'w') as fout: +--- a/src/3rdparty/chromium/third_party/perfetto/gn/standalone/toolchain/linux_find_llvm.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/perfetto/gn/standalone/toolchain/linux_find_llvm.py 2025-01-16 02:26:08.598929414 +0800 +@@ -31,9 +31,9 @@ + for lib in libs: + if '/clang/' not in lib or not os.path.isdir(lib + '/lib'): + continue +- print(os.path.abspath(lib)) ++ print((os.path.abspath(lib))) + print(clang) +- print(clang.replace('clang', 'clang++')) ++ print((clang.replace('clang', 'clang++'))) + return 0 + print('Could not find the LLVM lib dir') + return 1 +--- a/src/3rdparty/chromium/third_party/perfetto/gn/standalone/toolchain/mac_find_llvm.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/perfetto/gn/standalone/toolchain/mac_find_llvm.py 2025-01-16 02:26:08.598929414 +0800 +@@ -24,14 +24,14 @@ + stderr=subprocess.STDOUT) + out, err = job.communicate() + if job.returncode != 0: +- print >> sys.stderr, out +- print >> sys.stderr, err ++ print(out, file=sys.stderr) ++ print(err, file=sys.stderr) + return job.returncode + sdk_dir = os.path.dirname(os.path.dirname(out.rstrip())) +- print sdk_dir ++ print(sdk_dir) + clang_dir = glob.glob( + os.path.join(sdk_dir, 'lib', 'clang', '*', 'lib', 'darwin')) +- print clang_dir[0] if clang_dir else 'CLANG_DIR_NOT_FOUND' ++ print(clang_dir[0] if clang_dir else 'CLANG_DIR_NOT_FOUND') + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/third_party/perfetto/infra/ci/config.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/perfetto/infra/ci/config.py 2025-01-16 02:26:08.598929414 +0800 +@@ -18,7 +18,7 @@ + makefile dumps of the variables. 
This is so all vars can live in one place. + ''' + +-from __future__ import print_function ++ + + # Gerrit config + GERRIT_HOST = 'android-review.googlesource.com' +@@ -125,7 +125,7 @@ + import json + import re + import sys +- vars = dict(kv for kv in locals().items() if re.match('^[A-Z0-9_]+$', kv[0])) ++ vars = dict(kv for kv in list(locals().items()) if re.match('^[A-Z0-9_]+$', kv[0])) + + if len(sys.argv) > 1 and sys.argv[1] == 'makefile': + deps_path = os.path.join(os.path.dirname(__file__), '.deps') +@@ -134,12 +134,12 @@ + gen_file = os.path.join(deps_path, 'config.mk') + + try: +- literals = (int, long, basestring) ++ literals = (int, int, str) + except NameError: + literals = (int, str) + + with open(gen_file, 'w') as f: +- for k, v in vars.items(): ++ for k, v in list(vars.items()): + if isinstance(v, literals): + f.write('override %s=%s\n' % (k, v)) + elif isinstance(v, list): +--- a/src/3rdparty/chromium/third_party/perfetto/infra/ci/controller/controller.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/perfetto/infra/ci/controller/controller.py 2025-01-16 02:26:08.598929414 +0800 +@@ -15,7 +15,7 @@ + import logging + import re + import time +-import urllib ++import urllib.request, urllib.parse, urllib.error + + from datetime import datetime, timedelta + from google.appengine.api import taskqueue +@@ -65,7 +65,7 @@ + + def create_stackdriver_metric_definitions(): + logging.info('Creating Stackdriver metric definitions') +- for name, metric in STACKDRIVER_METRICS.iteritems(): ++ for name, metric in STACKDRIVER_METRICS.items(): + logging.info('Creating metric %s', name) + req('POST', STACKDRIVER_API + '/metricDescriptors', body=metric) + +@@ -73,7 +73,7 @@ + def write_metrics(metric_dict): + now = utc_now_iso() + desc = {'timeSeries': []} +- for key, spec in metric_dict.iteritems(): ++ for key, spec in metric_dict.items(): + desc['timeSeries'] += [{ + 'metric': { + 'type': STACKDRIVER_METRICS[key]['type'], +@@ -138,7 +138,7 @@ + url += '+is:open+after:%s' % date_limit + resp = req('GET', url, gerrit=True) + for change in (change for change in resp if 'revisions' in change): +- rev_hash = change['revisions'].keys()[0] ++ rev_hash = list(change['revisions'].keys())[0] + rev = change['revisions'][rev_hash] + owner = rev['uploader']['email'] + prs_ready = change['labels'].get('Presubmit-Ready', {}).get('approved', {}) +@@ -170,7 +170,7 @@ + ''' + logging.info('Enqueueing jobs fos cl %s', src) + timestamp = (now or datetime.utcnow()).strftime('%Y%m%d%H%M%S') +- for cfg_name, env in JOB_CONFIGS.iteritems(): ++ for cfg_name, env in JOB_CONFIGS.items(): + job_id = '%s--%s--%s' % (timestamp, src.replace('/', '-'), cfg_name) + logging.info('Enqueueing job %s', job_id) + patch_obj['jobs/' + job_id] = { +@@ -240,19 +240,19 @@ + last_key = '%s-z' % cl + filt = 'orderBy="$key"&startAt="%s"&endAt="%s"' % (first_key, last_key) + cl_objs = req('GET', '%s/cls.json?%s' % (DB, filt)) or {} +- for cl_and_ps, cl_obj in cl_objs.iteritems(): ++ for cl_and_ps, cl_obj in cl_objs.items(): + ps = int(cl_and_ps.split('-')[-1]) + if cl_obj.get('time_ended') or ps >= int(patchset): + continue + logging.info('Cancelling jobs for previous patchset %s', cl_and_ps) +- map(lambda x: defer('cancel_job', job_id=x), cl_obj['jobs'].keys()) ++ list(map(lambda x: defer('cancel_job', job_id=x), list(cl_obj['jobs'].keys()))) + + + def check_pending_cls(handler): + # Check if any pending CL has completed (all jobs are done). If so publish + # the comment and vote on the CL. 
+ pending_cls = req('GET', '%s/cls_pending.json' % DB) or {} +- for cl_and_ps, _ in pending_cls.iteritems(): ++ for cl_and_ps, _ in pending_cls.items(): + defer('check_pending_cl', cl_and_ps=cl_and_ps) + + +@@ -262,7 +262,7 @@ + # jobs (we run presubmit regardless, only the voting is conditioned by PR). + cl_and_ps = handler.request.get('cl_and_ps') + cl_obj = req('GET', '%s/cls/%s.json' % (DB, cl_and_ps)) +- all_jobs = cl_obj.get('jobs', {}).keys() ++ all_jobs = list(cl_obj.get('jobs', {}).keys()) + pending_jobs = [] + for job_id in all_jobs: + job_status = req('GET', '%s/jobs/%s/status.json' % (DB, job_id)) +@@ -276,7 +276,7 @@ + if age_sec > CL_TIMEOUT_SEC: + logging.warning('Canceling %s, it has been pending for too long (%s sec)', + cl_and_ps, int(age_sec)) +- map(lambda x: defer('cancel_job', job_id=x), pending_jobs) ++ list(map(lambda x: defer('cancel_job', job_id=x), pending_jobs)) + return + + logging.info('All jobs completed for CL %s', cl_and_ps) +@@ -288,7 +288,7 @@ + } + req('PATCH', '%s.json' % DB, body=patch_obj) + defer('update_cl_metrics', src='cls/' + cl_and_ps) +- map(lambda x: defer('update_job_metrics', job_id=x), all_jobs) ++ list(map(lambda x: defer('update_job_metrics', job_id=x), all_jobs)) + if cl_obj.get('wants_vote'): + defer('comment_and_vote_cl', cl_and_ps=cl_and_ps) + +@@ -310,7 +310,7 @@ + failed_jobs = {} + ui_links = [] + cancelled = False +- for job_id in cl_obj['jobs'].keys(): ++ for job_id in list(cl_obj['jobs'].keys()): + job_obj = req('GET', '%s/jobs/%s.json' % (DB, job_id)) + job_config = JOB_CONFIGS.get(job_obj['type'], {}) + if job_obj['status'] == 'CANCELLED': +@@ -333,7 +333,7 @@ + msg += 'FAIL:\n' + msg += ''.join([ + ' %s/%s (%s)\n' % (log_url, job_id, status) +- for (job_id, status) in failed_jobs.iteritems() ++ for (job_id, status) in failed_jobs.items() + ]) + if passed_jobs: + msg += 'PASS:\n' +@@ -359,7 +359,7 @@ + 1. ?branch=master: Will retrieve the SHA1 of master and call the one below. + 2. ?branch=master&rev=deadbeef1234: queues jobs for the given revision. + ''' +- prj = urllib.quote(GERRIT_PROJECT, '') ++ prj = urllib.parse.quote(GERRIT_PROJECT, '') + branch = handler.request.get('branch') + revision = handler.request.get('revision') + assert branch +@@ -402,7 +402,7 @@ + This is usually due to a crash in the VM that handles them. 
+ ''' + running_jobs = req('GET', '%s/jobs_running.json?shallow=true' % (DB)) or {} +- for job_id in running_jobs.iterkeys(): ++ for job_id in running_jobs.keys(): + job = req('GET', '%s/jobs/%s.json' % (DB, job_id)) + time_started = parse_iso_time(job.get('time_started', utc_now_iso())) + age = (datetime.now() - time_started).total_seconds() +@@ -429,7 +429,7 @@ + + def delete_expired_logs(handler): + logs = req('GET', '%s/logs.json?shallow=true' % (DB)) or {} +- for job_id in logs.iterkeys(): ++ for job_id in logs.keys(): + age_days = (datetime.now() - datetime.strptime(job_id[:8], '%Y%m%d')).days + if age_days > LOGS_TTL_DAYS: + defer('delete_job_logs', job_id=job_id) +--- a/src/3rdparty/chromium/third_party/perfetto/infra/ci/frontend/frontend.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/perfetto/infra/ci/frontend/frontend.py 2025-01-16 02:26:08.600012729 +0800 +@@ -14,7 +14,7 @@ + + import logging + import webapp2 +-import urllib ++import urllib.request, urllib.parse, urllib.error + + from google.appengine.api import urlfetch + from google.appengine.api import memcache +@@ -38,7 +38,7 @@ + class GerritCommitsHandler(webapp2.RequestHandler): + + def get(self, sha1): +- project = urllib.quote(GERRIT_PROJECT, '') ++ project = urllib.parse.quote(GERRIT_PROJECT, '') + url = 'https://%s/projects/%s/commits/%s' % (GERRIT_HOST, project, sha1) + status, content = req_cached(url) + self.response.status_int = status +--- a/src/3rdparty/chromium/third_party/perfetto/infra/ci/worker/run_job.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/perfetto/infra/ci/worker/run_job.py 2025-01-16 02:26:08.600012729 +0800 +@@ -73,7 +73,7 @@ + def main(argv): + init_logging() + if len(argv) != 2: +- print('Usage: %s job_id' % argv[0]) ++ print(('Usage: %s job_id' % argv[0])) + return 1 + + job_id = argv[1] +@@ -110,7 +110,7 @@ + ] + + # Propagate environment variables coming from the job config. +- for kv in [kv for kv in os.environ.items() if kv[0].startswith('PERFETTO_')]: ++ for kv in [kv for kv in list(os.environ.items()) if kv[0].startswith('PERFETTO_')]: + cmd += ['--env', '%s=%s' % kv] + + # Rationale for the conditional branches below: when running in the real GCE +--- a/src/3rdparty/chromium/third_party/perfetto/infra/ci/worker/worker.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/perfetto/infra/ci/worker/worker.py 2025-01-16 02:26:08.600012729 +0800 +@@ -81,7 +81,7 @@ + # acquire the same job). + job = None + job_id = None +- for job_id in sorted(jobs.keys(), reverse=True): ++ for job_id in sorted(list(jobs.keys()), reverse=True): + job = try_acquire_job(job_id) + if job is not None: + break +@@ -106,7 +106,7 @@ + cmd = [os.path.join(CUR_DIR, 'run_job.py'), job_id] + + # Propagate the worker's PERFETTO_ vars and merge with the job-specific vars. 
+- env = dict(os.environ, **{k: str(v) for (k, v) in job['env'].items()})
++ env = dict(os.environ, **{k: str(v) for (k, v) in list(job['env'].items())})
+ job_runner = subprocess.Popen(cmd, env=env)
+ 
+ # Run the job in a python subprocess, to isolate the main loop from logs
+--- a/src/3rdparty/chromium/third_party/perfetto/infra/git_mirror_bot/mirror_aosp_to_ghub_repo.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/perfetto/infra/git_mirror_bot/mirror_aosp_to_ghub_repo.py	2025-01-16 02:26:08.600012729 +0800
+@@ -118,7 +118,7 @@
+ update_ref_cmd = ''
+ for ref_to_delete in deleted_heads:
+ update_ref_cmd += 'delete %s\n' % ref_to_delete
+- for ref_to_update, ref_sha1 in future_heads.iteritems():
++ for ref_to_update, ref_sha1 in future_heads.items():
+ if current_heads.get(ref_to_update) != ref_sha1:
+ update_ref_cmd += 'update %s %s\n' % (ref_to_update, ref_sha1)
+ 
+--- a/src/3rdparty/chromium/third_party/perfetto/src/trace_processor/python/example.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/perfetto/src/trace_processor/python/example.py	2025-01-16 02:26:08.600012729 +0800
+@@ -48,14 +48,14 @@
+ # Iterate through QueryResultIterator
+ res_it = tp.query('select * from slice limit 10')
+ for row in res_it:
+- print(row.name)
++ print((row.name))
+ 
+ # Convert QueryResultIterator into a pandas dataframe + iterate. This yields
+ # the same results as the function above.
+ try:
+ res_df = tp.query('select * from slice limit 10').as_pandas_dataframe()
+ for index, row in res_df.iterrows():
+- print(row['name'])
++ print((row['name']))
+ except Exception:
+ pass
+ 
+--- a/src/3rdparty/chromium/third_party/perfetto/src/trace_processor/python/perfetto/trace_processor/http.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/perfetto/src/trace_processor/python/perfetto/trace_processor/http.py	2025-01-16 02:26:08.600012729 +0800
+@@ -13,7 +13,7 @@
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ 
+-import http.client
++import http.client
+ 
+ from .protos import ProtoFactory
+ 
+--- a/src/3rdparty/chromium/third_party/perfetto/tools/add_tp_diff_test.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/perfetto/tools/add_tp_diff_test.py	2025-01-16 02:26:08.600012729 +0800
+@@ -24,7 +24,7 @@
+ def create_if_not_exists(path):
+ create = not os.path.exists(path)
+ if create:
+- print('Creating empty file {}'.format(os.path.relpath(path, ROOT_DIR)))
++ print(('Creating empty file {}'.format(os.path.relpath(path, ROOT_DIR))))
+ with open(path, 'a'):
+ pass
+ return create
+@@ -40,8 +40,8 @@
+ include_index_path = os.path.join(test_dir, 'include_index')
+ 
+ if not os.path.exists(include_index_path):
+- print('Error: include index does not exist at {}'.format(
+- os.path.relpath(include_index_path, ROOT_DIR)))
++ print(('Error: include index does not exist at {}'.format(
++ os.path.relpath(include_index_path, ROOT_DIR))))
+ return 1
+ 
+ existing_folders = []
+@@ -57,18 +57,18 @@
+ 'of trace processor. 
For help in this, please see the guidance at ' + 'http://perfetto.dev/docs/analysis/trace-processor#diff-tests') + print() +- print('Existing folders: {}.'.format(existing_folders)) ++ print(('Existing folders: {}.'.format(existing_folders))) + stdout_write('Folder: ') + + chosen_folder = sys.stdin.readline().rstrip() + chosen_folder_path = os.path.abspath(os.path.join(test_dir, chosen_folder)) + chosen_folder_path_rel_root = os.path.relpath(chosen_folder_path, ROOT_DIR) + if chosen_folder not in existing_folders: +- print('Creating new folder {} and adding include to include_index file' +- .format(chosen_folder)) ++ print(('Creating new folder {} and adding include to include_index file' ++ .format(chosen_folder))) + os.mkdir(chosen_folder_path) + +- out_include_index = list(map(lambda x: x + '/index', existing_folders)) ++ out_include_index = list([x + '/index' for x in existing_folders]) + out_include_index.append(chosen_folder + '/index') + out_include_index.sort() + +@@ -91,14 +91,14 @@ + pb_file = sys.stdin.readline().rstrip() + pb_path = os.path.abspath(os.path.join(ROOT_DIR, 'test', 'data', pb_file)) + if not os.path.exists(pb_path): +- print('Error: provided pb file {} does not exist', +- os.path.relpath(pb_path, ROOT_DIR)) ++ print(('Error: provided pb file {} does not exist', ++ os.path.relpath(pb_path, ROOT_DIR))) + return 1 + + trace_file = os.path.relpath(pb_path, chosen_folder_path) + elif trace_type == 'textproto': +- print('Provide the path to the textproto trace relative to the ' +- 'chosen folder {}'.format(chosen_folder_path_rel_root)) ++ print(('Provide the path to the textproto trace relative to the ' ++ 'chosen folder {}'.format(chosen_folder_path_rel_root))) + stdout_write( + 'If the file does not already exist, an empty file will be created: ') + +@@ -109,9 +109,9 @@ + + trace_file = textproto_file + elif trace_type == 'python': +- print( ++ print(( + 'Provide the path to the Python trace ' +- 'relative to the chosen folder {}'.format(chosen_folder_path_rel_root)) ++ 'relative to the chosen folder {}'.format(chosen_folder_path_rel_root))) + stdout_write( + 'If the file does not already exist, an empty file will be created: ') + +@@ -123,12 +123,12 @@ + + trace_file = python_file + else: +- print('Error: unexpected trace type {}'.format(trace_type)) ++ print(('Error: unexpected trace type {}'.format(trace_type))) + return 1 + + print() +- print('Provide the path to the SQL file relative to the chosen folder {}' +- .format(chosen_folder_path_rel_root)) ++ print(('Provide the path to the SQL file relative to the chosen folder {}' ++ .format(chosen_folder_path_rel_root))) + stdout_write( + 'If the file does not already exist, an empty file will be created: ') + +@@ -141,8 +141,8 @@ + pathlib.Path(sql_file).stem) + + print() +- print('Provide the name of the output file (or leave empty ' +- 'to accept the default: {})'.format(default_out_file)) ++ print(('Provide the name of the output file (or leave empty ' ++ 'to accept the default: {})'.format(default_out_file))) + stdout_write( + 'If the file does not already exist, an empty file will be created: ') + out_file = sys.stdin.readline().rstrip() +--- a/src/3rdparty/chromium/third_party/perfetto/tools/analyze_profiling_sampling_distribution.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/perfetto/tools/analyze_profiling_sampling_distribution.py 2025-01-16 02:26:08.600012729 +0800 +@@ -46,11 +46,11 @@ + + # Map from key to list of bytes allocated, one for each iteration. 
+ flat_distributions = {
+- key: value.values() for key, value in distributions.iteritems()
++ key: list(value.values()) for key, value in distributions.items()
+ }
+
+- for key, value in flat_distributions.iteritems():
+- print key, "ground truth %d " % ground_truth[key], sp.stats.describe(value)
++ for key, value in flat_distributions.items():
++ print(key, "ground truth %d " % ground_truth[key], sp.stats.describe(value))
+ sns.distplot(value)
+ plt.show()
+
+--- a/src/3rdparty/chromium/third_party/perfetto/tools/build_all_configs.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/perfetto/tools/build_all_configs.py 2025-01-16 02:26:08.600012729 +0800
+@@ -13,9 +13,9 @@
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+-from __future__ import absolute_import
+-from __future__ import division
+-from __future__ import print_function
++
++
++
+
+ import argparse
+ import os
+--- a/src/3rdparty/chromium/third_party/perfetto/tools/compat.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/perfetto/tools/compat.py 2025-01-16 02:26:08.600012729 +0800
+@@ -22,7 +22,7 @@
+ try:
+ from urllib.request import urlretrieve
+ except ImportError:
+- from urllib import urlretrieve
++ from urllib.request import urlretrieve
+
+ try:
+ xrange = xrange
+@@ -30,19 +30,19 @@
+ xrange = range
+
+ try:
+- basestring = basestring
++ basestring = str
+ except NameError:
+- basestring = str
++ basestring = str
+
+ def itervalues(o):
+ try:
+- return o.itervalues()
++ return iter(o.values())
+ except AttributeError:
+- return o.values()
++ return list(o.values())
+
+
+ def iteritems(o):
+ try:
+- return o.iteritems()
++ return iter(o.items())
+ except AttributeError:
+- return o.items()
++ return list(o.items())
+--- a/src/3rdparty/chromium/third_party/perfetto/tools/diff_test_trace_processor.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/perfetto/tools/diff_test_trace_processor.py 2025-01-16 02:26:08.600012729 +0800
+@@ -13,9 +13,9 @@
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+-from __future__ import absolute_import
+-from __future__ import division
+-from __future__ import print_function
++
++
++
+
+ import argparse
+ import datetime
+--- a/src/3rdparty/chromium/third_party/perfetto/tools/find_scan_roots.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/perfetto/tools/find_scan_roots.py 2025-01-16 02:26:08.600012729 +0800
+@@ -43,7 +43,7 @@
+ return n
+
+ def __iter__(self):
+- for child in self.children.itervalues():
++ for child in self.children.values():
+ yield self.name + '/' + child.name, child
+ for p, ch in child:
+ yield self.name + '/' + p, ch
+@@ -56,7 +56,7 @@
+
+ self.marked = True
+
+- for child in self.children.itervalues():
++ for child in self.children.values():
+ child.Mark(labels)
+
+ return True
+@@ -89,7 +89,7 @@
+ root = BuildTree()
+ for fullpath, elem in root:
+ if elem.Mark(args.labels):
+- print fullpath
++ print(fullpath)
+
+
+ if __name__ == '__main__':
+--- a/src/3rdparty/chromium/third_party/perfetto/tools/gen_cc_proto_descriptor.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/perfetto/tools/gen_cc_proto_descriptor.py 2025-01-16 02:26:08.600012729 +0800
+@@ -13,9 +13,9 @@
+ # See the License for the specific language governing permissions and
+ # limitations under the License. 
+ +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + import os + import sys + import argparse +--- a/src/3rdparty/chromium/third_party/perfetto/tools/gen_merged_sql_metrics.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/perfetto/tools/gen_merged_sql_metrics.py 2025-01-16 02:26:08.600012729 +0800 +@@ -92,7 +92,7 @@ + output.write(NAMESPACE_BEGIN) + + # Create the C++ variable for each SQL file. +- for path, sql in sql_outputs.items(): ++ for path, sql in list(sql_outputs.items()): + name = os.path.basename(path) + variable = filename_to_variable(os.path.splitext(name)[0]) + output.write( +@@ -103,7 +103,7 @@ + + # Create mapping of filename to variable name for each variable. + output.write("\nconst FileToSql kFileToSql[] = {") +- for path in sql_outputs.keys(): ++ for path in list(sql_outputs.keys()): + name = os.path.basename(path) + variable = filename_to_variable(os.path.splitext(name)[0]) + +--- a/src/3rdparty/chromium/third_party/perfetto/tools/gn_utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/perfetto/tools/gn_utils.py 2025-01-16 02:26:08.600012729 +0800 +@@ -15,7 +15,7 @@ + # A collection of utilities for extracting build rule information from GN + # projects. + +-from __future__ import print_function ++ + import errno + import filecmp + import json +--- a/src/3rdparty/chromium/third_party/perfetto/tools/proto_utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/perfetto/tools/proto_utils.py 2025-01-16 02:26:08.600012729 +0800 +@@ -12,7 +12,7 @@ + # See the License for the specific language governing permissions and + # limitations under the License. + +-from __future__ import absolute_import ++ + + import os + import subprocess +@@ -40,7 +40,7 @@ + package=f_desc_pb2.package, + serialized_pb=f_desc_pb2_encode) + +- for desc in f_desc.message_types_by_name.values(): ++ for desc in list(f_desc.message_types_by_name.values()): + desc_by_path[desc.full_name] = desc + + return message_factory.MessageFactory().GetPrototype(desc_by_path[proto_type]) +--- a/src/3rdparty/chromium/third_party/perfetto/tools/pull_ftrace_format_files.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/perfetto/tools/pull_ftrace_format_files.py 2025-01-16 02:26:08.600012729 +0800 +@@ -13,7 +13,7 @@ + # See the License for the specific language governing permissions and + # limitations under the License. + +-from __future__ import print_function ++ + import argparse + import datetime + import os +--- a/src/3rdparty/chromium/third_party/perfetto/tools/serialize_test_trace.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/perfetto/tools/serialize_test_trace.py 2025-01-16 02:26:08.600012729 +0800 +@@ -13,9 +13,9 @@ + # See the License for the specific language governing permissions and + # limitations under the License. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import argparse + import os +--- a/src/3rdparty/chromium/third_party/perfetto/tools/strip_android_host_binary.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/perfetto/tools/strip_android_host_binary.py 2025-01-16 02:26:08.600012729 +0800 +@@ -13,7 +13,7 @@ + # See the License for the specific language governing permissions and + # limitations under the License. 
+ +-from __future__ import print_function ++ + + import argparse + import os +--- a/src/3rdparty/chromium/third_party/perfetto/tools/test_gen_amalgamated.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/perfetto/tools/test_gen_amalgamated.py 2025-01-16 02:26:08.600012729 +0800 +@@ -13,7 +13,7 @@ + # See the License for the specific language governing permissions and + # limitations under the License. + +-from __future__ import print_function ++ + + import os + import shutil +@@ -75,9 +75,9 @@ + gn_args = (' target_os="%s"' % os_name) + GN_ARGS + os_deps[os_name] = call(GEN_AMALGAMATED, '--gn_args', gn_args, '--out', + OUT_DIR, '--dump-deps', '--quiet').split('\n') +- for os_name, deps in os_deps.items(): ++ for os_name, deps in list(os_deps.items()): + for dep in deps: +- for other_os, other_deps in os_deps.items(): ++ for other_os, other_deps in list(os_deps.items()): + if not dep in other_deps: + raise AssertionError('Discrepancy in amalgamated build dependencies: ' + '%s is missing on %s.' % (dep, other_os)) +--- a/src/3rdparty/chromium/third_party/pexpect/FSM.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pexpect/FSM.py 2025-01-16 02:26:08.600012729 +0800 +@@ -92,7 +92,7 @@ + self.value = value + + def __str__(self): +- return `self.value` ++ return repr(self.value) + + class FSM: + +@@ -215,9 +215,9 @@ + 4. No transition was defined. If we get here then raise an exception. + """ + +- if self.state_transitions.has_key((input_symbol, state)): ++ if (input_symbol, state) in self.state_transitions: + return self.state_transitions[(input_symbol, state)] +- elif self.state_transitions_any.has_key (state): ++ elif state in self.state_transitions_any: + return self.state_transitions_any[state] + elif self.default_transition is not None: + return self.default_transition +@@ -296,11 +296,11 @@ + fsm.memory.append (al / ar) + + def DoEqual (fsm): +- print str(fsm.memory.pop()) ++ print(str(fsm.memory.pop())) + + def Error (fsm): +- print 'That does not compute.' +- print str(fsm.input_symbol) ++ print('That does not compute.') ++ print(str(fsm.input_symbol)) + + def main(): + +@@ -317,13 +317,13 @@ + f.add_transition_list (string.whitespace, 'BUILDING_NUMBER', EndBuildNumber, 'INIT') + f.add_transition_list ('+-*/', 'INIT', DoOperator, 'INIT') + +- print +- print 'Enter an RPN Expression.' +- print 'Numbers may be integers. Operators are * / + -' +- print 'Use the = sign to evaluate and print the expression.' +- print 'For example: ' +- print ' 167 3 2 2 * * * 1 - =' +- inputstr = raw_input ('> ') ++ print() ++ print('Enter an RPN Expression.') ++ print('Numbers may be integers. 
Operators are * / + -') ++ print('Use the = sign to evaluate and print the expression.') ++ print('For example: ') ++ print(' 167 3 2 2 * * * 1 - =') ++ inputstr = input ('> ') + f.process_list(inputstr) + + if __name__ == '__main__': +@@ -332,18 +332,18 @@ + parser = optparse.OptionParser(formatter=optparse.TitledHelpFormatter(), usage=globals()['__doc__'], version='$Id: FSM.py 533 2012-10-20 02:19:33Z noah $') + parser.add_option ('-v', '--verbose', action='store_true', default=False, help='verbose output') + (options, args) = parser.parse_args() +- if options.verbose: print time.asctime() ++ if options.verbose: print(time.asctime()) + main() +- if options.verbose: print time.asctime() +- if options.verbose: print 'TOTAL TIME IN MINUTES:', +- if options.verbose: print (time.time() - start_time) / 60.0 ++ if options.verbose: print(time.asctime()) ++ if options.verbose: print('TOTAL TIME IN MINUTES:', end=' ') ++ if options.verbose: print((time.time() - start_time) / 60.0) + sys.exit(0) +- except KeyboardInterrupt, e: # Ctrl-C ++ except KeyboardInterrupt as e: # Ctrl-C + raise e +- except SystemExit, e: # sys.exit() ++ except SystemExit as e: # sys.exit() + raise e +- except Exception, e: +- print 'ERROR, UNEXPECTED EXCEPTION' +- print str(e) ++ except Exception as e: ++ print('ERROR, UNEXPECTED EXCEPTION') ++ print(str(e)) + traceback.print_exc() + os._exit(1) +--- a/src/3rdparty/chromium/third_party/pexpect/fdpexpect.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pexpect/fdpexpect.py 2025-01-16 02:26:08.600012729 +0800 +@@ -49,7 +49,7 @@ + try: # make sure fd is a valid file descriptor + os.fstat(fd) + except OSError: +- raise ExceptionPexpect, 'The fd argument is not a valid file descriptor.' ++ raise ExceptionPexpect('The fd argument is not a valid file descriptor.') + + self.args = None + self.command = None +--- a/src/3rdparty/chromium/third_party/pexpect/pexpect.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pexpect/pexpect.py 2025-01-16 02:26:08.600012729 +0800 +@@ -242,18 +242,18 @@ + while True: + try: + index = child.expect(patterns) +- if type(child.after) in types.StringTypes: ++ if type(child.after) in (str,): + child_result_list.append(child.before + child.after) + else: + # child.after may have been a TIMEOUT or EOF, + # which we don't want appended to the list. 
+ child_result_list.append(child.before) +- if type(responses[index]) in types.StringTypes: ++ if type(responses[index]) in (str,): + child.send(responses[index]) + elif isinstance(responses[index], types.FunctionType): + callback_result = responses[index](locals()) + sys.stdout.flush() +- if type(callback_result) in types.StringTypes: ++ if type(callback_result) in (str,): + child.send(callback_result) + elif callback_result: + break +@@ -1289,7 +1289,7 @@ + compile_flags = compile_flags | re.IGNORECASE + compiled_pattern_list = [] + for p in patterns: +- if type(p) in types.StringTypes: ++ if type(p) in (str,): + compiled_pattern_list.append(re.compile(p, compile_flags)) + elif p is EOF: + compiled_pattern_list.append(EOF) +@@ -1414,7 +1414,7 @@ + This method is also useful when you don't want to have to worry about + escaping regular expression characters that you want to match.""" + +- if (type(pattern_list) in types.StringTypes or ++ if (type(pattern_list) in (str,) or + pattern_list in (TIMEOUT, EOF)): + pattern_list = [pattern_list] + return self.expect_loop(searcher_string(pattern_list), +@@ -1722,7 +1722,7 @@ + ss.append((self.timeout_index, + ' %d: TIMEOUT' % self.timeout_index)) + ss.sort() +- ss = zip(*ss)[1] ++ ss = list(zip(*ss))[1] + return '\n'.join(ss) + + def search(self, buffer, freshlen, searchwindowsize=None): +@@ -1824,7 +1824,7 @@ + ss.append((self.timeout_index, ' %d: TIMEOUT' % + self.timeout_index)) + ss.sort() +- ss = zip(*ss)[1] ++ ss = list(zip(*ss))[1] + return '\n'.join(ss) + + def search(self, buffer, freshlen, searchwindowsize=None): +--- a/src/3rdparty/chromium/third_party/pexpect/pxssh.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pexpect/pxssh.py 2025-01-16 02:26:08.600012729 +0800 +@@ -131,7 +131,7 @@ + if n > m: + a,b = b,a + n,m = m,n +- current = range(n+1) ++ current = list(range(n+1)) + for i in range(1,m+1): + previous, current = current, [i]+[0]*n + for j in range(1,n+1): +--- a/src/3rdparty/chromium/third_party/pffft/generate_seed_corpus.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pffft/generate_seed_corpus.py 2025-01-16 02:26:08.600012729 +0800 +@@ -3,8 +3,8 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ +-from __future__ import division +-from __future__ import print_function ++ ++ + + from array import array + import os +--- a/src/3rdparty/chromium/third_party/ply/lex.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/ply/lex.py 2025-01-16 02:26:08.600012729 +0800 +@@ -44,7 +44,7 @@ + # This tuple contains known string types + try: + # Python 2.6 +- StringTypes = (types.StringType, types.UnicodeType) ++ StringTypes = (bytes, str) + except AttributeError: + # Python 3.0 + StringTypes = (str, bytes) +@@ -150,7 +150,7 @@ + + if object: + newtab = {} +- for key, ritem in self.lexstatere.items(): ++ for key, ritem in list(self.lexstatere.items()): + newre = [] + for cre, findex in ritem: + newfindex = [] +@@ -163,7 +163,7 @@ + newtab[key] = newre + c.lexstatere = newtab + c.lexstateerrorf = {} +- for key, ef in self.lexstateerrorf.items(): ++ for key, ef in list(self.lexstateerrorf.items()): + c.lexstateerrorf[key] = getattr(object, ef.__name__) + c.lexmodule = object + return c +@@ -186,7 +186,7 @@ + + # Rewrite the lexstatere table, replacing function objects with function names + tabre = {} +- for statename, lre in self.lexstatere.items(): ++ for statename, lre in list(self.lexstatere.items()): + titem = [] + for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]): + titem.append((retext, _funcs_to_names(func, renames))) +@@ -196,12 +196,12 @@ + tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore)) + + taberr = {} +- for statename, ef in self.lexstateerrorf.items(): ++ for statename, ef in list(self.lexstateerrorf.items()): + taberr[statename] = ef.__name__ if ef else None + tf.write('_lexstateerrorf = %s\n' % repr(taberr)) + + tabeof = {} +- for statename, ef in self.lexstateeoff.items(): ++ for statename, ef in list(self.lexstateeoff.items()): + tabeof[statename] = ef.__name__ if ef else None + tf.write('_lexstateeoff = %s\n' % repr(tabeof)) + +@@ -226,7 +226,7 @@ + self.lexstateignore = lextab._lexstateignore + self.lexstatere = {} + self.lexstateretext = {} +- for statename, lre in lextab._lexstatere.items(): ++ for statename, lre in list(lextab._lexstatere.items()): + titem = [] + txtitem = [] + for pat, func_name in lre: +@@ -236,11 +236,11 @@ + self.lexstateretext[statename] = txtitem + + self.lexstateerrorf = {} +- for statename, ef in lextab._lexstateerrorf.items(): ++ for statename, ef in list(lextab._lexstateerrorf.items()): + self.lexstateerrorf[statename] = fdict[ef] + + self.lexstateeoff = {} +- for statename, ef in lextab._lexstateeoff.items(): ++ for statename, ef in list(lextab._lexstateeoff.items()): + self.lexstateeoff[statename] = fdict[ef] + + self.begin('INITIAL') +@@ -415,7 +415,7 @@ + def __iter__(self): + return self + +- def next(self): ++ def __next__(self): + t = self.token() + if t is None: + raise StopIteration +@@ -501,7 +501,7 @@ + lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1) + lexindexnames = lexindexfunc[:] + +- for f, i in lexre.groupindex.items(): ++ for f, i in list(lexre.groupindex.items()): + handle = ldict.get(f, None) + if type(handle) in (types.FunctionType, types.MethodType): + lexindexfunc[i] = (handle, toknames[f]) +@@ -717,11 +717,11 @@ + self.error = True + + # Sort the functions by line number +- for f in self.funcsym.values(): ++ for f in list(self.funcsym.values()): + f.sort(key=lambda x: x[1].__code__.co_firstlineno) + + # Sort the strings by regular expression length +- for s in self.strsym.values(): ++ for s in 
list(self.strsym.values()): + s.sort(key=lambda x: len(x[1]), reverse=True) + + # Validate all of the t_rules collected +@@ -975,7 +975,7 @@ + debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text) + + # For inclusive states, we need to add the regular expressions from the INITIAL state +- for state, stype in stateinfo.items(): ++ for state, stype in list(stateinfo.items()): + if state != 'INITIAL' and stype == 'inclusive': + lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL']) + lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL']) +@@ -1001,7 +1001,7 @@ + lexobj.lexeoff = linfo.eoff.get('INITIAL', None) + + # Check state information for ignore and error rules +- for s, stype in stateinfo.items(): ++ for s, stype in list(stateinfo.items()): + if stype == 'exclusive': + if s not in linfo.errorf: + errorlog.warning("No error rule is defined for exclusive state '%s'", s) +--- a/src/3rdparty/chromium/third_party/ply/yacc.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/ply/yacc.py 2025-01-16 02:26:08.600012729 +0800 +@@ -93,7 +93,7 @@ + + # String type-checking compatibility + if sys.version_info[0] < 3: +- string_types = basestring ++ string_types = str + else: + string_types = str + +@@ -314,7 +314,7 @@ + # See: http://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions + def set_defaulted_states(self): + self.defaulted_states = {} +- for state, actions in self.action.items(): ++ for state, actions in list(self.action.items()): + rules = list(actions.values()) + if len(rules) == 1 and rules[0] < 0: + self.defaulted_states[state] = rules[0] +@@ -1349,8 +1349,8 @@ + def __len__(self): + return len(self.prod) + +- def __nonzero__(self): +- return 1 ++ def __bool__(self): ++ return True + + def __getitem__(self, index): + return self.prod[index] +@@ -1689,7 +1689,7 @@ + # Then propagate termination until no change: + while True: + some_change = False +- for (n, pl) in self.Prodnames.items(): ++ for (n, pl) in list(self.Prodnames.items()): + # Nonterminal n terminates iff any of its productions terminates. + for p in pl: + # Production p terminates iff all of its rhs symbols terminate. 
+@@ -1717,7 +1717,7 @@ + break + + infinite = [] +- for (s, term) in terminates.items(): ++ for (s, term) in list(terminates.items()): + if not term: + if s not in self.Prodnames and s not in self.Terminals and s != 'error': + # s is used-but-not-defined, and we've already warned of that, +@@ -1754,7 +1754,7 @@ + # ----------------------------------------------------------------------------- + def unused_terminals(self): + unused_tok = [] +- for s, v in self.Terminals.items(): ++ for s, v in list(self.Terminals.items()): + if s != 'error' and not v: + unused_tok.append(s) + +@@ -1769,7 +1769,7 @@ + + def unused_rules(self): + unused_prod = [] +- for s, v in self.Nonterminals.items(): ++ for s, v in list(self.Nonterminals.items()): + if not v: + p = self.Prodnames[s][0] + unused_prod.append(p) +@@ -1999,7 +1999,7 @@ + + def read_pickle(self, filename): + try: +- import cPickle as pickle ++ import pickle as pickle + except ImportError: + import pickle + +@@ -2490,7 +2490,7 @@ + # ----------------------------------------------------------------------------- + + def add_lookaheads(self, lookbacks, followset): +- for trans, lb in lookbacks.items(): ++ for trans, lb in list(lookbacks.items()): + # Loop over productions in lookback + for state, p in lb: + if state not in p.lookaheads: +@@ -2751,8 +2751,8 @@ + if smaller: + items = {} + +- for s, nd in self.lr_action.items(): +- for name, v in nd.items(): ++ for s, nd in list(self.lr_action.items()): ++ for name, v in list(nd.items()): + i = items.get(name) + if not i: + i = ([], []) +@@ -2761,7 +2761,7 @@ + i[1].append(v) + + f.write('\n_lr_action_items = {') +- for k, v in items.items(): ++ for k, v in list(items.items()): + f.write('%r:([' % k) + for i in v[0]: + f.write('%r,' % i) +@@ -2783,7 +2783,7 @@ + + else: + f.write('\n_lr_action = { ') +- for k, v in self.lr_action.items(): ++ for k, v in list(self.lr_action.items()): + f.write('(%r,%r):%r,' % (k[0], k[1], v)) + f.write('}\n') + +@@ -2791,8 +2791,8 @@ + # Factor out names to try and make smaller + items = {} + +- for s, nd in self.lr_goto.items(): +- for name, v in nd.items(): ++ for s, nd in list(self.lr_goto.items()): ++ for name, v in list(nd.items()): + i = items.get(name) + if not i: + i = ([], []) +@@ -2801,7 +2801,7 @@ + i[1].append(v) + + f.write('\n_lr_goto_items = {') +- for k, v in items.items(): ++ for k, v in list(items.items()): + f.write('%r:([' % k) + for i in v[0]: + f.write('%r,' % i) +@@ -2822,7 +2822,7 @@ + ''') + else: + f.write('\n_lr_goto = { ') +- for k, v in self.lr_goto.items(): ++ for k, v in list(self.lr_goto.items()): + f.write('(%r,%r):%r,' % (k[0], k[1], v)) + f.write('}\n') + +@@ -2849,7 +2849,7 @@ + + def pickle_table(self, filename, signature=''): + try: +- import cPickle as pickle ++ import pickle as pickle + except ImportError: + import pickle + with open(filename, 'wb') as outf: +@@ -3128,7 +3128,7 @@ + # Get all p_functions from the grammar + def get_pfunctions(self): + p_functions = [] +- for name, item in self.pdict.items(): ++ for name, item in list(self.pdict.items()): + if not name.startswith('p_') or name == 'p_error': + continue + if isinstance(item, (types.FunctionType, types.MethodType)): +@@ -3187,7 +3187,7 @@ + # Secondary validation step that looks for p_ definitions that are not functions + # or functions that look like they might be grammar rules. 
+ +- for n, v in self.pdict.items(): ++ for n, v in list(self.pdict.items()): + if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)): + continue + if n.startswith('t_'): +--- a/src/3rdparty/chromium/third_party/polymer/v1_0/create_components_summary.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/polymer/v1_0/create_components_summary.py 2025-01-16 02:26:08.600012729 +0800 +@@ -26,13 +26,13 @@ + repository_web = re.sub('^git:', 'https:', re.sub('\.git$', '', repository)) + # Specify tree to browse to. + tree_link = repository_web + '/tree/' + tree +- print COMPONENT_SUMMARY % { ++ print(COMPONENT_SUMMARY % { + 'name': info['name'], + 'repository': repository, + 'tree': tree, + 'revision': resolution['commit'], + 'tree_link': tree_link +- } ++ }) + + + def GetTreeishName(resolution): +--- a/src/3rdparty/chromium/third_party/polymer/v1_0/css_strip_prefixes.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/polymer/v1_0/css_strip_prefixes.py 2025-01-16 02:26:08.600012729 +0800 +@@ -66,7 +66,7 @@ + indices_to_remove.append(i) + + if len(indices_to_remove): +- print 'stripping CSS from: ' + filename ++ print('stripping CSS from: ' + filename) + + # Process line numbers in descinding order, such that the array can be + # modified in-place. +--- a/src/3rdparty/chromium/third_party/polymer/v1_0/find_unused_elements.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/polymer/v1_0/find_unused_elements.py 2025-01-16 02:26:08.600012729 +0800 +@@ -117,9 +117,9 @@ + unused_elements.append(element) + + if unused_elements: +- print 'Found unused elements: %s\nRemove from bower.json and re-run ' \ ++ print('Found unused elements: %s\nRemove from bower.json and re-run ' \ + 'reproduce.sh, or add to whitelist in %s' % ( +- ', '.join(unused_elements), os.path.basename(__file__)) ++ ', '.join(unused_elements), os.path.basename(__file__))) + + def __IsImported(self, element_dir, dirs): + """Returns whether the element directory is used in HTML or JavaScript. 
+--- a/src/3rdparty/chromium/third_party/polymer/v1_0/rgbify_hex_vars_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/polymer/v1_0/rgbify_hex_vars_test.py 2025-01-16 02:26:08.601096044 +0800 +@@ -10,7 +10,7 @@ + class RgbifyHexVarsTest(unittest.TestCase): + def checkProduces(self, content, expected, **kwargs): + actual = rgbify_hex_vars.Rgbify(content, **kwargs) +- self.assertEquals(actual, expected) ++ self.assertEqual(actual, expected) + + def checkSame(self, content): + self.checkProduces(content, content) +--- a/src/3rdparty/chromium/third_party/protobuf/generate_changelog.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/generate_changelog.py 2025-01-16 02:26:08.601096044 +0800 +@@ -55,11 +55,11 @@ + previous = sys.argv[1] + + for language in languages: +- print(language.name) ++ print((language.name)) + sys.stdout.flush() + os.system(("git log --pretty=oneline --abbrev-commit %s...HEAD %s | " + + "sed -e 's/^/ - /'") % (previous, " ".join(language.pathspec))) + print("") + +-print("To view a commit on GitHub: " + +- "https://github.com/protocolbuffers/protobuf/commit/") ++print(("To view a commit on GitHub: " + ++ "https://github.com/protocolbuffers/protobuf/commit/")) +--- a/src/3rdparty/chromium/third_party/protobuf/update_version.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/update_version.py 2025-01-16 02:26:08.601096044 +0800 +@@ -13,25 +13,25 @@ + from xml.dom import minidom + + if len(sys.argv) < 2 or len(sys.argv) > 3: +- print """ ++ print(""" + [ERROR] Please specify a version. + + ./update_version.py .. [] + + Example: + ./update_version.py 3.7.1 2 +-""" ++""") + exit(1) + + NEW_VERSION = sys.argv[1] + NEW_VERSION_INFO = NEW_VERSION.split('.') + if len(NEW_VERSION_INFO) != 3: +- print """ ++ print(""" + [ERROR] Version must be in the format .. + + Example: + ./update_version.py 3.7.3 +-""" ++""") + exit(1) + + RC_VERSION = 0 +@@ -83,7 +83,7 @@ + for line in lines: + updated_lines.append(line_rewriter(line)) + if lines == updated_lines: +- print '%s was not updated. Please double check.' % filename ++ print('%s was not updated. Please double check.' % filename) + f = open(filename, 'w') + f.write(''.join(updated_lines)) + f.close() +@@ -228,11 +228,11 @@ + protobuf_version_offset = 11 + expected_major_version = '3' + if NEW_VERSION_INFO[0] != expected_major_version: +- print """[ERROR] Major protobuf version has changed. Please update ++ print("""[ERROR] Major protobuf version has changed. Please update + update_version.py to readjust the protobuf_version_offset and + expected_major_version such that the PROTOBUF_VERSION in src/Makefile.am is + always increasing. +- """ ++ """) + exit(1) + + protobuf_version_info = '%s:%s:0' % ( +@@ -278,8 +278,8 @@ + changelog = Find(root, 'changelog') + for old_version in changelog.getElementsByTagName('version'): + if Find(old_version, 'release').firstChild.nodeValue == NEW_VERSION: +- print ('[WARNING] Version %s already exists in the change log.' +- % NEW_VERSION) ++ print(('[WARNING] Version %s already exists in the change log.' 
++ % NEW_VERSION))
+ return
+ changelog.appendChild(document.createTextNode(' '))
+ release = CreateNode('release', 2, [
+--- a/src/3rdparty/chromium/third_party/protobuf/benchmarks/python/py_benchmark.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/protobuf/benchmarks/python/py_benchmark.py 2025-01-16 02:26:08.601096044 +0800
+@@ -1,4 +1,4 @@
+-from __future__ import print_function
++
+ import sys
+ import os
+ import timeit
+--- a/src/3rdparty/chromium/third_party/protobuf/benchmarks/util/big_query_utils.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/protobuf/benchmarks/util/big_query_utils.py 2025-01-16 02:26:08.601096044 +0800
+@@ -1,6 +1,6 @@
+ #!/usr/bin/env python2.7
+
+-from __future__ import print_function
++
+ import argparse
+ import json
+ import uuid
+--- a/src/3rdparty/chromium/third_party/protobuf/benchmarks/util/result_uploader.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/protobuf/benchmarks/util/result_uploader.py 2025-01-16 02:26:08.601096044 +0800
+@@ -1,5 +1,5 @@
+-from __future__ import print_function
+-from __future__ import absolute_import
++
++
+ import argparse
+ import os
+ import re
+--- a/src/3rdparty/chromium/third_party/protobuf/examples/add_person.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/protobuf/examples/add_person.py 2025-01-16 02:26:08.601096044 +0800
+@@ -13,22 +13,22 @@
+
+ # This function fills in a Person message based on user input.
+ def PromptForAddress(person):
+- person.id = int(raw_input("Enter person ID number: "))
+- person.name = raw_input("Enter name: ")
++ person.id = int(input("Enter person ID number: "))
++ person.name = input("Enter name: ")
+
+- email = raw_input("Enter email address (blank for none): ")
++ email = input("Enter email address (blank for none): ")
+ if email != "":
+ person.email = email
+
+ while True:
+- number = raw_input("Enter a phone number (or leave blank to finish): ")
++ number = input("Enter a phone number (or leave blank to finish): ")
+ if number == "":
+ break
+
+ phone_number = person.phones.add()
+ phone_number.number = number
+
+- type = raw_input("Is this a mobile, home, or work phone? ")
++ type = input("Is this a mobile, home, or work phone? ")
+ if type == "mobile":
+ phone_number.type = addressbook_pb2.Person.MOBILE
+ elif type == "home":
+@@ -43,7 +43,7 @@
+ # adds one person based on user input, then writes it back out to the same
+ # file.
+ if len(sys.argv) != 2:
+- print("Usage:", sys.argv[0], "ADDRESS_BOOK_FILE")
++ print("Usage:", sys.argv[0], "ADDRESS_BOOK_FILE")
+ sys.exit(-1)
+
+ address_book = addressbook_pb2.AddressBook()
+@@ -53,7 +53,7 @@
+ with open(sys.argv[1], "rb") as f:
+ address_book.ParseFromString(f.read())
+ except IOError:
+- print(sys.argv[1] + ": File not found. Creating a new file.")
++ print((sys.argv[1] + ": File not found. Creating a new file."))
+
+ # Add an address.
+ PromptForAddress(address_book.people.add())
+--- a/src/3rdparty/chromium/third_party/protobuf/examples/list_people.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/protobuf/examples/list_people.py 2025-01-16 02:26:08.601096044 +0800
+@@ -2,7 +2,7 @@
+
+ # See README.txt for information and build instructions. 
+ +-from __future__ import print_function ++ + import addressbook_pb2 + import sys + +--- a/src/3rdparty/chromium/third_party/protobuf/kokoro/linux/make_test_output.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/kokoro/linux/make_test_output.py 2025-01-16 02:26:08.601096044 +0800 +@@ -64,7 +64,7 @@ + tests["cpp"]["time"] = f.read().strip() + tests["cpp"]["failure"] = False + +- ret = tests.values() ++ ret = list(tests.values()) + ret.sort(key=lambda x: x["name"]) + + return ret +@@ -91,4 +91,4 @@ + + sys.stderr.write("make_test_output.py: writing XML from directory: " + + sys.argv[1] + "\n") +-print(genxml(readtests(sys.argv[1]))) ++print((genxml(readtests(sys.argv[1])))) +--- a/src/3rdparty/chromium/third_party/protobuf/objectivec/DevTools/pddm.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/objectivec/DevTools/pddm.py 2025-01-16 02:26:08.601096044 +0800 +@@ -318,7 +318,7 @@ + # Nothing to do + return macro.body + assert len(arg_values) == len(macro.args) +- args = dict(zip(macro.args, arg_values)) ++ args = dict(list(zip(macro.args, arg_values))) + + def _lookupArg(match): + val = args[match.group('name')] +@@ -351,7 +351,7 @@ + return macro_arg_ref_re.sub(_lookupArg, macro.body) + + def _EvalMacrosRefs(self, text, macro_stack): +- macro_ref_re = _MacroRefRe(self._macros.keys()) ++ macro_ref_re = _MacroRefRe(list(self._macros.keys())) + + def _resolveMacro(match): + return self._Expand(match, macro_stack) +@@ -673,15 +673,15 @@ + + if src_file.processed_content != src_file.original_content: + if not opts.dry_run: +- print('Updating for "%s".' % a_path) ++ print(('Updating for "%s".' % a_path)) + with open(a_path, 'w') as f: + f.write(src_file.processed_content) + else: + # Special result to indicate things need updating. +- print('Update needed for "%s".' % a_path) ++ print(('Update needed for "%s".' % a_path)) + result = 1 + elif opts.verbose: +- print('No update for "%s".' % a_path) ++ print(('No update for "%s".' % a_path)) + + return result + +--- a/src/3rdparty/chromium/third_party/protobuf/objectivec/DevTools/pddm_tests.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/objectivec/DevTools/pddm_tests.py 2025-01-16 02:26:08.601096044 +0800 +@@ -41,24 +41,24 @@ + class TestParsingMacros(unittest.TestCase): + + def testParseEmpty(self): +- f = io.StringIO(u'') ++ f = io.StringIO('') + result = pddm.MacroCollection(f) + self.assertEqual(len(result._macros), 0) + + def testParseOne(self): +- f = io.StringIO(u"""PDDM-DEFINE foo( ) ++ f = io.StringIO("""PDDM-DEFINE foo( ) + body""") + result = pddm.MacroCollection(f) + self.assertEqual(len(result._macros), 1) + macro = result._macros.get('foo') + self.assertIsNotNone(macro) +- self.assertEquals(macro.name, 'foo') +- self.assertEquals(macro.args, tuple()) +- self.assertEquals(macro.body, 'body') ++ self.assertEqual(macro.name, 'foo') ++ self.assertEqual(macro.args, tuple()) ++ self.assertEqual(macro.body, 'body') + + def testParseGeneral(self): + # Tests multiple defines, spaces in all places, etc. 
+- f = io.StringIO(u""" ++ f = io.StringIO(""" + PDDM-DEFINE noArgs( ) + body1 + body2 +@@ -74,21 +74,21 @@ + self.assertEqual(len(result._macros), 3) + macro = result._macros.get('noArgs') + self.assertIsNotNone(macro) +- self.assertEquals(macro.name, 'noArgs') +- self.assertEquals(macro.args, tuple()) +- self.assertEquals(macro.body, 'body1\nbody2\n') ++ self.assertEqual(macro.name, 'noArgs') ++ self.assertEqual(macro.args, tuple()) ++ self.assertEqual(macro.body, 'body1\nbody2\n') + macro = result._macros.get('oneArg') + self.assertIsNotNone(macro) +- self.assertEquals(macro.name, 'oneArg') +- self.assertEquals(macro.args, ('foo',)) +- self.assertEquals(macro.body, 'body3') ++ self.assertEqual(macro.name, 'oneArg') ++ self.assertEqual(macro.args, ('foo',)) ++ self.assertEqual(macro.body, 'body3') + macro = result._macros.get('twoArgs') + self.assertIsNotNone(macro) +- self.assertEquals(macro.name, 'twoArgs') +- self.assertEquals(macro.args, ('bar_', 'baz')) +- self.assertEquals(macro.body, 'body4\nbody5') ++ self.assertEqual(macro.name, 'twoArgs') ++ self.assertEqual(macro.args, ('bar_', 'baz')) ++ self.assertEqual(macro.body, 'body4\nbody5') + # Add into existing collection +- f = io.StringIO(u""" ++ f = io.StringIO(""" + PDDM-DEFINE another(a,b,c) + body1 + body2""") +@@ -96,23 +96,23 @@ + self.assertEqual(len(result._macros), 4) + macro = result._macros.get('another') + self.assertIsNotNone(macro) +- self.assertEquals(macro.name, 'another') +- self.assertEquals(macro.args, ('a', 'b', 'c')) +- self.assertEquals(macro.body, 'body1\nbody2') ++ self.assertEqual(macro.name, 'another') ++ self.assertEqual(macro.args, ('a', 'b', 'c')) ++ self.assertEqual(macro.body, 'body1\nbody2') + + def testParseDirectiveIssues(self): + test_list = [ + # Unknown directive +- (u'PDDM-DEFINE foo()\nbody\nPDDM-DEFINED foo\nbaz', ++ ('PDDM-DEFINE foo()\nbody\nPDDM-DEFINED foo\nbaz', + 'Hit a line with an unknown directive: '), + # End without begin +- (u'PDDM-DEFINE foo()\nbody\nPDDM-DEFINE-END\nPDDM-DEFINE-END\n', ++ ('PDDM-DEFINE foo()\nbody\nPDDM-DEFINE-END\nPDDM-DEFINE-END\n', + 'Got DEFINE-END directive without an active macro: '), + # Line not in macro block +- (u'PDDM-DEFINE foo()\nbody\nPDDM-DEFINE-END\nmumble\n', ++ ('PDDM-DEFINE foo()\nbody\nPDDM-DEFINE-END\nmumble\n', + 'Hit a line that wasn\'t a directive and no open macro definition: '), + # Redefine macro +- (u'PDDM-DEFINE foo()\nbody\nPDDM-DEFINE foo(a)\nmumble\n', ++ ('PDDM-DEFINE foo()\nbody\nPDDM-DEFINE foo(a)\nmumble\n', + 'Attempt to redefine macro: '), + ] + for idx, (input_str, expected_prefix) in enumerate(test_list, 1): +@@ -127,47 +127,47 @@ + def testParseBeginIssues(self): + test_list = [ + # 1. No name +- (u'PDDM-DEFINE\nmumble', ++ ('PDDM-DEFINE\nmumble', + 'Failed to parse macro definition: '), + # 2. No name (with spaces) +- (u'PDDM-DEFINE \nmumble', ++ ('PDDM-DEFINE \nmumble', + 'Failed to parse macro definition: '), + # 3. No open paren +- (u'PDDM-DEFINE foo\nmumble', ++ ('PDDM-DEFINE foo\nmumble', + 'Failed to parse macro definition: '), + # 4. No close paren +- (u'PDDM-DEFINE foo(\nmumble', ++ ('PDDM-DEFINE foo(\nmumble', + 'Failed to parse macro definition: '), + # 5. No close paren (with args) +- (u'PDDM-DEFINE foo(a, b\nmumble', ++ ('PDDM-DEFINE foo(a, b\nmumble', + 'Failed to parse macro definition: '), + # 6. No name before args +- (u'PDDM-DEFINE (a, b)\nmumble', ++ ('PDDM-DEFINE (a, b)\nmumble', + 'Failed to parse macro definition: '), + # 7. 
No name before args +- (u'PDDM-DEFINE foo bar(a, b)\nmumble', ++ ('PDDM-DEFINE foo bar(a, b)\nmumble', + 'Failed to parse macro definition: '), + # 8. Empty arg name +- (u'PDDM-DEFINE foo(a, ,b)\nmumble', ++ ('PDDM-DEFINE foo(a, ,b)\nmumble', + 'Empty arg name in macro definition: '), +- (u'PDDM-DEFINE foo(a,,b)\nmumble', ++ ('PDDM-DEFINE foo(a,,b)\nmumble', + 'Empty arg name in macro definition: '), + # 10. Duplicate name +- (u'PDDM-DEFINE foo(a,b,a,c)\nmumble', ++ ('PDDM-DEFINE foo(a,b,a,c)\nmumble', + 'Arg name "a" used more than once in macro definition: '), + # 11. Invalid arg name +- (u'PDDM-DEFINE foo(a b,c)\nmumble', ++ ('PDDM-DEFINE foo(a b,c)\nmumble', + 'Invalid arg name "a b" in macro definition: '), +- (u'PDDM-DEFINE foo(a.b,c)\nmumble', ++ ('PDDM-DEFINE foo(a.b,c)\nmumble', + 'Invalid arg name "a.b" in macro definition: '), +- (u'PDDM-DEFINE foo(a-b,c)\nmumble', ++ ('PDDM-DEFINE foo(a-b,c)\nmumble', + 'Invalid arg name "a-b" in macro definition: '), +- (u'PDDM-DEFINE foo(a,b,c.)\nmumble', ++ ('PDDM-DEFINE foo(a,b,c.)\nmumble', + 'Invalid arg name "c." in macro definition: '), + # 15. Extra stuff after the name +- (u'PDDM-DEFINE foo(a,c) foo\nmumble', ++ ('PDDM-DEFINE foo(a,c) foo\nmumble', + 'Failed to parse macro definition: '), +- (u'PDDM-DEFINE foo(a,c) foo)\nmumble', ++ ('PDDM-DEFINE foo(a,c) foo)\nmumble', + 'Failed to parse macro definition: '), + ] + for idx, (input_str, expected_prefix) in enumerate(test_list, 1): +@@ -183,7 +183,7 @@ + class TestExpandingMacros(unittest.TestCase): + + def testExpandBasics(self): +- f = io.StringIO(u""" ++ f = io.StringIO(""" + PDDM-DEFINE noArgs( ) + body1 + body2 +@@ -203,21 +203,21 @@ + """) + mc = pddm.MacroCollection(f) + test_list = [ +- (u'noArgs()', ++ ('noArgs()', + 'body1\nbody2\n'), +- (u'oneArg(wee)', ++ ('oneArg(wee)', + 'body3 wee\n'), +- (u'twoArgs(having some, fun)', ++ ('twoArgs(having some, fun)', + 'body4 having some fun\nbody5'), + # One arg, pass empty. +- (u'oneArg()', ++ ('oneArg()', + 'body3 \n'), + # Two args, gets empty in each slot. +- (u'twoArgs(, empty)', ++ ('twoArgs(, empty)', + 'body4 empty\nbody5'), +- (u'twoArgs(empty, )', ++ ('twoArgs(empty, )', + 'body4 empty \nbody5'), +- (u'twoArgs(, )', ++ ('twoArgs(, )', + 'body4 \nbody5'), + ] + for idx, (input_str, expected) in enumerate(test_list, 1): +@@ -227,7 +227,7 @@ + (idx, result, expected)) + + def testExpandArgOptions(self): +- f = io.StringIO(u""" ++ f = io.StringIO(""" + PDDM-DEFINE bar(a) + a-a$S-a$l-a$L-a$u-a$U + PDDM-DEFINE-END +@@ -240,7 +240,7 @@ + self.assertEqual(mc.Expand('bar()'), '-----') + + def testExpandSimpleMacroErrors(self): +- f = io.StringIO(u""" ++ f = io.StringIO(""" + PDDM-DEFINE foo(a, b) + + PDDM-DEFINE baz(a) +@@ -249,19 +249,19 @@ + mc = pddm.MacroCollection(f) + test_list = [ + # 1. Unknown macro +- (u'bar()', ++ ('bar()', + 'No macro named "bar".'), +- (u'bar(a)', ++ ('bar(a)', + 'No macro named "bar".'), + # 3. Arg mismatch +- (u'foo()', ++ ('foo()', + 'Expected 2 args, got: "foo()".'), +- (u'foo(a b)', ++ ('foo(a b)', + 'Expected 2 args, got: "foo(a b)".'), +- (u'foo(a,b,c)', ++ ('foo(a,b,c)', + 'Expected 2 args, got: "foo(a,b,c)".'), + # 6. 
Unknown option in expansion +- (u'baz(mumble)', ++ ('baz(mumble)', + 'Unknown arg option "a$z" while expanding "baz(mumble)".'), + ] + for idx, (input_str, expected_err) in enumerate(test_list, 1): +@@ -273,7 +273,7 @@ + 'Entry %d failed: %r' % (idx, e)) + + def testExpandReferences(self): +- f = io.StringIO(u""" ++ f = io.StringIO(""" + PDDM-DEFINE StartIt() + foo(abc, def) + foo(ghi, jkl) +@@ -301,7 +301,7 @@ + self.assertEqual(mc.Expand('StartIt()'), expected) + + def testCatchRecursion(self): +- f = io.StringIO(u""" ++ f = io.StringIO(""" + PDDM-DEFINE foo(a, b) + bar(1, a) + bar(2, b) +@@ -322,29 +322,29 @@ + def testBasicParse(self): + test_list = [ + # 1. no directives +- (u'a\nb\nc', ++ ('a\nb\nc', + (3,) ), + # 2. One define +- (u'a\n//%PDDM-DEFINE foo()\n//%body\nc', ++ ('a\n//%PDDM-DEFINE foo()\n//%body\nc', + (1, 2, 1) ), + # 3. Two defines +- (u'a\n//%PDDM-DEFINE foo()\n//%body\n//%PDDM-DEFINE bar()\n//%body2\nc', ++ ('a\n//%PDDM-DEFINE foo()\n//%body\n//%PDDM-DEFINE bar()\n//%body2\nc', + (1, 4, 1) ), + # 4. Two defines with ends +- (u'a\n//%PDDM-DEFINE foo()\n//%body\n//%PDDM-DEFINE-END\n' +- u'//%PDDM-DEFINE bar()\n//%body2\n//%PDDM-DEFINE-END\nc', ++ ('a\n//%PDDM-DEFINE foo()\n//%body\n//%PDDM-DEFINE-END\n' ++ '//%PDDM-DEFINE bar()\n//%body2\n//%PDDM-DEFINE-END\nc', + (1, 6, 1) ), + # 5. One expand, one define (that runs to end of file) +- (u'a\n//%PDDM-EXPAND foo()\nbody\n//%PDDM-EXPAND-END\n' +- u'//%PDDM-DEFINE bar()\n//%body2\n', ++ ('a\n//%PDDM-EXPAND foo()\nbody\n//%PDDM-EXPAND-END\n' ++ '//%PDDM-DEFINE bar()\n//%body2\n', + (1, 1, 2) ), + # 6. One define ended with an expand. +- (u'a\nb\n//%PDDM-DEFINE bar()\n//%body2\n' +- u'//%PDDM-EXPAND bar()\nbody2\n//%PDDM-EXPAND-END\n', ++ ('a\nb\n//%PDDM-DEFINE bar()\n//%body2\n' ++ '//%PDDM-EXPAND bar()\nbody2\n//%PDDM-EXPAND-END\n', + (2, 2, 1) ), + # 7. Two expands (one end), one define. +- (u'a\n//%PDDM-EXPAND foo(1)\nbody\n//%PDDM-EXPAND foo(2)\nbody2\n//%PDDM-EXPAND-END\n' +- u'//%PDDM-DEFINE foo()\n//%body2\n', ++ ('a\n//%PDDM-EXPAND foo(1)\nbody\n//%PDDM-EXPAND foo(2)\nbody2\n//%PDDM-EXPAND-END\n' ++ '//%PDDM-DEFINE foo()\n//%body2\n', + (1, 2, 2) ), + ] + for idx, (input_str, line_counts) in enumerate(test_list, 1): +@@ -362,24 +362,24 @@ + def testErrors(self): + test_list = [ + # 1. Directive within expansion +- (u'//%PDDM-EXPAND a()\n//%PDDM-BOGUS', ++ ('//%PDDM-EXPAND a()\n//%PDDM-BOGUS', + 'Ran into directive ("//%PDDM-BOGUS", line 2) while in "//%PDDM-EXPAND a()".'), +- (u'//%PDDM-EXPAND a()\n//%PDDM-DEFINE a()\n//%body\n', ++ ('//%PDDM-EXPAND a()\n//%PDDM-DEFINE a()\n//%body\n', + 'Ran into directive ("//%PDDM-DEFINE", line 2) while in "//%PDDM-EXPAND a()".'), + # 3. Expansion ran off end of file +- (u'//%PDDM-EXPAND a()\na\nb\n', ++ ('//%PDDM-EXPAND a()\na\nb\n', + 'Hit the end of the file while in "//%PDDM-EXPAND a()".'), + # 4. Directive within define +- (u'//%PDDM-DEFINE a()\n//%body\n//%PDDM-BOGUS', ++ ('//%PDDM-DEFINE a()\n//%body\n//%PDDM-BOGUS', + 'Ran into directive ("//%PDDM-BOGUS", line 3) while in "//%PDDM-DEFINE a()".'), +- (u'//%PDDM-DEFINE a()\n//%body\n//%PDDM-EXPAND-END a()', ++ ('//%PDDM-DEFINE a()\n//%body\n//%PDDM-EXPAND-END a()', + 'Ran into directive ("//%PDDM-EXPAND-END", line 3) while in "//%PDDM-DEFINE a()".'), + # 6. 
Directives that shouldn't start sections
+- (u'a\n//%PDDM-DEFINE-END a()\n//a\n',
++ ('a\n//%PDDM-DEFINE-END a()\n//a\n',
+ 'Unexpected line 2: "//%PDDM-DEFINE-END a()".'),
+- (u'a\n//%PDDM-EXPAND-END a()\n//a\n',
++ ('a\n//%PDDM-EXPAND-END a()\n//a\n',
+ 'Unexpected line 2: "//%PDDM-EXPAND-END a()".'),
+- (u'//%PDDM-BOGUS\n//a\n',
++ ('//%PDDM-BOGUS\n//a\n',
+ 'Unexpected line 1: "//%PDDM-BOGUS".'),
+ ]
+ for idx, (input_str, expected_err) in enumerate(test_list, 1):
+@@ -394,7 +394,7 @@
+ class TestProcessingSource(unittest.TestCase):
+
+ def testBasics(self):
+- input_str = u"""
++ input_str = """
+ //%PDDM-IMPORT-DEFINES ImportFile
+ foo
+ //%PDDM-EXPAND mumble(abc)
+@@ -407,12 +407,12 @@
+ //%PDDM-DEFINE mumble(a_)
+ //%a_: getName(a_)
+ """
+- input_str2 = u"""
++ input_str2 = """
+ //%PDDM-DEFINE getName(x_)
+ //%do##x_$u##(int x_);
+
+ """
+- expected = u"""
++ expected = """
+ //%PDDM-IMPORT-DEFINES ImportFile
+ foo
+ //%PDDM-EXPAND mumble(abc)
+@@ -434,7 +434,7 @@
+ //%PDDM-DEFINE mumble(a_)
+ //%a_: getName(a_)
+ """
+- expected_stripped = u"""
++ expected_stripped = """
+ //%PDDM-IMPORT-DEFINES ImportFile
+ foo
+ //%PDDM-EXPAND mumble(abc)
+@@ -471,7 +471,7 @@
+ self.assertEqual(sf2.processed_content, expected_stripped)
+
+ def testProcessFileWithMacroParseError(self):
+- input_str = u"""
++ input_str = """
+ foo
+ //%PDDM-DEFINE mumble(a_)
+ //%body
+@@ -491,7 +491,7 @@
+ ' Line 3: //%PDDM-DEFINE mumble(a_)')
+
+ def testProcessFileWithExpandError(self):
+- input_str = u"""
++ input_str = """
+ foo
+ //%PDDM-DEFINE mumble(a_)
+ //%body
+--- a/src/3rdparty/chromium/third_party/protobuf/python/mox.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/protobuf/python/mox.py 2025-01-16 02:26:08.601096044 +0800
+@@ -152,8 +152,8 @@
+
+ # A list of types that should be stubbed out with MockObjects (as
+ # opposed to MockAnythings).
+- _USE_MOCK_OBJECT = [types.ClassType, types.InstanceType, types.ModuleType,
+- types.ObjectType, types.TypeType]
++ _USE_MOCK_OBJECT = [type, types.ModuleType,
++ object]
+
+ def __init__(self):
+ """Initialize a new Mox."""
+@@ -306,7 +306,7 @@
+ return MockMethod(method_name, self._expected_calls_queue,
+ self._replay_mode)
+
+- def __nonzero__(self):
++ def __bool__(self):
+ """Return 1 for nonzero so the mock can be used as a conditional."""
+
+ return 1
+@@ -1350,7 +1350,7 @@
+ for attr_name in dir(base):
+ d[attr_name] = getattr(base, attr_name)
+
+- for func_name, func in d.items():
++ for func_name, func in list(d.items()):
+ if func_name.startswith('test') and callable(func):
+ setattr(cls, func_name, MoxMetaTestBase.CleanUpTest(cls, func))
+
+@@ -1386,7 +1386,7 @@
+ return new_method
+
+
+-class MoxTestBase(unittest.TestCase):
++class MoxTestBase(unittest.TestCase, metaclass=MoxMetaTestBase):
+ """Convenience test class to make stubbing easier.
+
+ Sets up a "mox" attribute which is an instance of Mox - any mox tests will
+@@ -1395,7 +1395,5 @@
+ code.
+ """
+
+- __metaclass__ = MoxMetaTestBase
+-
+ def setUp(self):
+ self.mox = Mox()
+--- a/src/3rdparty/chromium/third_party/protobuf/python/setup.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/protobuf/python/setup.py 2025-01-16 02:26:08.601096044 +0800
+@@ -59,7 +59,7 @@
+ if (not os.path.exists(output) or
+ (os.path.exists(source) and
+ os.path.getmtime(source) > os.path.getmtime(output))):
+- print("Generating %s..." % output)
++ print(("Generating %s..." 
% output)) + + if not os.path.exists(source): + sys.stderr.write("Can't find required file: %s\n" % source) +--- a/src/3rdparty/chromium/third_party/protobuf/python/stubout.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/stubout.py 2025-01-16 02:26:08.601096044 +0800 +@@ -63,7 +63,7 @@ + Raises AttributeError if the attribute cannot be found. + """ + if (inspect.ismodule(obj) or +- (not inspect.isclass(obj) and obj.__dict__.has_key(attr_name))): ++ (not inspect.isclass(obj) and attr_name in obj.__dict__)): + orig_obj = obj + orig_attr = getattr(obj, attr_name) + +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/descriptor_pool.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/descriptor_pool.py 2025-01-16 02:26:08.602179359 +0800 +@@ -321,7 +321,7 @@ + # TODO(jieluo): This is a temporary solution for FieldDescriptor.file. + # FieldDescriptor.file is added in code gen. Remove this solution after + # maybe 2020 for compatibility reason (with 3.4.1 only). +- for extension in file_desc.extensions_by_name.values(): ++ for extension in list(file_desc.extensions_by_name.values()): + self._file_desc_by_toplevel_extension[ + extension.full_name] = file_desc + +@@ -612,12 +612,12 @@ + + try: + file_desc = self._ConvertFileProtoToFileDescriptor(file_proto) +- for extension in file_desc.extensions_by_name.values(): ++ for extension in list(file_desc.extensions_by_name.values()): + self._extensions_by_number[extension.containing_type][ + extension.number] = extension + self._extensions_by_name[extension.containing_type][ + extension.full_name] = extension +- for message_type in file_desc.message_types_by_name.values(): ++ for message_type in list(file_desc.message_types_by_name.values()): + for extension in message_type.extensions: + self._extensions_by_number[extension.containing_type][ + extension.number] = extension +@@ -719,9 +719,9 @@ + # file proto. 
+ for dependency in built_deps: + scope.update(self._ExtractSymbols( +- dependency.message_types_by_name.values())) ++ list(dependency.message_types_by_name.values()))) + scope.update((_PrefixWithDot(enum.full_name), enum) +- for enum in dependency.enum_types_by_name.values()) ++ for enum in list(dependency.enum_types_by_name.values())) + + for message_type in file_proto.message_type: + message_desc = self._ConvertMessageDescriptor( +@@ -972,7 +972,7 @@ + self._SetFieldType(field_proto, field_desc, nested_package, scope) + + for extension_proto, extension_desc in ( +- zip(desc_proto.extension, main_desc.extensions)): ++ list(zip(desc_proto.extension, main_desc.extensions))): + extension_desc.containing_type = self._GetTypeFromScope( + nested_package, extension_proto.extendee, scope) + self._SetFieldType(extension_proto, extension_desc, nested_package, scope) +@@ -1039,7 +1039,7 @@ + field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT): + field_desc.default_value = 0.0 + elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING: +- field_desc.default_value = u'' ++ field_desc.default_value = '' + elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL: + field_desc.default_value = False + elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/message_factory.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/message_factory.py 2025-01-16 02:26:08.602179359 +0800 +@@ -112,7 +112,7 @@ + result = {} + for file_name in files: + file_desc = self.pool.FindFileByName(file_name) +- for desc in file_desc.message_types_by_name.values(): ++ for desc in list(file_desc.message_types_by_name.values()): + result[desc.full_name] = self.GetPrototype(desc) + + # While the extension FieldDescriptors are created by the descriptor pool, +@@ -124,7 +124,7 @@ + # ignore the registration if the original was the same, or raise + # an error if they were different. + +- for extension in file_desc.extensions_by_name.values(): ++ for extension in list(file_desc.extensions_by_name.values()): + if extension.containing_type not in self._classes: + self.GetPrototype(extension.containing_type) + extended_class = self._classes[extension.containing_type] +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/proto_builder.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/proto_builder.py 2025-01-16 02:26:08.602179359 +0800 +@@ -84,7 +84,7 @@ + # Get a list of (name, field_type) tuples from the fields dict. If fields was + # an OrderedDict we keep the order, but otherwise we sort the field to ensure + # consistent ordering. 
+- field_items = fields.items() ++ field_items = list(fields.items()) + if not isinstance(fields, OrderedDict): + field_items = sorted(field_items) + +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/symbol_database.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/symbol_database.py 2025-01-16 02:26:08.602179359 +0800 +@@ -171,7 +171,7 @@ + result = {} + for file_name in files: + file_desc = self.pool.FindFileByName(file_name) +- for msg_desc in file_desc.message_types_by_name.values(): ++ for msg_desc in list(file_desc.message_types_by_name.values()): + for desc in _GetAllMessages(msg_desc): + try: + result[desc.full_name] = self._classes[desc] +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/text_encoding.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/text_encoding.py 2025-01-16 02:26:08.602179359 +0800 +@@ -43,14 +43,14 @@ + + # Lookup table for unicode + _cescape_unicode_to_str = [chr(i) for i in range(0, 256)] +-for byte, string in _cescape_chr_to_symbol_map.items(): ++for byte, string in list(_cescape_chr_to_symbol_map.items()): + _cescape_unicode_to_str[byte] = string + + # Lookup table for non-utf8, with necessary escapes at (o >= 127 or o < 32) + _cescape_byte_to_str = ([r'\%03o' % i for i in range(0, 32)] + + [chr(i) for i in range(32, 127)] + + [r'\%03o' % i for i in range(127, 256)]) +-for byte, string in _cescape_chr_to_symbol_map.items(): ++for byte, string in list(_cescape_chr_to_symbol_map.items()): + _cescape_byte_to_str[byte] = string + del byte, string + +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/text_format.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/text_format.py 2025-01-16 02:26:08.602179359 +0800 +@@ -645,7 +645,7 @@ + Raises: + ParseError: On text parsing problems. + """ +- return ParseLines(text.split(b'\n' if isinstance(text, bytes) else u'\n'), ++ return ParseLines(text.split(b'\n' if isinstance(text, bytes) else '\n'), + message, + allow_unknown_extension, + allow_field_number, +@@ -682,7 +682,7 @@ + ParseError: On text parsing problems. + """ + return MergeLines( +- text.split(b'\n' if isinstance(text, bytes) else u'\n'), ++ text.split(b'\n' if isinstance(text, bytes) else '\n'), + message, + allow_unknown_extension, + allow_field_number, +@@ -1676,7 +1676,7 @@ + # alternate implementations where the distinction is more significant + # (e.g. the C++ implementation) simpler. 
+ if is_long: +- return long(text, 0) ++ return int(text, 0) + else: + return int(text, 0) + except ValueError: +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/_parameterized.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/_parameterized.py 2025-01-16 02:26:08.602179359 +0800 +@@ -187,7 +187,7 @@ + def _FormatParameterList(testcase_params): + if isinstance(testcase_params, collections_abc.Mapping): + return ', '.join('%s=%s' % (argname, _CleanRepr(value)) +- for argname, value in testcase_params.items()) ++ for argname, value in list(testcase_params.items())) + elif _NonStringIterable(testcase_params): + return ', '.join(map(_CleanRepr, testcase_params)) + else: +@@ -271,7 +271,7 @@ + class_object._id_suffix = id_suffix = {} + # We change the size of __dict__ while we iterate over it, + # which Python 3.x will complain about, so use copy(). +- for name, obj in class_object.__dict__.copy().items(): ++ for name, obj in list(class_object.__dict__.copy().items()): + if (name.startswith(unittest.TestLoader.testMethodPrefix) + and isinstance(obj, types.FunctionType)): + delattr(class_object, name) +@@ -279,7 +279,7 @@ + _UpdateClassDictForParamTestCase( + methods, id_suffix, name, + _ParameterizedTestIter(obj, testcases, naming_type)) +- for name, meth in methods.items(): ++ for name, meth in list(methods.items()): + setattr(class_object, name, meth) + + +@@ -359,7 +359,7 @@ + + def __new__(mcs, class_name, bases, dct): + dct['_id_suffix'] = id_suffix = {} +- for name, obj in dct.items(): ++ for name, obj in list(dct.items()): + if (name.startswith(unittest.TestLoader.testMethodPrefix) and + _NonStringIterable(obj)): + iterator = iter(obj) +@@ -391,9 +391,8 @@ + id_suffix[new_name] = getattr(func, '__x_extra_id__', '') + + +-class TestCase(unittest.TestCase): ++class TestCase(unittest.TestCase, metaclass=TestGeneratorMetaclass): + """Base class for test cases using the parameters decorator.""" +- __metaclass__ = TestGeneratorMetaclass + + def _OriginalName(self): + return self._testMethodName.split(_SEPARATOR)[0] +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/containers.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/containers.py 2025-01-16 02:26:08.602179359 +0800 +@@ -112,7 +112,7 @@ + def __eq__(self, other): + if not isinstance(other, collections_abc.Mapping): + return NotImplemented +- return dict(self.items()) == dict(other.items()) ++ return dict(list(self.items())) == dict(list(other.items())) + + def __ne__(self, other): + return not (self == other) +@@ -162,12 +162,12 @@ + for key in other: + self[key] = other[key] + elif hasattr(other, "keys"): +- for key in other.keys(): ++ for key in list(other.keys()): + self[key] = other[key] + else: + for key, value in other: + self[key] = value +- for key, value in kwds.items(): ++ for key, value in list(kwds.items()): + self[key] = value + + def setdefault(self, key, default=None): +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/decoder.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/decoder.py 2025-01-16 02:26:08.602179359 +0800 +@@ -163,8 +163,8 @@ + # alternate implementations where the distinction is more significant + # (e.g. the C++ implementation) simpler. 
+ +-_DecodeVarint = _VarintDecoder((1 << 64) - 1, long) +-_DecodeSignedVarint = _SignedVarintDecoder(64, long) ++_DecodeVarint = _VarintDecoder((1 << 64) - 1, int) ++_DecodeSignedVarint = _SignedVarintDecoder(64, int) + + # Use these versions for values which must be limited to 32 bits. + _DecodeVarint32 = _VarintDecoder((1 << 32) - 1, int) +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/descriptor_database_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/descriptor_database_test.py 2025-01-16 02:26:08.602179359 +0800 +@@ -105,7 +105,7 @@ + self.assertEqual(file_desc_proto2, db.FindFileContainingSymbol( + 'protobuf_unittest.TestAllTypes.none_field')) + +- with self.assertRaisesRegexp(KeyError, r'\'protobuf_unittest\.NoneMessage\''): ++ with self.assertRaisesRegex(KeyError, r'\'protobuf_unittest\.NoneMessage\''): + db.FindFileContainingSymbol('protobuf_unittest.NoneMessage') + + def testConflictRegister(self): +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/descriptor_pool_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/descriptor_pool_test.py 2025-01-16 02:26:08.602179359 +0800 +@@ -457,11 +457,11 @@ + + def _CheckDefaultValues(msg): + try: +- int64 = long ++ int64 = int + except NameError: # Python3 + int64 = int + try: +- unicode_type = unicode ++ unicode_type = str + except NameError: # Python3 + unicode_type = str + _CheckValueAndType(msg.optional_int32, 0, int) +@@ -469,7 +469,7 @@ + _CheckValueAndType(msg.optional_float, 0, (float, int)) + _CheckValueAndType(msg.optional_double, 0, (float, int)) + _CheckValueAndType(msg.optional_bool, False, bool) +- _CheckValueAndType(msg.optional_string, u'', unicode_type) ++ _CheckValueAndType(msg.optional_string, '', unicode_type) + _CheckValueAndType(msg.optional_bytes, b'', bytes) + _CheckValueAndType(msg.optional_nested_enum, msg.FOO, int) + # First for the generated message +@@ -649,10 +649,10 @@ + enum_value.number = 0 + self.db.Add(file_proto) + +- self.assertRaisesRegexp(KeyError, 'SubMessage', ++ self.assertRaisesRegex(KeyError, 'SubMessage', + self.pool.FindMessageTypeByName, + 'collector.ErrorMessage') +- self.assertRaisesRegexp(KeyError, 'SubMessage', ++ self.assertRaisesRegex(KeyError, 'SubMessage', + self.pool.FindFileByName, 'error_file') + with self.assertRaises(KeyError) as exc: + self.pool.FindFileByName('none_file') +@@ -715,7 +715,7 @@ + test.assertEqual(self.dependencies, dependencies_names) + public_dependencies_names = [f.name for f in file_desc.public_dependencies] + test.assertEqual(self.public_dependencies, public_dependencies_names) +- for name, msg_type in self.messages.items(): ++ for name, msg_type in list(self.messages.items()): + msg_type.CheckType(test, None, name, file_desc) + + +@@ -762,7 +762,7 @@ + test.assertEqual(containing_type_desc, desc.containing_type) + test.assertEqual(desc.file, file_desc) + test.assertEqual(self.is_extendable, desc.is_extendable) +- for name, subtype in self.type_dict.items(): ++ for name, subtype in list(self.type_dict.items()): + subtype.CheckType(test, desc, name, file_desc) + + for index, (name, field) in enumerate(self.field_list): +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/descriptor_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ 
b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/descriptor_test.py 2025-01-16 02:26:08.603262673 +0800 +@@ -573,15 +573,15 @@ + self.assertNotEqual(mapping, {}) + self.assertNotEqual(mapping, 1) + self.assertFalse(mapping == 1) # Only for cpp test coverage +- excepted_dict = dict(mapping.items()) ++ excepted_dict = dict(list(mapping.items())) + self.assertEqual(mapping, excepted_dict) + self.assertEqual(mapping, mapping) + self.assertGreater(len(mapping), 0) # Sized + self.assertEqual(len(mapping), len(excepted_dict)) # Iterable + if sys.version_info >= (3,): +- key, item = next(iter(mapping.items())) ++ key, item = next(iter(list(mapping.items()))) + else: +- key, item = mapping.items()[0] ++ key, item = list(mapping.items())[0] + self.assertIn(key, mapping) # Container + self.assertEqual(mapping.get(key), item) + with self.assertRaises(TypeError): +@@ -592,15 +592,15 @@ + else: + self.assertEqual(None, mapping.get([])) + # keys(), iterkeys() &co +- item = (next(iter(mapping.keys())), next(iter(mapping.values()))) +- self.assertEqual(item, next(iter(mapping.items()))) ++ item = (next(iter(list(mapping.keys()))), next(iter(list(mapping.values())))) ++ self.assertEqual(item, next(iter(list(mapping.items())))) + if sys.version_info < (3,): + def CheckItems(seq, iterator): + self.assertEqual(next(iterator), seq[0]) + self.assertEqual(list(iterator), seq[1:]) +- CheckItems(mapping.keys(), mapping.iterkeys()) +- CheckItems(mapping.values(), mapping.itervalues()) +- CheckItems(mapping.items(), mapping.iteritems()) ++ CheckItems(list(mapping.keys()), iter(mapping.keys())) ++ CheckItems(list(mapping.values()), iter(mapping.values())) ++ CheckItems(list(mapping.items()), iter(mapping.items())) + excepted_dict[key] = 'change value' + self.assertNotEqual(mapping, excepted_dict) + del excepted_dict[key] +@@ -610,7 +610,7 @@ + self.assertRaises(KeyError, mapping.__getitem__, len(mapping) + 1) + # TODO(jieluo): Add __repr__ support for DescriptorMapping. + if api_implementation.Type() == 'python': +- self.assertEqual(len(str(dict(mapping.items()))), len(str(mapping))) ++ self.assertEqual(len(str(dict(list(mapping.items())))), len(str(mapping))) + else: + self.assertEqual(str(mapping)[0], '<') + +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/generator_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/generator_test.py 2025-01-16 02:26:08.603262673 +0800 +@@ -300,7 +300,7 @@ + self.assertEqual( + nested_names, + set([field.name for field in desc.oneofs[0].fields])) +- for field_name, field_desc in desc.fields_by_name.items(): ++ for field_name, field_desc in list(desc.fields_by_name.items()): + if field_name in nested_names: + self.assertIs(desc.oneofs[0], field_desc.containing_oneof) + else: +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/json_format_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/json_format_test.py 2025-01-16 02:26:08.603262673 +0800 +@@ -104,7 +104,7 @@ + + def CheckError(self, text, error_message): + message = json_format_proto3_pb2.TestMessage() +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + json_format.ParseError, + error_message, + json_format.Parse, text, message) +@@ -242,7 +242,7 @@ + }, + '[protobuf_unittest.' 
+ 'TestMessageSetExtension2.messageSetExtension]': { +- 'str': u'foo', ++ 'str': 'foo', + }, + }, + } +@@ -259,7 +259,7 @@ + ) + expected_dict = { + '[protobuf_unittest.TestExtension.ext]': { +- 'value': u'stuff', ++ 'value': 'stuff', + }, + } + self.assertEqual(expected_dict, message_dict) +@@ -304,7 +304,7 @@ + '"&\\n<\\\"\\r>\\b\\t\\f\\\\\\u0001/\\u2028\\u2029"\n}') + parsed_message = json_format_proto3_pb2.TestMessage() + self.CheckParseBack(message, parsed_message) +- text = u'{"int32Value": "\u0031"}' ++ text = '{"int32Value": "\u0031"}' + json_format.Parse(text, message) + self.assertEqual(message.int32_value, 1) + +@@ -791,7 +791,7 @@ + json_format.Parse('{"messageValue": {}}', parsed_message) + self.assertTrue(parsed_message.HasField('message_value')) + # Null is not allowed to be used as an element in repeated field. +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + json_format.ParseError, + 'Failed to parse repeatedInt32Value field: ' + 'null is not allowed to be used as an element in a repeated field.', +@@ -837,7 +837,7 @@ + json_format.Parse(text, message) + # Proto2 does not accept unknown enums. + message = unittest_pb2.TestAllTypes() +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + json_format.ParseError, + 'Failed to parse optionalNestedEnum field: Invalid enum value 12345 ' + 'for enum type protobuf_unittest.TestAllTypes.NestedEnum.', +@@ -919,30 +919,30 @@ + def testInvalidMap(self): + message = json_format_proto3_pb2.TestMap() + text = '{"int32Map": {"null": 2, "2": 3}}' +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + json_format.ParseError, + 'Failed to parse int32Map field: invalid literal', + json_format.Parse, text, message) + text = '{"int32Map": {1: 2, "2": 3}}' +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + json_format.ParseError, + (r'Failed to load JSON: Expecting property name' + r'( enclosed in double quotes)?: line 1'), + json_format.Parse, text, message) + text = '{"boolMap": {"null": 1}}' +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + json_format.ParseError, + 'Failed to parse boolMap field: Expected "true" or "false", not null.', + json_format.Parse, text, message) + if sys.version_info < (2, 7): + return + text = r'{"stringMap": {"a": 3, "\u0061": 2}}' +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + json_format.ParseError, + 'Failed to load JSON: duplicate key a', + json_format.Parse, text, message) + text = r'{"stringMap": 0}' +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + json_format.ParseError, + 'Failed to parse stringMap field: Map field string_map must be ' + 'in a dict which is 0.', +@@ -951,31 +951,31 @@ + def testInvalidTimestamp(self): + message = json_format_proto3_pb2.TestTimestamp() + text = '{"value": "10000-01-01T00:00:00.00Z"}' +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + json_format.ParseError, + 'Failed to parse value field: ' + 'time data \'10000-01-01T00:00:00\' does not match' + ' format \'%Y-%m-%dT%H:%M:%S\'.', + json_format.Parse, text, message) + text = '{"value": "1970-01-01T00:00:00.0123456789012Z"}' +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + json_format.ParseError, + 'nanos 0123456789012 more than 9 fractional digits.', + json_format.Parse, text, message) + text = '{"value": "1972-01-01T01:00:00.01+08"}' +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + json_format.ParseError, + (r'Invalid timezone offset value: \+08.'), + json_format.Parse, text, message) + # Time smaller than minimum time. 
+ text = '{"value": "0000-01-01T00:00:00Z"}' +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + json_format.ParseError, + 'Failed to parse value field: year (0 )?is out of range.', + json_format.Parse, text, message) + # Time bigger than maxinum time. + message.value.seconds = 253402300800 +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + OverflowError, + 'date value out of range', + json_format.MessageToJson, message) +@@ -983,7 +983,7 @@ + def testInvalidOneof(self): + message = json_format_proto3_pb2.TestOneof() + text = '{"oneofInt32Value": 1, "oneofStringValue": "2"}' +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + json_format.ParseError, + 'Message type "proto3.TestOneof"' + ' should not have multiple "oneof_value" oneof fields.', +@@ -992,7 +992,7 @@ + def testInvalidListValue(self): + message = json_format_proto3_pb2.TestListValue() + text = '{"value": 1234}' +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + json_format.ParseError, + r'Failed to parse value field: ListValue must be in \[\] which is 1234', + json_format.Parse, text, message) +@@ -1000,7 +1000,7 @@ + def testInvalidStruct(self): + message = json_format_proto3_pb2.TestStruct() + text = '{"value": 1234}' +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + json_format.ParseError, + 'Failed to parse value field: Struct must be in a dict which is 1234', + json_format.Parse, text, message) +@@ -1008,17 +1008,17 @@ + def testInvalidAny(self): + message = any_pb2.Any() + text = '{"@type": "type.googleapis.com/google.protobuf.Int32Value"}' +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + KeyError, + 'value', + json_format.Parse, text, message) + text = '{"value": 1234}' +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + json_format.ParseError, + '@type is missing when parsing any message.', + json_format.Parse, text, message) + text = '{"@type": "type.googleapis.com/MessageNotExist", "value": 1234}' +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + TypeError, + 'Can not find message descriptor by type_url: ' + 'type.googleapis.com/MessageNotExist.', +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/message_factory_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/message_factory_test.py 2025-01-16 02:26:08.603262673 +0800 +@@ -68,8 +68,8 @@ + msg.factory_1_message.nested_factory_1_message.value = ( + 'nested message value') + msg.factory_1_message.scalar_value = 22 +- msg.factory_1_message.list_value.extend([u'one', u'two', u'three']) +- msg.factory_1_message.list_value.append(u'four') ++ msg.factory_1_message.list_value.extend(['one', 'two', 'three']) ++ msg.factory_1_message.list_value.append('four') + msg.factory_1_enum = 1 + msg.nested_factory_1_enum = 0 + msg.nested_factory_1_message.value = 'nested message value' +@@ -77,8 +77,8 @@ + msg.circular_message.circular_message.mandatory = 2 + msg.circular_message.scalar_value = 'one deep' + msg.scalar_value = 'zero deep' +- msg.list_value.extend([u'four', u'three', u'two']) +- msg.list_value.append(u'one') ++ msg.list_value.extend(['four', 'three', 'two']) ++ msg.list_value.append('one') + msg.grouped.add() + msg.grouped[0].part_1 = 'hello' + msg.grouped[0].part_2 = 'world' +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/message_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/message_test.py 2025-01-16 
02:26:08.603262673 +0800 +@@ -191,7 +191,7 @@ + + class BadArg(object): + +- def __nonzero__(self): ++ def __bool__(self): + raise BadArgError() + + def __bool__(self): +@@ -586,7 +586,7 @@ + self.assertEqual(message.repeated_string[0], 'a') + self.assertEqual(message.repeated_string[1], 'b') + self.assertEqual(message.repeated_string[2], 'c') +- self.assertEqual(str(message.repeated_string), str([u'a', u'b', u'c'])) ++ self.assertEqual(str(message.repeated_string), str(['a', 'b', 'c'])) + + message.repeated_bytes.append(b'a') + message.repeated_bytes.append(b'c') +@@ -890,7 +890,7 @@ + self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field')) + self.assertTrue(m.HasField('oneof_uint32')) + +- m.oneof_string = u'foo' ++ m.oneof_string = 'foo' + self.assertEqual('oneof_string', m.WhichOneof('oneof_field')) + self.assertFalse(m.HasField('oneof_uint32')) + self.assertTrue(m.HasField('oneof_string')) +@@ -1038,12 +1038,12 @@ + + # Repeated scalar + m.repeated_int32.append(1) +- sl = m.repeated_int32[long(0):long(len(m.repeated_int32))] ++ sl = m.repeated_int32[int(0):int(len(m.repeated_int32))] + self.assertEqual(len(m.repeated_int32), len(sl)) + + # Repeated composite + m.repeated_nested_message.add().bb = 3 +- sl = m.repeated_nested_message[long(0):long(len(m.repeated_nested_message))] ++ sl = m.repeated_nested_message[int(0):int(len(m.repeated_nested_message))] + self.assertEqual(len(m.repeated_nested_message), len(sl)) + + def testExtendShouldNotSwallowExceptions(self, message_module): +@@ -1055,7 +1055,7 @@ + m.repeated_nested_enum.extend( + a for i in range(10)) # pylint: disable=undefined-variable + +- FALSY_VALUES = [None, False, 0, 0.0, b'', u'', bytearray(), [], {}, set()] ++ FALSY_VALUES = [None, False, 0, 0.0, b'', '', bytearray(), [], {}, set()] + + def testExtendInt32WithNothing(self, message_module): + """Test no-ops extending repeated int32 fields.""" +@@ -1146,7 +1146,7 @@ + def __init__(self, values=None): + self._list = values or [] + +- def __nonzero__(self): ++ def __bool__(self): + size = len(self._list) + if size == 0: + return False +@@ -1230,7 +1230,7 @@ + m = message_module.TestAllTypes() + with self.assertRaises(IndexError) as _: + m.repeated_int32.pop() +- m.repeated_int32.extend(range(5)) ++ m.repeated_int32.extend(list(range(5))) + self.assertEqual(4, m.repeated_int32.pop()) + self.assertEqual(0, m.repeated_int32.pop(0)) + self.assertEqual(2, m.repeated_int32.pop(1)) +@@ -1335,13 +1335,13 @@ + message.optional_bool = True + message.optional_nested_message.bb = 15 + +- self.assertTrue(message.HasField(u"optional_int32")) ++ self.assertTrue(message.HasField("optional_int32")) + self.assertTrue(message.HasField("optional_bool")) + self.assertTrue(message.HasField("optional_nested_message")) + + # Clearing the fields unsets them and resets their value to default. + message.ClearField("optional_int32") +- message.ClearField(u"optional_bool") ++ message.ClearField("optional_bool") + message.ClearField("optional_nested_message") + + self.assertFalse(message.HasField("optional_int32")) +@@ -1535,7 +1535,7 @@ + self.assertEqual(0, len(message.repeated_float)) + self.assertEqual(42, message.default_int64) + +- message = unittest_pb2.TestAllTypes(optional_nested_enum=u'BAZ') ++ message = unittest_pb2.TestAllTypes(optional_nested_enum='BAZ') + self.assertEqual(unittest_pb2.TestAllTypes.BAZ, + message.optional_nested_enum) + +@@ -1557,7 +1557,7 @@ + # Both string/unicode field name keys should work. 
+ kwargs = { + 'optional_int32': 100, +- u'optional_fixed32': 200, ++ 'optional_fixed32': 200, + } + msg = unittest_pb2.TestAllTypes(**kwargs) + self.assertEqual(100, msg.optional_int32) +@@ -1813,7 +1813,7 @@ + def testStringUnicodeConversionInMap(self): + msg = map_unittest_pb2.TestMap() + +- unicode_obj = u'\u1234' ++ unicode_obj = '\u1234' + bytes_obj = unicode_obj.encode('utf8') + + msg.map_string_string[bytes_obj] = bytes_obj +@@ -2000,7 +2000,7 @@ + + def testMergeFromBadType(self): + msg = map_unittest_pb2.TestMap() +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + TypeError, + r'Parameter to MergeFrom\(\) must be instance of same class: expected ' + r'.*TestMap got int\.'): +@@ -2008,7 +2008,7 @@ + + def testCopyFromBadType(self): + msg = map_unittest_pb2.TestMap() +- with self.assertRaisesRegexp( ++ with self.assertRaisesRegex( + TypeError, + r'Parameter to [A-Za-z]*From\(\) must be instance of same class: ' + r'expected .*TestMap got int\.'): +@@ -2016,10 +2016,10 @@ + + def testIntegerMapWithLongs(self): + msg = map_unittest_pb2.TestMap() +- msg.map_int32_int32[long(-123)] = long(-456) +- msg.map_int64_int64[long(-2**33)] = long(-2**34) +- msg.map_uint32_uint32[long(123)] = long(456) +- msg.map_uint64_uint64[long(2**33)] = long(2**34) ++ msg.map_int32_int32[int(-123)] = int(-456) ++ msg.map_int64_int64[int(-2**33)] = int(-2**34) ++ msg.map_uint32_uint32[int(123)] = int(456) ++ msg.map_uint64_uint64[int(2**33)] = int(2**34) + + serialized = msg.SerializeToString() + msg2 = map_unittest_pb2.TestMap() +@@ -2119,7 +2119,7 @@ + def testMapIteration(self): + msg = map_unittest_pb2.TestMap() + +- for k, v in msg.map_int32_int32.items(): ++ for k, v in list(msg.map_int32_int32.items()): + # Should not be reached. + self.assertTrue(False) + +@@ -2129,7 +2129,7 @@ + self.assertEqual(3, len(msg.map_int32_int32)) + + matching_dict = {2: 4, 3: 6, 4: 8} +- self.assertMapIterEquals(msg.map_int32_int32.items(), matching_dict) ++ self.assertMapIterEquals(list(msg.map_int32_int32.items()), matching_dict) + + def testPython2Map(self): + if sys.version_info < (3,): +@@ -2147,9 +2147,9 @@ + self.assertEqual(next(iterator), seq[0]) + self.assertEqual(list(iterator), seq[1:]) + +- CheckItems(map_int32.items(), map_int32.iteritems()) +- CheckItems(map_int32.keys(), map_int32.iterkeys()) +- CheckItems(map_int32.values(), map_int32.itervalues()) ++ CheckItems(list(map_int32.items()), iter(map_int32.items())) ++ CheckItems(list(map_int32.keys()), iter(map_int32.keys())) ++ CheckItems(list(map_int32.values()), iter(map_int32.values())) + + self.assertEqual(6, map_int32.get(3)) + self.assertEqual(None, map_int32.get(999)) +@@ -2190,8 +2190,8 @@ + msg.map_string_string['variables'] = '' + msg.map_string_string['init_op'] = '' + msg.map_string_string['summaries'] = '' +- items1 = msg.map_string_string.items() +- items2 = msg.map_string_string.items() ++ items1 = list(msg.map_string_string.items()) ++ items2 = list(msg.map_string_string.items()) + self.assertEqual(items1, items2) + + def testMapDeterministicSerialization(self): +@@ -2231,7 +2231,7 @@ + msg.map_int32_int32[3] = 6 + msg.map_int32_int32[4] = 8 + +- it = msg.map_int32_int32.items() ++ it = list(msg.map_int32_int32.items()) + del msg + + matching_dict = {2: 4, 3: 6, 4: 8} +@@ -2278,7 +2278,7 @@ + msg.ClearField('map_int32_int32') + self.assertEqual(b'', msg.SerializeToString()) + matching_dict = {2: 4, 3: 6, 4: 8} +- self.assertMapIterEquals(int32_map.items(), matching_dict) ++ self.assertMapIterEquals(list(int32_map.items()), 
matching_dict) + + def testMessageMapValidAfterFieldCleared(self): + # Map needs to work even if field is cleared. +@@ -2291,7 +2291,7 @@ + + msg.ClearField('map_int32_foreign_message') + self.assertEqual(b'', msg.SerializeToString()) +- self.assertTrue(2 in int32_foreign_message.keys()) ++ self.assertTrue(2 in list(int32_foreign_message.keys())) + + def testMessageMapItemValidAfterTopMessageCleared(self): + # Message map item needs to work even if it is cleared. +@@ -2385,14 +2385,14 @@ + + # Test optional_string=u'😍' is accepted. + serialized = unittest_proto3_arena_pb2.TestAllTypes( +- optional_string=u'😍').SerializeToString() ++ optional_string='😍').SerializeToString() + msg2 = unittest_proto3_arena_pb2.TestAllTypes() + msg2.MergeFromString(serialized) +- self.assertEqual(msg2.optional_string, u'😍') ++ self.assertEqual(msg2.optional_string, '😍') + + msg = unittest_proto3_arena_pb2.TestAllTypes( +- optional_string=u'\ud001') +- self.assertEqual(msg.optional_string, u'\ud001') ++ optional_string='\ud001') ++ self.assertEqual(msg.optional_string, '\ud001') + + @unittest.skipIf(six.PY2, 'Surrogates are acceptable in python2') + def testSurrogatesInPython3(self): +@@ -2410,16 +2410,16 @@ + # Surrogates are rejected at setters in Python3. + with self.assertRaises(ValueError): + unittest_proto3_arena_pb2.TestAllTypes( +- optional_string=u'\ud801\udc01') ++ optional_string='\ud801\udc01') + with self.assertRaises(ValueError): + unittest_proto3_arena_pb2.TestAllTypes( + optional_string=b'\xed\xa0\x81') + with self.assertRaises(ValueError): + unittest_proto3_arena_pb2.TestAllTypes( +- optional_string=u'\ud801') ++ optional_string='\ud801') + with self.assertRaises(ValueError): + unittest_proto3_arena_pb2.TestAllTypes( +- optional_string=u'\ud801\ud801') ++ optional_string='\ud801\ud801') + + @unittest.skipIf(six.PY3 or sys.maxunicode == UCS2_MAXUNICODE, + 'Surrogates are rejected at setters in Python3') +@@ -2427,26 +2427,26 @@ + # Test optional_string=u'\ud801\udc01'. + # surrogate pair is acceptable in python2. + msg = unittest_proto3_arena_pb2.TestAllTypes( +- optional_string=u'\ud801\udc01') ++ optional_string='\ud801\udc01') + # TODO(jieluo): Change pure python to have same behavior with c extension. + # Some build in python2 consider u'\ud801\udc01' and u'\U00010401' are + # equal, some are not equal. + if api_implementation.Type() == 'python': +- self.assertEqual(msg.optional_string, u'\ud801\udc01') ++ self.assertEqual(msg.optional_string, '\ud801\udc01') + else: +- self.assertEqual(msg.optional_string, u'\U00010401') ++ self.assertEqual(msg.optional_string, '\U00010401') + serialized = msg.SerializeToString() + msg2 = unittest_proto3_arena_pb2.TestAllTypes() + msg2.MergeFromString(serialized) +- self.assertEqual(msg2.optional_string, u'\U00010401') ++ self.assertEqual(msg2.optional_string, '\U00010401') + + # Python2 does not reject surrogates at setters. 
+ msg = unittest_proto3_arena_pb2.TestAllTypes( + optional_string=b'\xed\xa0\x81') + unittest_proto3_arena_pb2.TestAllTypes( +- optional_string=u'\ud801') ++ optional_string='\ud801') + unittest_proto3_arena_pb2.TestAllTypes( +- optional_string=u'\ud801\ud801') ++ optional_string='\ud801\ud801') + + + @testing_refleaks.TestCase +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/python_message.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/python_message.py 2025-01-16 02:26:08.603262673 +0800 +@@ -359,7 +359,7 @@ + + def _AddClassAttributesForNestedExtensions(descriptor, dictionary): + extensions = descriptor.extensions_by_name +- for extension_name, extension_field in extensions.items(): ++ for extension_name, extension_field in list(extensions.items()): + assert extension_name not in dictionary + dictionary[extension_name] = extension_field + +@@ -505,7 +505,7 @@ + self._is_present_in_parent = False + self._listener = message_listener_mod.NullMessageListener() + self._listener_for_children = _Listener(self) +- for field_name, field_value in kwargs.items(): ++ for field_name, field_value in list(kwargs.items()): + field = _GetFieldByName(message_descriptor, field_name) + if field is None: + raise TypeError('%s() got an unexpected keyword argument "%s"' % +@@ -768,7 +768,7 @@ + def _AddPropertiesForExtensions(descriptor, cls): + """Adds properties for all fields in this protocol message type.""" + extensions = descriptor.extensions_by_name +- for extension_name, extension_field in extensions.items(): ++ for extension_name, extension_field in list(extensions.items()): + constant_name = extension_name.upper() + '_FIELD_NUMBER' + setattr(cls, constant_name, extension_field.number) + +@@ -812,7 +812,7 @@ + """Helper for _AddMessageMethods().""" + + def ListFields(self): +- all_fields = [item for item in self._fields.items() if _IsPresent(item)] ++ all_fields = [item for item in list(self._fields.items()) if _IsPresent(item)] + all_fields.sort(key = lambda item: item[0].number) + return all_fields + +@@ -1307,7 +1307,7 @@ + + fields = self._fields + +- for field, value in msg._fields.items(): ++ for field, value in list(msg._fields.items()): + if field.label == LABEL_REPEATED: + field_value = fields.get(field) + if field_value is None: +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/reflection_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/reflection_test.py 2025-01-16 02:26:08.603262673 +0800 +@@ -574,7 +574,7 @@ + proto.default_import_enum) + + proto = unittest_pb2.TestExtremeDefaultValues() +- self.assertEqual(u'\u1234', proto.utf8_string) ++ self.assertEqual('\u1234', proto.utf8_string) + + def testHasFieldWithUnknownFieldName(self): + proto = unittest_pb2.TestAllTypes() +@@ -649,7 +649,7 @@ + TestGetAndDeserialize('optional_int32', 1, int) + TestGetAndDeserialize('optional_int32', 1 << 30, int) + TestGetAndDeserialize('optional_uint32', 1 << 30, int) +- integer_64 = long ++ integer_64 = int + if struct.calcsize('L') == 4: + # Python only has signed ints, so 32-bit python can't fit an uint32 + # in an int. +@@ -676,7 +676,7 @@ + pb.optional_uint64 = '2' + + # The exact error should propagate with a poorly written custom integer. 
+- with self.assertRaisesRegexp(RuntimeError, 'my_error'): ++ with self.assertRaisesRegex(RuntimeError, 'my_error'): + pb.optional_uint64 = test_util.NonStandardInteger(5, 'my_error') + + def assetIntegerBoundsChecking(self, integer_fn): +@@ -1860,7 +1860,7 @@ + + # Assignment of a unicode object to a field of type 'bytes' is not allowed. + self.assertRaises(TypeError, +- setattr, proto, 'optional_bytes', u'unicode object') ++ setattr, proto, 'optional_bytes', 'unicode object') + + # Check that the default value is of python's 'unicode' type. + self.assertEqual(type(proto.optional_string), six.text_type) +@@ -1876,10 +1876,10 @@ + self.assertRaises(ValueError, + setattr, proto, 'optional_string', b'a\x80a') + # No exception: Assign already encoded UTF-8 bytes to a string field. +- utf8_bytes = u'Тест'.encode('utf-8') ++ utf8_bytes = 'Тест'.encode('utf-8') + proto.optional_string = utf8_bytes + # No exception: Assign the a non-ascii unicode object. +- proto.optional_string = u'Тест' ++ proto.optional_string = 'Тест' + # No exception thrown (normal str assignment containing ASCII). + proto.optional_string = 'abc' + +@@ -1888,7 +1888,7 @@ + extension_message = message_set_extensions_pb2.TestMessageSetExtension2 + extension = extension_message.message_set_extension + +- test_utf8 = u'Тест' ++ test_utf8 = 'Тест' + test_utf8_bytes = test_utf8.encode('utf-8') + + # 'Test' in another language, using UTF-8 charset. +@@ -1939,7 +1939,7 @@ + + def testBytesInTextFormat(self): + proto = unittest_pb2.TestAllTypes(optional_bytes=b'\x00\x7f\x80\xff') +- self.assertEqual(u'optional_bytes: "\\000\\177\\200\\377"\n', ++ self.assertEqual('optional_bytes: "\\000\\177\\200\\377"\n', + six.text_type(proto)) + + def testEmptyNestedMessage(self): +@@ -2180,7 +2180,7 @@ + self.assertEqual(expected_varint_size + 1, self.Size()) + Test(0, 1) + Test(1, 1) +- for i, num_bytes in zip(range(7, 63, 7), range(1, 10000)): ++ for i, num_bytes in zip(list(range(7, 63, 7)), list(range(1, 10000))): + Test((1 << i) - 1, num_bytes) + Test(-1, 10) + Test(-2, 10) +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/test_util.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/test_util.py 2025-01-16 02:26:08.604345988 +0800 +@@ -44,7 +44,7 @@ + from google.protobuf import unittest_pb2 + + try: +- long # Python 2 ++ int # Python 2 + except NameError: + long = int # Python 3 + +@@ -80,7 +80,7 @@ + message.optional_float = 111 + message.optional_double = 112 + message.optional_bool = True +- message.optional_string = u'115' ++ message.optional_string = '115' + message.optional_bytes = b'116' + + if IsProto2(message): +@@ -95,8 +95,8 @@ + if IsProto2(message): + message.optional_import_enum = unittest_import_pb2.IMPORT_BAZ + +- message.optional_string_piece = u'124' +- message.optional_cord = u'125' ++ message.optional_string_piece = '124' ++ message.optional_cord = '125' + + # + # Repeated fields. 
+@@ -115,7 +115,7 @@ + message.repeated_float.append(211) + message.repeated_double.append(212) + message.repeated_bool.append(True) +- message.repeated_string.append(u'215') ++ message.repeated_string.append('215') + message.repeated_bytes.append(b'216') + + if IsProto2(message): +@@ -130,8 +130,8 @@ + if IsProto2(message): + message.repeated_import_enum.append(unittest_import_pb2.IMPORT_BAR) + +- message.repeated_string_piece.append(u'224') +- message.repeated_cord.append(u'225') ++ message.repeated_string_piece.append('224') ++ message.repeated_cord.append('225') + + # Add a second one of each field and set value by index. + message.repeated_int32.append(0) +@@ -147,7 +147,7 @@ + message.repeated_float.append(0) + message.repeated_double.append(0) + message.repeated_bool.append(True) +- message.repeated_string.append(u'0') ++ message.repeated_string.append('0') + message.repeated_bytes.append(b'0') + message.repeated_int32[1] = 301 + message.repeated_int64[1] = 302 +@@ -162,7 +162,7 @@ + message.repeated_float[1] = 311 + message.repeated_double[1] = 312 + message.repeated_bool[1] = False +- message.repeated_string[1] = u'315' ++ message.repeated_string[1] = '315' + message.repeated_bytes[1] = b'316' + + if IsProto2(message): +@@ -178,8 +178,8 @@ + if IsProto2(message): + message.repeated_import_enum.append(unittest_import_pb2.IMPORT_BAZ) + +- message.repeated_string_piece.append(u'324') +- message.repeated_cord.append(u'325') ++ message.repeated_string_piece.append('324') ++ message.repeated_cord.append('325') + + # + # Fields that have defaults. +@@ -248,7 +248,7 @@ + extensions[pb2.optional_float_extension] = 111 + extensions[pb2.optional_double_extension] = 112 + extensions[pb2.optional_bool_extension] = True +- extensions[pb2.optional_string_extension] = u'115' ++ extensions[pb2.optional_string_extension] = '115' + extensions[pb2.optional_bytes_extension] = b'116' + + extensions[pb2.optionalgroup_extension].a = 117 +@@ -263,8 +263,8 @@ + extensions[pb2.optional_foreign_enum_extension] = pb2.FOREIGN_BAZ + extensions[pb2.optional_import_enum_extension] = import_pb2.IMPORT_BAZ + +- extensions[pb2.optional_string_piece_extension] = u'124' +- extensions[pb2.optional_cord_extension] = u'125' ++ extensions[pb2.optional_string_piece_extension] = '124' ++ extensions[pb2.optional_cord_extension] = '125' + + # + # Repeated fields. +@@ -283,7 +283,7 @@ + extensions[pb2.repeated_float_extension].append(211) + extensions[pb2.repeated_double_extension].append(212) + extensions[pb2.repeated_bool_extension].append(True) +- extensions[pb2.repeated_string_extension].append(u'215') ++ extensions[pb2.repeated_string_extension].append('215') + extensions[pb2.repeated_bytes_extension].append(b'216') + + extensions[pb2.repeatedgroup_extension].add().a = 217 +@@ -296,8 +296,8 @@ + extensions[pb2.repeated_foreign_enum_extension].append(pb2.FOREIGN_BAR) + extensions[pb2.repeated_import_enum_extension].append(import_pb2.IMPORT_BAR) + +- extensions[pb2.repeated_string_piece_extension].append(u'224') +- extensions[pb2.repeated_cord_extension].append(u'225') ++ extensions[pb2.repeated_string_piece_extension].append('224') ++ extensions[pb2.repeated_cord_extension].append('225') + + # Append a second one of each field. 
+ extensions[pb2.repeated_int32_extension].append(301) +@@ -313,7 +313,7 @@ + extensions[pb2.repeated_float_extension].append(311) + extensions[pb2.repeated_double_extension].append(312) + extensions[pb2.repeated_bool_extension].append(False) +- extensions[pb2.repeated_string_extension].append(u'315') ++ extensions[pb2.repeated_string_extension].append('315') + extensions[pb2.repeated_bytes_extension].append(b'316') + + extensions[pb2.repeatedgroup_extension].add().a = 317 +@@ -326,8 +326,8 @@ + extensions[pb2.repeated_foreign_enum_extension].append(pb2.FOREIGN_BAZ) + extensions[pb2.repeated_import_enum_extension].append(import_pb2.IMPORT_BAZ) + +- extensions[pb2.repeated_string_piece_extension].append(u'324') +- extensions[pb2.repeated_cord_extension].append(u'325') ++ extensions[pb2.repeated_string_piece_extension].append('324') ++ extensions[pb2.repeated_cord_extension].append('325') + + # + # Fields with defaults. +@@ -346,19 +346,19 @@ + extensions[pb2.default_float_extension] = 411 + extensions[pb2.default_double_extension] = 412 + extensions[pb2.default_bool_extension] = False +- extensions[pb2.default_string_extension] = u'415' ++ extensions[pb2.default_string_extension] = '415' + extensions[pb2.default_bytes_extension] = b'416' + + extensions[pb2.default_nested_enum_extension] = pb2.TestAllTypes.FOO + extensions[pb2.default_foreign_enum_extension] = pb2.FOREIGN_FOO + extensions[pb2.default_import_enum_extension] = import_pb2.IMPORT_FOO + +- extensions[pb2.default_string_piece_extension] = u'424' ++ extensions[pb2.default_string_piece_extension] = '424' + extensions[pb2.default_cord_extension] = '425' + + extensions[pb2.oneof_uint32_extension] = 601 + extensions[pb2.oneof_nested_message_extension].bb = 602 +- extensions[pb2.oneof_string_extension] = u'603' ++ extensions[pb2.oneof_string_extension] = '603' + extensions[pb2.oneof_bytes_extension] = b'604' + + +@@ -742,7 +742,7 @@ + def __long__(self): + if self.error_string_on_conversion: + raise RuntimeError(self.error_string_on_conversion) +- return long(self.val) ++ return int(self.val) + + def __abs__(self): + return NonStandardInteger(operator.abs(self.val)) +@@ -847,7 +847,7 @@ + def __bool__(self): + return self.val + +- def __nonzero__(self): ++ def __bool__(self): + return self.val + + def __ceil__(self): +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/testing_refleaks.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/testing_refleaks.py 2025-01-16 02:26:08.604345988 +0800 +@@ -42,7 +42,7 @@ + import sys + + try: +- import copy_reg as copyreg #PY26 ++ import copyreg as copyreg #PY26 + except ImportError: + import copyreg + +@@ -93,7 +93,7 @@ + super(ReferenceLeakCheckerMixin, self).run(result=local_result) + newrefcount = self._getRefcounts() + refcount_deltas.append(newrefcount - oldrefcount) +- print(refcount_deltas, self) ++ print((refcount_deltas, self)) + + try: + self.assertEqual(refcount_deltas, [0] * self.NB_RUNS) +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/text_format_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/text_format_test.py 2025-01-16 02:26:08.605429303 +0800 +@@ -113,7 +113,7 @@ + message.repeated_double.append(1.23e22) + message.repeated_double.append(1.23e-18) + message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"') +- message.repeated_string.append(u'\u00fc\ua71f') ++ 
message.repeated_string.append('\u00fc\ua71f') + self.CompareToGoldenText( + self.RemoveRedundantZeros(text_format.MessageToString(message)), + 'repeated_int64: -9223372036854775808\n' +@@ -131,7 +131,7 @@ + pass + + message = message_module.TestAllTypes() +- message.repeated_string.append(UnicodeSub(u'\u00fc\ua71f')) ++ message.repeated_string.append(UnicodeSub('\u00fc\ua71f')) + self.CompareToGoldenText( + text_format.MessageToString(message), + 'repeated_string: "\\303\\274\\352\\234\\237"\n') +@@ -208,7 +208,7 @@ + message.repeated_double.append(1.23e22) + message.repeated_double.append(1.23e-18) + message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"') +- message.repeated_string.append(u'\u00fc\ua71f') ++ message.repeated_string.append('\u00fc\ua71f') + self.CompareToGoldenText( + self.RemoveRedundantZeros(text_format.MessageToString( + message, as_one_line=True)), +@@ -229,7 +229,7 @@ + message.repeated_double.append(1.23e22) + message.repeated_double.append(1.23e-18) + message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"') +- message.repeated_string.append(u'\u00fc\ua71f') ++ message.repeated_string.append('\u00fc\ua71f') + + # Test as_utf8 = False. + wire_text = text_format.MessageToString(message, +@@ -252,9 +252,9 @@ + + def testPrintRawUtf8String(self, message_module): + message = message_module.TestAllTypes() +- message.repeated_string.append(u'\u00fc\t\ua71f') ++ message.repeated_string.append('\u00fc\t\ua71f') + text = text_format.MessageToString(message, as_utf8=True) +- golden_unicode = u'repeated_string: "\u00fc\\t\ua71f"\n' ++ golden_unicode = 'repeated_string: "\u00fc\\t\ua71f"\n' + golden_text = golden_unicode if six.PY3 else golden_unicode.encode('utf-8') + # MessageToString always returns a native str. + self.CompareToGoldenText(text, golden_text) +@@ -334,7 +334,7 @@ + self.assertEqual('c: 123\n', str(message)) + + def testMessageToStringUnicode(self, message_module): +- golden_unicode = u'Á short desçription and a 🍌.' ++ golden_unicode = 'Á short desçription and a 🍌.' + golden_bytes = golden_unicode.encode('utf-8') + message = message_module.TestAllTypes() + message.optional_string = golden_unicode +@@ -348,7 +348,7 @@ + self.CompareToGoldenText(text, golden_message) + + def testMessageToStringASCII(self, message_module): +- golden_unicode = u'Á short desçription and a 🍌.' ++ golden_unicode = 'Á short desçription and a 🍌.' 
+ golden_bytes = golden_unicode.encode('utf-8') + message = message_module.TestAllTypes() + message.optional_string = golden_unicode +@@ -461,7 +461,7 @@ + + def testRawUtf8RoundTrip(self, message_module): + message = message_module.TestAllTypes() +- message.repeated_string.append(u'\u00fc\t\ua71f') ++ message.repeated_string.append('\u00fc\t\ua71f') + utf8_text = text_format.MessageToBytes(message, as_utf8=True) + golden_bytes = b'repeated_string: "\xc3\xbc\\t\xea\x9c\x9f"\n' + self.CompareToGoldenText(utf8_text, golden_bytes) +@@ -474,7 +474,7 @@ + + def testEscapedUtf8ASCIIRoundTrip(self, message_module): + message = message_module.TestAllTypes() +- message.repeated_string.append(u'\u00fc\t\ua71f') ++ message.repeated_string.append('\u00fc\t\ua71f') + ascii_text = text_format.MessageToBytes(message) # as_utf8=False default + golden_bytes = b'repeated_string: "\\303\\274\\t\\352\\234\\237"\n' + self.CompareToGoldenText(ascii_text, golden_bytes) +@@ -519,13 +519,13 @@ + test_util.ExpectAllFieldsSet(self, message) + + msg2 = message_module.TestAllTypes() +- text = (u'optional_string: "café"') ++ text = ('optional_string: "café"') + text_format.Merge(text, msg2) +- self.assertEqual(msg2.optional_string, u'café') ++ self.assertEqual(msg2.optional_string, 'café') + msg2.Clear() +- self.assertEqual(msg2.optional_string, u'') ++ self.assertEqual(msg2.optional_string, '') + text_format.Parse(text, msg2) +- self.assertEqual(msg2.optional_string, u'café') ++ self.assertEqual(msg2.optional_string, 'café') + + def testParseDoubleToFloat(self, message_module): + message = message_module.TestAllTypes() +@@ -557,8 +557,8 @@ + self.assertEqual(1.23e-18, message.repeated_double[2]) + self.assertEqual('\000\001\a\b\f\n\r\t\v\\\'"', message.repeated_string[0]) + self.assertEqual('foocorgegrault', message.repeated_string[1]) +- self.assertEqual(u'\u00fc\ua71f', message.repeated_string[2]) +- self.assertEqual(u'\u00fc', message.repeated_string[3]) ++ self.assertEqual('\u00fc\ua71f', message.repeated_string[2]) ++ self.assertEqual('\u00fc', message.repeated_string[3]) + + def testParseTrailingCommas(self, message_module): + message = message_module.TestAllTypes() +@@ -572,8 +572,8 @@ + self.assertEqual(100, message.repeated_int64[0]) + self.assertEqual(200, message.repeated_int64[1]) + self.assertEqual(300, message.repeated_int64[2]) +- self.assertEqual(u'one', message.repeated_string[0]) +- self.assertEqual(u'two', message.repeated_string[1]) ++ self.assertEqual('one', message.repeated_string[0]) ++ self.assertEqual('two', message.repeated_string[1]) + + def testParseRepeatedScalarShortFormat(self, message_module): + message = message_module.TestAllTypes() +@@ -586,8 +586,8 @@ + self.assertEqual(100, message.repeated_int64[0]) + self.assertEqual(200, message.repeated_int64[1]) + self.assertEqual(300, message.repeated_int64[2]) +- self.assertEqual(u'one', message.repeated_string[0]) +- self.assertEqual(u'two', message.repeated_string[1]) ++ self.assertEqual('one', message.repeated_string[0]) ++ self.assertEqual('two', message.repeated_string[1]) + + def testParseRepeatedMessageShortFormat(self, message_module): + message = message_module.TestAllTypes() +@@ -692,7 +692,7 @@ + # itself for string fields. It also demonstrates escaped binary data. + # The ur"" string prefix is unfortunately missing from Python 3 + # so we resort to double escaping our \s so that they come through. 
+- _UNICODE_SAMPLE = u""" ++ _UNICODE_SAMPLE = """ + optional_bytes: 'Á short desçription' + optional_string: 'Á short desçription' + repeated_bytes: '\\303\\201 short des\\303\\247ription' +@@ -700,10 +700,10 @@ + repeated_string: '\\xd0\\x9f\\xd1\\x80\\xd0\\xb8\\xd0\\xb2\\xd0\\xb5\\xd1\\x82' + """ + _BYTES_SAMPLE = _UNICODE_SAMPLE.encode('utf-8') +- _GOLDEN_UNICODE = u'Á short desçription' ++ _GOLDEN_UNICODE = 'Á short desçription' + _GOLDEN_BYTES = _GOLDEN_UNICODE.encode('utf-8') + _GOLDEN_BYTES_1 = b'\x12\x34\x56\x78\x90\xab\xcd\xef' +- _GOLDEN_STR_0 = u'Привет' ++ _GOLDEN_STR_0 = 'Привет' + + def testParseUnicode(self, message_module): + m = message_module.TestAllTypes() +@@ -754,7 +754,7 @@ + + def testFromUnicodeLines(self, message_module): + m = message_module.TestAllTypes() +- text_format.ParseLines(self._UNICODE_SAMPLE.split(u'\n'), m) ++ text_format.ParseLines(self._UNICODE_SAMPLE.split('\n'), m) + self.assertEqual(m.optional_bytes, self._GOLDEN_BYTES) + self.assertEqual(m.optional_string, self._GOLDEN_UNICODE) + self.assertEqual(m.repeated_bytes[0], self._GOLDEN_BYTES) +@@ -838,7 +838,7 @@ + message = unittest_pb2.TestAllTypes() + message.optional_int32 = 101 + message.optional_double = 102.0 +- message.optional_string = u'hello' ++ message.optional_string = 'hello' + message.optional_bytes = b'103' + message.optionalgroup.a = 104 + message.optional_nested_message.bb = 105 +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/type_checkers.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/type_checkers.py 2025-01-16 02:26:08.605429303 +0800 +@@ -201,7 +201,7 @@ + return proposed_value + + def DefaultValue(self): +- return u"" ++ return "" + + + class Int32ValueChecker(IntValueChecker): +@@ -221,13 +221,13 @@ + class Int64ValueChecker(IntValueChecker): + _MIN = -(1 << 63) + _MAX = (1 << 63) - 1 +- _TYPE = long ++ _TYPE = int + + + class Uint64ValueChecker(IntValueChecker): + _MIN = 0 + _MAX = (1 << 64) - 1 +- _TYPE = long ++ _TYPE = int + + + # The max 4 bytes float is about 3.4028234663852886e+38 +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/well_known_types.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/well_known_types.py 2025-01-16 02:26:08.605429303 +0800 +@@ -774,7 +774,7 @@ + return iter(self.fields) + + def keys(self): # pylint: disable=invalid-name +- return self.fields.keys() ++ return list(self.fields.keys()) + + def values(self): # pylint: disable=invalid-name + return [self[key] for key in self] +@@ -797,7 +797,7 @@ + return self.fields[key].struct_value + + def update(self, dictionary): # pylint: disable=invalid-name +- for key, value in dictionary.items(): ++ for key, value in list(dictionary.items()): + _SetStructValue(self.fields[key], value) + + collections_abc.MutableMapping.register(Struct) +--- a/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/well_known_types_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/protobuf/python/google/protobuf/internal/well_known_types_test.py 2025-01-16 02:26:08.605429303 +0800 +@@ -307,82 +307,82 @@ + + def testInvalidTimestamp(self): + message = timestamp_pb2.Timestamp() +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + ValueError, + 'Failed to parse timestamp: missing valid timezone offset.', + message.FromJsonString, + '') +- self.assertRaisesRegexp( 
++ self.assertRaisesRegex( + ValueError, + 'Failed to parse timestamp: invalid trailing data ' + '1970-01-01T00:00:01Ztrail.', + message.FromJsonString, + '1970-01-01T00:00:01Ztrail') +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + ValueError, + 'time data \'10000-01-01T00:00:00\' does not match' + ' format \'%Y-%m-%dT%H:%M:%S\'', + message.FromJsonString, '10000-01-01T00:00:00.00Z') +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + ValueError, + 'nanos 0123456789012 more than 9 fractional digits.', + message.FromJsonString, + '1970-01-01T00:00:00.0123456789012Z') +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + ValueError, + (r'Invalid timezone offset value: \+08.'), + message.FromJsonString, + '1972-01-01T01:00:00.01+08',) +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + ValueError, + 'year (0 )?is out of range', + message.FromJsonString, + '0000-01-01T00:00:00Z') + message.seconds = 253402300800 +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + OverflowError, + 'date value out of range', + message.ToJsonString) + + def testInvalidDuration(self): + message = duration_pb2.Duration() +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + ValueError, + 'Duration must end with letter "s": 1.', + message.FromJsonString, '1') +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + ValueError, + 'Couldn\'t parse duration: 1...2s.', + message.FromJsonString, '1...2s') + text = '-315576000001.000000000s' +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + ValueError, + r'Duration is not valid\: Seconds -315576000001 must be in range' + r' \[-315576000000\, 315576000000\].', + message.FromJsonString, text) + text = '315576000001.000000000s' +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + ValueError, + r'Duration is not valid\: Seconds 315576000001 must be in range' + r' \[-315576000000\, 315576000000\].', + message.FromJsonString, text) + message.seconds = -315576000001 + message.nanos = 0 +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + ValueError, + r'Duration is not valid\: Seconds -315576000001 must be in range' + r' \[-315576000000\, 315576000000\].', + message.ToJsonString) + message.seconds = 0 + message.nanos = 999999999 + 1 +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + ValueError, + r'Duration is not valid\: Nanos 1000000000 must be in range' + r' \[-999999999\, 999999999\].', + message.ToJsonString) + message.seconds = -1 + message.nanos = 1 +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + ValueError, + r'Duration is not valid\: Sign mismatch.', + message.ToJsonString) +@@ -707,7 +707,7 @@ + well_known_types._SnakeCaseToCamelCase('foo3_bar')) + + # No uppercase letter is allowed. +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + ValueError, + 'Fail to print FieldMask to Json string: Path name Foo must ' + 'not contain uppercase letters.', +@@ -717,19 +717,19 @@ + # 1. "_" cannot be followed by another "_". + # 2. "_" cannot be followed by a digit. + # 3. "_" cannot appear as the last character. 
+- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + ValueError, + 'Fail to print FieldMask to Json string: The character after a ' + '"_" must be a lowercase letter in path name foo__bar.', + well_known_types._SnakeCaseToCamelCase, + 'foo__bar') +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + ValueError, + 'Fail to print FieldMask to Json string: The character after a ' + '"_" must be a lowercase letter in path name foo_3bar.', + well_known_types._SnakeCaseToCamelCase, + 'foo_3bar') +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + ValueError, + 'Fail to print FieldMask to Json string: Trailing "_" in path ' + 'name foo_bar_.', +@@ -743,7 +743,7 @@ + well_known_types._CamelCaseToSnakeCase('FooBar')) + self.assertEqual('foo3_bar', + well_known_types._CamelCaseToSnakeCase('foo3Bar')) +- self.assertRaisesRegexp( ++ self.assertRaisesRegex( + ValueError, + 'Fail to parse FieldMask: Path name foo_bar must not contain "_"s.', + well_known_types._CamelCaseToSnakeCase, +@@ -787,20 +787,20 @@ + struct2.ParseFromString(serialized) + + self.assertEqual(struct, struct2) +- for key, value in struct.items(): ++ for key, value in list(struct.items()): + self.assertIn(key, struct) + self.assertIn(key, struct2) + self.assertEqual(value, struct2[key]) + +- self.assertEqual(7, len(struct.keys())) +- self.assertEqual(7, len(struct.values())) +- for key in struct.keys(): ++ self.assertEqual(7, len(list(struct.keys()))) ++ self.assertEqual(7, len(list(struct.values()))) ++ for key in list(struct.keys()): + self.assertIn(key, struct) + self.assertIn(key, struct2) + self.assertEqual(struct[key], struct2[key]) + +- item = (next(iter(struct.keys())), next(iter(struct.values()))) +- self.assertEqual(item, next(iter(struct.items()))) ++ item = (next(iter(list(struct.keys()))), next(iter(list(struct.values())))) ++ self.assertEqual(item, next(iter(list(struct.items())))) + + self.assertTrue(isinstance(struct2, well_known_types.Struct)) + self.assertEqual(5, struct2['key1']) +@@ -924,7 +924,7 @@ + msg_descriptor = msg.DESCRIPTOR + all_types = unittest_pb2.TestAllTypes() + all_descriptor = all_types.DESCRIPTOR +- all_types.repeated_string.append(u'\u00fc\ua71f') ++ all_types.repeated_string.append('\u00fc\ua71f') + # Packs to Any. 
+     msg.value.Pack(all_types)
+     self.assertEqual(msg.value.type_url,
+--- a/src/3rdparty/chromium/third_party/protobuf/third_party/six/six.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/protobuf/third_party/six/six.py	2025-01-16 02:26:08.605429303 +0800
+@@ -20,7 +20,7 @@
+ 
+ """Utilities for writing code that runs on Python 2 and 3"""
+ 
+-from __future__ import absolute_import
++
+ 
+ import functools
+ import itertools
+@@ -46,10 +46,10 @@
+ 
+     MAXSIZE = sys.maxsize
+ else:
+-    string_types = basestring,
+-    integer_types = (int, long)
+-    class_types = (type, types.ClassType)
+-    text_type = unicode
++    string_types = str,
++    integer_types = (int, int)
++    class_types = (type, type)
++    text_type = str
+     binary_type = str
+ 
+     if sys.platform.startswith("java"):
+@@ -527,7 +527,7 @@
+     advance_iterator = next
+ except NameError:
+     def advance_iterator(it):
+-        return it.next()
++        return it.__next__()
+ next = advance_iterator
+ 
+ 
+@@ -550,7 +550,7 @@
+     Iterator = object
+ else:
+     def get_unbound_function(unbound):
+-        return unbound.im_func
++        return unbound.__func__
+ 
+     def create_bound_method(func, obj):
+         return types.MethodType(func, obj, obj.__class__)
+@@ -560,7 +560,7 @@
+ 
+     class Iterator(object):
+ 
+-        def next(self):
++        def __next__(self):
+             return type(self).__next__(self)
+ 
+     callable = callable
+@@ -650,8 +650,8 @@
+     # Workaround for standalone backslash
+ 
+     def u(s):
+-        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
++        return str(s.replace(r'\\', r'\\\\'), "unicode_escape")
+     unichr = unichr
+     int2byte = chr
+ 
+     def byte2int(bs):
+@@ -660,8 +660,8 @@
+     def indexbytes(buf, i):
+         return ord(buf[i])
+     iterbytes = functools.partial(itertools.imap, ord)
+-    import StringIO
+-    StringIO = BytesIO = StringIO.StringIO
++    import io
++    StringIO = BytesIO = io.StringIO
+     _assertCountEqual = "assertItemsEqual"
+     _assertRaisesRegex = "assertRaisesRegexp"
+     _assertRegex = "assertRegexpMatches"
+@@ -746,11 +746,11 @@
+         return
+ 
+     def write(data):
+-        if not isinstance(data, basestring):
++        if not isinstance(data, str):
+             data = str(data)
+         # If the file has an encoding, encode unicode with it. 
+ if (isinstance(fp, file) and +- isinstance(data, unicode) and ++ isinstance(data, str) and + fp.encoding is not None): + errors = getattr(fp, "errors", None) + if errors is None: +@@ -760,13 +760,13 @@ + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: +- if isinstance(sep, unicode): ++ if isinstance(sep, str): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: +- if isinstance(end, unicode): ++ if isinstance(end, str): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") +@@ -774,12 +774,12 @@ + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: +- if isinstance(arg, unicode): ++ if isinstance(arg, str): + want_unicode = True + break + if want_unicode: +- newline = unicode("\n") +- space = unicode(" ") ++ newline = str("\n") ++ space = str(" ") + else: + newline = "\n" + space = " " +--- a/src/3rdparty/chromium/third_party/pycoverage/igor.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pycoverage/igor.py 2025-01-16 02:26:08.605429303 +0800 +@@ -164,18 +164,18 @@ + for n, line in enumerate(open(fname, "rb")): + if crlf: + if "\r" in line: +- print("%s@%d: CR found" % (fname, n+1)) ++ print(("%s@%d: CR found" % (fname, n+1))) + return + if trail_white: + line = line[:-1] + if not crlf: + line = line.rstrip('\r') + if line.rstrip() != line: +- print("%s@%d: trailing whitespace found" % (fname, n+1)) ++ print(("%s@%d: trailing whitespace found" % (fname, n+1))) + return + + if line is not None and not line.strip(): +- print("%s: final blank line" % (fname,)) ++ print(("%s: final blank line" % (fname,))) + + def check_files(root, patterns, **kwargs): + """Check a number of files for whitespace abuse.""" +@@ -218,7 +218,7 @@ + pypy_version = sys.pypy_version_info # pylint: disable=E1101 + version += " (pypy %s)" % ".".join([str(v) for v in pypy_version]) + +- print('=== %s %s %s (%s) ===' % (impl, version, label, sys.executable)) ++ print(('=== %s %s %s (%s) ===' % (impl, version, label, sys.executable))) + + + def do_help(): +@@ -227,7 +227,7 @@ + items.sort() + for name, value in items: + if name.startswith('do_'): +- print("%-20s%s" % (name[3:], value.__doc__)) ++ print(("%-20s%s" % (name[3:], value.__doc__))) + + + def main(args): +@@ -241,7 +241,7 @@ + verb = args.pop(0) + handler = globals().get('do_'+verb) + if handler is None: +- print("*** No handler for %r" % verb) ++ print(("*** No handler for %r" % verb)) + return 1 + argspec = inspect.getargspec(handler) + if argspec[1]: +--- a/src/3rdparty/chromium/third_party/pycoverage/setup.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pycoverage/setup.py 2025-01-16 02:26:08.605429303 +0800 +@@ -193,7 +193,7 @@ + msg = "Couldn't install with extension module, trying without it..." 
+ exc = sys.exc_info()[1] + exc_msg = "%s: %s" % (exc.__class__.__name__, exc.cause) +- print("**\n** %s\n** %s\n**" % (msg, exc_msg)) ++ print(("**\n** %s\n** %s\n**" % (msg, exc_msg))) + + del setup_args['ext_modules'] + setup(**setup_args) +--- a/src/3rdparty/chromium/third_party/pycoverage/coverage/backward.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pycoverage/coverage/backward.py 2025-01-16 02:26:08.605429303 +0800 +@@ -51,20 +51,20 @@ + + # Pythons 2 and 3 differ on where to get StringIO + try: +- from cStringIO import StringIO ++ from io import StringIO + BytesIO = StringIO + except ImportError: + from io import StringIO, BytesIO + + # What's a string called? + try: +- string_class = basestring ++ string_class = str + except NameError: + string_class = str + + # Where do pickles come from? + try: +- import cPickle as pickle ++ import pickle as pickle + except ImportError: + import pickle + +@@ -80,11 +80,11 @@ + except AttributeError: + def iitems(d): + """Produce the items from dict `d`.""" +- return d.items() ++ return list(d.items()) + else: + def iitems(d): + """Produce the items from dict `d`.""" +- return d.iteritems() ++ return iter(d.items()) + + # Exec is a statement in Py2, a function in Py3 + if sys.version_info >= (3, 0): +--- a/src/3rdparty/chromium/third_party/pycoverage/coverage/cmdline.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pycoverage/coverage/cmdline.py 2025-01-16 02:26:08.605429303 +0800 +@@ -478,13 +478,13 @@ + print(error) + print("Use 'coverage help' for help.") + elif parser: +- print(parser.format_help().strip()) ++ print((parser.format_help().strip())) + else: + help_msg = HELP_TOPICS.get(topic, '').strip() + if help_msg: +- print(help_msg % self.covpkg.__dict__) ++ print((help_msg % self.covpkg.__dict__)) + else: +- print("Don't know topic %r" % topic) ++ print(("Don't know topic %r" % topic)) + + def do_help(self, options, args, parser): + """Deal with help requests. +@@ -594,18 +594,18 @@ + if info == 'sys': + print("-- sys ----------------------------------------") + for line in info_formatter(self.coverage.sysinfo()): +- print(" %s" % line) ++ print((" %s" % line)) + elif info == 'data': + print("-- data ---------------------------------------") + self.coverage.load() +- print("path: %s" % self.coverage.data.filename) +- print("has_arcs: %r" % self.coverage.data.has_arcs()) ++ print(("path: %s" % self.coverage.data.filename)) ++ print(("has_arcs: %r" % self.coverage.data.has_arcs())) + summary = self.coverage.data.summary(fullpath=True) + if summary: + filenames = sorted(summary.keys()) +- print("\n%d files:" % len(filenames)) ++ print(("\n%d files:" % len(filenames))) + for f in filenames: +- print("%s: %d lines" % (f, summary[f])) ++ print(("%s: %d lines" % (f, summary[f]))) + else: + print("No data collected") + else: +@@ -721,7 +721,7 @@ + status = CoverageScript().command_line(argv) + end = time.clock() + if 0: +- print("time: %.3fs" % (end - start)) ++ print(("time: %.3fs" % (end - start))) + except ExceptionDuringRun: + # An exception was caught while running the product code. 
The + # sys.exc_info() return tuple is packed into an ExceptionDuringRun +--- a/src/3rdparty/chromium/third_party/pycoverage/coverage/collector.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pycoverage/coverage/collector.py 2025-01-16 02:26:08.605429303 +0800 +@@ -310,7 +310,7 @@ + if stats: + print("\nCoverage.py tracer stats:") + for k in sorted(stats.keys()): +- print("%16s: %s" % (k, stats[k])) ++ print(("%16s: %s" % (k, stats[k]))) + threading.settrace(None) + + def resume(self): +@@ -329,7 +329,7 @@ + # If we were measuring branches, then we have to re-build the dict + # to show line data. + line_data = {} +- for f, arcs in self.data.items(): ++ for f, arcs in list(self.data.items()): + line_data[f] = ldf = {} + for l1, _ in list(arcs.keys()): + if l1: +--- a/src/3rdparty/chromium/third_party/pycoverage/coverage/config.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pycoverage/coverage/config.py 2025-01-16 02:26:08.605429303 +0800 +@@ -7,7 +7,7 @@ + try: + import configparser # pylint: disable=F0401 + except ImportError: +- import ConfigParser as configparser ++ import configparser as configparser + + + class HandyConfigParser(configparser.RawConfigParser): +@@ -69,7 +69,7 @@ + + """ + value_list = self.get(section, option) +- return list(filter(None, value_list.split('\n'))) ++ return list([_f for _f in value_list.split('\n') if _f]) + + + # The default line exclusion regexes +--- a/src/3rdparty/chromium/third_party/pycoverage/coverage/control.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pycoverage/coverage/control.py 2025-01-16 02:26:08.605429303 +0800 +@@ -503,7 +503,7 @@ + aliases = None + if self.config.paths: + aliases = PathAliases(self.file_locator) +- for paths in self.config.paths.values(): ++ for paths in list(self.config.paths.values()): + result = paths[0] + for pattern in paths[1:]: + aliases.add(pattern, result) +--- a/src/3rdparty/chromium/third_party/pycoverage/coverage/misc.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pycoverage/coverage/misc.py 2025-01-16 02:26:08.605429303 +0800 +@@ -123,7 +123,7 @@ + for e in v: + self.update(e) + elif isinstance(v, dict): +- keys = v.keys() ++ keys = list(v.keys()) + for k in sorted(keys): + self.update(k) + self.update(v[k]) +--- a/src/3rdparty/chromium/third_party/pycoverage/coverage/parser.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pycoverage/coverage/parser.py 2025-01-16 02:26:08.605429303 +0800 +@@ -111,10 +111,10 @@ + tokgen = generate_tokens(self.text) + for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen: + if self.show_tokens: # pragma: not covered +- print("%10s %5s %-20r %r" % ( ++ print(("%10s %5s %-20r %r" % ( + tokenize.tok_name.get(toktype, toktype), + nice_pair((slineno, elineno)), ttext, ltext +- )) ++ ))) + if toktype == token.INDENT: + indent += 1 + elif toktype == token.DEDENT: +@@ -136,7 +136,7 @@ + # (a trick from trace.py in the stdlib.) This works for + # 99.9999% of cases. For the rest (!) 
see: + # http://stackoverflow.com/questions/1769332/x/1769794#1769794 +- self.docstrings.update(range(slineno, elineno+1)) ++ self.docstrings.update(list(range(slineno, elineno+1))) + elif toktype == token.NEWLINE: + if first_line is not None and elineno != first_line: + # We're at the end of a line, and we've ended on a +--- a/src/3rdparty/chromium/third_party/pycoverage/coverage/phystokens.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pycoverage/coverage/phystokens.py 2025-01-16 02:26:08.605429303 +0800 +@@ -124,7 +124,7 @@ + cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)") + + # Do this so the detect_encode code we copied will work. +- readline = iter(source.splitlines(True)).next ++ readline = iter(source.splitlines(True)).__next__ + + def _get_normal_name(orig_enc): + """Imitates get_normal_name in tokenizer.c.""" +--- a/src/3rdparty/chromium/third_party/pycoverage/coverage/results.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pycoverage/coverage/results.py 2025-01-16 02:26:08.605429303 +0800 +@@ -148,7 +148,7 @@ + def total_branches(self): + """How many total branches are there?""" + exit_counts = self.parser.exit_counts() +- return sum([count for count in exit_counts.values() if count > 1]) ++ return sum([count for count in list(exit_counts.values()) if count > 1]) + + def missing_branch_arcs(self): + """Return arcs that weren't executed from branch lines. +--- a/src/3rdparty/chromium/third_party/pycoverage/coverage/xmlreport.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pycoverage/coverage/xmlreport.py 2025-01-16 02:26:08.605429303 +0800 +@@ -138,8 +138,8 @@ + class_hits = class_lines - len(analysis.missing) + + if self.arcs: +- class_branches = sum([t for t,k in branch_stats.values()]) +- missing_branches = sum([t-k for t,k in branch_stats.values()]) ++ class_branches = sum([t for t,k in list(branch_stats.values())]) ++ missing_branches = sum([t-k for t,k in list(branch_stats.values())]) + class_br_hits = class_branches - missing_branches + else: + class_branches = 0.0 +--- a/src/3rdparty/chromium/third_party/pycoverage/coverage/fullcoverage/encodings.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pycoverage/coverage/fullcoverage/encodings.py 2025-01-16 02:26:08.605429303 +0800 +@@ -51,7 +51,7 @@ + # happen last, since all of the symbols in this module will become None + # at that exact moment, including "sys". + +-parentdir = max(filter(__file__.startswith, sys.path), key=len) ++parentdir = max(list(filter(__file__.startswith, sys.path)), key=len) + sys.path.remove(parentdir) + del sys.modules['encodings'] + import encodings +--- a/src/3rdparty/chromium/third_party/pyelftools/z.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyelftools/z.py 2025-01-16 02:26:08.605429303 +0800 +@@ -8,7 +8,7 @@ + # Just a script for playing around with pyelftools during testing + # please ignore it! + # +-from __future__ import print_function ++ + + import sys, pprint + from elftools.elf.structs import ELFStructs +--- a/src/3rdparty/chromium/third_party/pyelftools/elftools/common/ordereddict.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyelftools/elftools/common/ordereddict.py 2025-01-16 02:26:08.605429303 +0800 +@@ -5,9 +5,9 @@ + # Code by Raymond Hettinger. 
License: MIT + #------------------------------------------------------------------------------- + try: +- from thread import get_ident as _get_ident ++ from _thread import get_ident as _get_ident + except ImportError: +- from dummy_thread import get_ident as _get_ident ++ from _dummy_thread import get_ident as _get_ident + + try: + from _abcoll import KeysView, ValuesView, ItemsView +@@ -81,7 +81,7 @@ + def clear(self): + 'od.clear() -> None. Remove all items from od.' + try: +- for node in self.__map.itervalues(): ++ for node in self.__map.values(): + del node[:] + root = self.__root + root[:] = [root, root, None] +@@ -164,12 +164,12 @@ + for key in other: + self[key] = other[key] + elif hasattr(other, 'keys'): +- for key in other.keys(): ++ for key in list(other.keys()): + self[key] = other[key] + else: + for key, value in other: + self[key] = value +- for key, value in kwds.items(): ++ for key, value in list(kwds.items()): + self[key] = value + + __update = update # let subclasses override update without breaking __init__ +@@ -205,7 +205,7 @@ + try: + if not self: + return '%s()' % (self.__class__.__name__,) +- return '%s(%r)' % (self.__class__.__name__, self.items()) ++ return '%s(%r)' % (self.__class__.__name__, list(self.items())) + finally: + del _repr_running[call_key] + +@@ -240,7 +240,7 @@ + + ''' + if isinstance(other, OrderedDict): +- return len(self)==len(other) and self.items() == other.items() ++ return len(self)==len(other) and list(self.items()) == list(other.items()) + return dict.__eq__(self, other) + + def __ne__(self, other): +--- a/src/3rdparty/chromium/third_party/pyelftools/elftools/common/py3compat.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyelftools/elftools/common/py3compat.py 2025-01-16 02:26:08.605429303 +0800 +@@ -31,8 +31,8 @@ + + maxint = sys.maxsize + else: +- import cStringIO +- StringIO = BytesIO = cStringIO.StringIO ++ import io ++ StringIO = BytesIO = io.StringIO + + from .ordereddict import OrderedDict + +@@ -45,9 +45,9 @@ + int2byte = chr + byte2int = ord + +- from itertools import ifilter ++ + +- maxint = sys.maxint ++ maxint = sys.maxsize + + + def iterkeys(d): +--- a/src/3rdparty/chromium/third_party/pyelftools/elftools/construct/adapters.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyelftools/elftools/construct/adapters.py 2025-01-16 02:26:08.605429303 +0800 +@@ -118,13 +118,13 @@ + self.flags = flags + def _encode(self, obj, context): + flags = 0 +- for name, value in self.flags.items(): ++ for name, value in list(self.flags.items()): + if getattr(obj, name, False): + flags |= value + return flags + def _decode(self, obj, context): + obj2 = FlagsContainer() +- for name, value in self.flags.items(): ++ for name, value in list(self.flags.items()): + setattr(obj2, name, bool(obj & value)) + return obj2 + +--- a/src/3rdparty/chromium/third_party/pyelftools/elftools/construct/core.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyelftools/elftools/construct/core.py 2025-01-16 02:26:08.605429303 +0800 +@@ -160,7 +160,7 @@ + """ + Set this construct's state to a given state. 
+ """ +- for name, value in attrs.items(): ++ for name, value in list(attrs.items()): + setattr(self, name, value) + + def __copy__(self): +@@ -813,12 +813,12 @@ + def __init__(self, name, keyfunc, cases, default = NoDefault, + include_key = False): + Construct.__init__(self, name) +- self._inherit_flags(*cases.values()) ++ self._inherit_flags(*list(cases.values())) + self.keyfunc = keyfunc + self.cases = cases + self.default = default + self.include_key = include_key +- self._inherit_flags(*cases.values()) ++ self._inherit_flags(*list(cases.values())) + self._set_flag(self.FLAG_DYNAMIC) + def _parse(self, stream, context): + key = self.keyfunc(context) +--- a/src/3rdparty/chromium/third_party/pyelftools/elftools/construct/debug.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyelftools/elftools/construct/debug.py 2025-01-16 02:26:08.605429303 +0800 +@@ -1,7 +1,7 @@ + """ + Debugging utilities for constructs + """ +-from __future__ import print_function ++ + import sys + import traceback + import pdb +@@ -70,7 +70,7 @@ + else: + stream.seek(-len(follows), 1) + obj.following_stream_data = HexString(follows) +- print ++ print() + + if self.show_context: + obj.context = context +--- a/src/3rdparty/chromium/third_party/pyelftools/elftools/construct/macros.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyelftools/elftools/construct/macros.py 2025-01-16 02:26:08.605429303 +0800 +@@ -411,7 +411,7 @@ + default value is given, and exception is raised. setting to Pass would + return the value "as is" (unmapped) + """ +- reversed_mapping = dict((v, k) for k, v in mapping.items()) ++ reversed_mapping = dict((v, k) for k, v in list(mapping.items())) + return MappingAdapter(subcon, + encoding = mapping, + decoding = reversed_mapping, +--- a/src/3rdparty/chromium/third_party/pyelftools/elftools/construct/lib/container.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyelftools/elftools/construct/lib/container.py 2025-01-16 02:26:08.605429303 +0800 +@@ -2,7 +2,7 @@ + Various containers. + """ + +-from collections import MutableMapping ++from collections.abc import MutableMapping + from pprint import pformat + + def recursion_lock(retval, lock_name = "__recursion_lock__"): +@@ -41,10 +41,10 @@ + self.__dict__[name] = value + + def keys(self): +- return self.__dict__.keys() ++ return list(self.__dict__.keys()) + + def __len__(self): +- return len(self.__dict__.keys()) ++ return len(list(self.__dict__.keys())) + + # Extended dictionary interface. 
+ +--- a/src/3rdparty/chromium/third_party/pyelftools/elftools/construct/lib/py3compat.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyelftools/elftools/construct/lib/py3compat.py 2025-01-16 02:26:08.605429303 +0800 +@@ -43,21 +43,21 @@ + advance_iterator = next + + else: +- import cStringIO +- StringIO = BytesIO = cStringIO.StringIO ++ import io ++ StringIO = BytesIO = io.StringIO + + int2byte = chr + byte2int = ord + bchr = lambda i: i + + def u(s): +- return unicode(s, "unicode_escape") ++ return str(s, "unicode_escape") + + def str2bytes(s): + return s + + def str2unicode(s): +- return unicode(s, "unicode_escape") ++ return str(s, "unicode_escape") + + def bytes2str(b): + return b +@@ -66,5 +66,5 @@ + return b.decode(encoding) + + def advance_iterator(it): +- return it.next() ++ return next(it) + +--- a/src/3rdparty/chromium/third_party/pyelftools/examples/dwarf_decode_address.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyelftools/examples/dwarf_decode_address.py 2025-01-16 02:26:08.605429303 +0800 +@@ -7,7 +7,7 @@ + # Eli Bendersky (eliben@gmail.com) + # This code is in the public domain + #------------------------------------------------------------------------------- +-from __future__ import print_function ++ + import sys + + # If pyelftools is not installed, the example can also run from the root or +--- a/src/3rdparty/chromium/third_party/pyelftools/examples/dwarf_die_tree.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyelftools/examples/dwarf_die_tree.py 2025-01-16 02:26:08.605429303 +0800 +@@ -7,7 +7,7 @@ + # Eli Bendersky (eliben@gmail.com) + # This code is in the public domain + #------------------------------------------------------------------------------- +-from __future__ import print_function ++ + import sys + + # If pyelftools is not installed, the example can also run from the root or +--- a/src/3rdparty/chromium/third_party/pyelftools/examples/dwarf_location_lists.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyelftools/examples/dwarf_location_lists.py 2025-01-16 02:26:08.605429303 +0800 +@@ -7,7 +7,7 @@ + # Eli Bendersky (eliben@gmail.com) + # This code is in the public domain + #------------------------------------------------------------------------------- +-from __future__ import print_function ++ + import sys + + # If pyelftools is not installed, the example can also run from the root or +--- a/src/3rdparty/chromium/third_party/pyelftools/examples/dwarf_range_lists.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyelftools/examples/dwarf_range_lists.py 2025-01-16 02:26:08.605429303 +0800 +@@ -7,7 +7,7 @@ + # Eli Bendersky (eliben@gmail.com) + # This code is in the public domain + #------------------------------------------------------------------------------- +-from __future__ import print_function ++ + import sys + + # If pyelftools is not installed, the example can also run from the root or +--- a/src/3rdparty/chromium/third_party/pyelftools/examples/elf_low_high_api.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyelftools/examples/elf_low_high_api.py 2025-01-16 02:26:08.605429303 +0800 +@@ -8,7 +8,7 @@ + # Eli Bendersky (eliben@gmail.com) + # This code is in the public domain + #------------------------------------------------------------------------------- +-from __future__ import print_function ++ + import sys + + # If pyelftools is not installed, the example 
can also run from the root or +--- a/src/3rdparty/chromium/third_party/pyelftools/examples/elf_relocations.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyelftools/examples/elf_relocations.py 2025-01-16 02:26:08.605429303 +0800 +@@ -7,7 +7,7 @@ + # Eli Bendersky (eliben@gmail.com) + # This code is in the public domain + #------------------------------------------------------------------------------- +-from __future__ import print_function ++ + import sys + + # If pyelftools is not installed, the example can also run from the root or +--- a/src/3rdparty/chromium/third_party/pyelftools/examples/elf_show_debug_sections.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyelftools/examples/elf_show_debug_sections.py 2025-01-16 02:26:08.605429303 +0800 +@@ -6,7 +6,7 @@ + # Eli Bendersky (eliben@gmail.com) + # This code is in the public domain + #------------------------------------------------------------------------------- +-from __future__ import print_function ++ + import sys + + # If pyelftools is not installed, the example can also run from the root or +--- a/src/3rdparty/chromium/third_party/pyelftools/examples/elfclass_address_size.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyelftools/examples/elfclass_address_size.py 2025-01-16 02:26:08.605429303 +0800 +@@ -7,7 +7,7 @@ + # Eli Bendersky (eliben@gmail.com) + # This code is in the public domain + #------------------------------------------------------------------------------- +-from __future__ import print_function ++ + import sys + + # If pyelftools is not installed, the example can also run from the root or +--- a/src/3rdparty/chromium/third_party/pyelftools/examples/examine_dwarf_info.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyelftools/examples/examine_dwarf_info.py 2025-01-16 02:26:08.605429303 +0800 +@@ -6,7 +6,7 @@ + # Eli Bendersky (eliben@gmail.com) + # This code is in the public domain + #------------------------------------------------------------------------------- +-from __future__ import print_function ++ + import sys + + # If pyelftools is not installed, the example can also run from the root or +--- a/src/3rdparty/chromium/third_party/pyelftools/scripts/readelf.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyelftools/scripts/readelf.py 2025-01-16 02:26:08.605429303 +0800 +@@ -985,7 +985,7 @@ + # registers are sorted by their number, and the register matching + # ra_regnum is always listed last with a special heading. + decoded_table = entry.get_decoded() +- reg_order = sorted(ifilter( ++ reg_order = sorted(filter( + lambda r: r != ra_regnum, + decoded_table.reg_order)) + +--- a/src/3rdparty/chromium/third_party/pyjson5/src/benchmarks/run.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyjson5/src/benchmarks/run.py 2025-01-16 02:26:08.605429303 +0800 +@@ -13,7 +13,7 @@ + # See the License for the specific language governing permissions and + # limitations under the License. 
+ +-from __future__ import print_function ++ + + import argparse + import json +--- a/src/3rdparty/chromium/third_party/pyjson5/src/json5/host.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyjson5/src/json5/host.py 2025-01-16 02:26:08.605429303 +0800 +@@ -20,7 +20,7 @@ + + if sys.version_info[0] < 3: + # pylint: disable=redefined-builtin, invalid-name +- str = unicode ++ str = str + + + class Host(object): +@@ -41,7 +41,7 @@ + def mkdtemp(self, **kwargs): + return tempfile.mkdtemp(**kwargs) + +- def print_(self, msg=u'', end=u'\n', stream=None): ++ def print_(self, msg='', end='\n', stream=None): + stream = stream or self.stdout + stream.write(str(msg) + end) + stream.flush() +--- a/src/3rdparty/chromium/third_party/pyjson5/src/json5/lib.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyjson5/src/json5/lib.py 2025-01-16 02:26:08.605429303 +0800 +@@ -21,7 +21,7 @@ + + + if sys.version_info[0] < 3: +- str = unicode # pylint: disable=redefined-builtin, invalid-name ++ str = str # pylint: disable=redefined-builtin, invalid-name + else: + long = int # pylint: disable=redefined-builtin, invalid-name + +@@ -215,9 +215,9 @@ + + if separators is None: + if indent is None: +- separators = (u', ', u': ') ++ separators = (', ', ': ') + else: +- separators = (u',', u': ') ++ separators = (',', ': ') + + default = default or _raise_type_error + +@@ -242,14 +242,14 @@ + seen, level, is_key): + s = None + if obj is True: +- s = u'true' ++ s = 'true' + if obj is False: +- s = u'false' ++ s = 'false' + if obj is None: +- s = u'null' ++ s = 'null' + + t = type(obj) +- if t == type('') or t == type(u''): ++ if t == type('') or t == type(''): + if (is_key and _is_ident(obj) and not quote_keys + and not _is_reserved_word(obj)): + return True, obj +@@ -327,14 +327,14 @@ + quote_keys, trailing_commas, allow_duplicate_keys, + seen, level, item_sep, kv_sep, indent_str, end_str): + if not obj: +- return u'{}' ++ return '{}' + + if sort_keys: + keys = sorted(obj.keys()) + else: +- keys = obj.keys() ++ keys = list(obj.keys()) + +- s = u'{' + indent_str ++ s = '{' + indent_str + + num_items_added = 0 + new_keys = set() +@@ -363,7 +363,7 @@ + elif not skipkeys: + raise TypeError('invalid key %s' % repr(key)) + +- s += end_str + u'}' ++ s += end_str + '}' + return s + + +@@ -372,14 +372,14 @@ + quote_keys, trailing_commas, allow_duplicate_keys, + seen, level, item_sep, indent_str, end_str): + if not obj: +- return u'[]' +- return (u'[' + indent_str + ++ return '[]' ++ return ('[' + indent_str + + item_sep.join([_dumps(el, skipkeys, ensure_ascii, check_circular, + allow_nan, indent, separators, default, + sort_keys, quote_keys, trailing_commas, + allow_duplicate_keys, + seen, level, False)[1] for el in obj]) + +- end_str + u']') ++ end_str + ']') + + + def _dump_float(obj, allow_nan): +@@ -403,9 +403,9 @@ + ret.append('\\\\') + elif ch == '"': + ret.append('\\"') +- elif ch == u'\u2028': ++ elif ch == '\u2028': + ret.append('\\u2028') +- elif ch == u'\u2029': ++ elif ch == '\u2029': + ret.append('\\u2029') + elif ch == '\n': + ret.append('\\n') +@@ -434,15 +434,15 @@ + high = 0xd800 + (val >> 10) + low = 0xdc00 + (val & 0x3ff) + ret.append('\\u%04x\\u%04x' % (high, low)) +- return u''.join(ret) + '"' ++ return ''.join(ret) + '"' + + + def _is_ident(k): + k = str(k) +- if not k or not _is_id_start(k[0]) and k[0] not in (u'$', u'_'): ++ if not k or not _is_id_start(k[0]) and k[0] not in ('$', '_'): + return False + for ch in k[1:]: +- if not 
_is_id_continue(ch) and ch not in (u'$', u'_'): ++ if not _is_id_continue(ch) and ch not in ('$', '_'): + return False + return True + +--- a/src/3rdparty/chromium/third_party/pyjson5/src/json5/parser.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyjson5/src/json5/parser.py 2025-01-16 02:26:08.605429303 +0800 +@@ -5,9 +5,9 @@ + + if sys.version_info[0] < 3: + # pylint: disable=redefined-builtin,invalid-name +- chr = unichr ++ chr = chr + range = xrange +- str = unicode ++ str = str + + + class Parser(object): +@@ -193,10 +193,10 @@ + self._ch('\f') + + def _ws__c6_(self): +- self._ch(u'\xa0') ++ self._ch('\xa0') + + def _ws__c7_(self): +- self._ch(u'\ufeff') ++ self._ch('\ufeff') + + def _ws__c8_(self): + self._push('ws__c8') +@@ -225,10 +225,10 @@ + self._ch('\n') + + def _eol__c3_(self): +- self._ch(u'\u2028') ++ self._ch('\u2028') + + def _eol__c4_(self): +- self._ch(u'\u2029') ++ self._ch('\u2029') + + def _comment_(self): + self._choose([self._comment__c0_, self._comment__c1_]) +@@ -717,10 +717,10 @@ + self._seq([self._bslash_, self._unicode_esc_]) + + def _id_continue__c8_(self): +- self._ch(u'\u200c') ++ self._ch('\u200c') + + def _id_continue__c9_(self): +- self._ch(u'\u200d') ++ self._ch('\u200d') + + def _num_literal_(self): + self._choose([self._num_literal__c0_, self._num_literal__c1_, +--- a/src/3rdparty/chromium/third_party/pyjson5/src/json5/fakes/host_fake.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pyjson5/src/json5/fakes/host_fake.py 2025-01-16 02:26:08.605429303 +0800 +@@ -17,7 +17,7 @@ + + if sys.version_info[0] < 3: + # pylint: disable=redefined-builtin +- str = unicode ++ str = str + + + class FakeHost(object): +@@ -104,7 +104,7 @@ + self.dirs.add(self.last_tmpdir) + return self.last_tmpdir + +- def print_(self, msg=u'', end=u'\n', stream=None): ++ def print_(self, msg='', end='\n', stream=None): + stream = stream or self.stdout + stream.write(str(msg) + str(end)) + stream.flush() +--- a/src/3rdparty/chromium/third_party/pystache/common.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pystache/common.py 2025-01-16 02:26:08.605429303 +0800 +@@ -13,9 +13,9 @@ + # and byte strings, and 2to3 seems to convert all of "str", "unicode", + # and "basestring" to Python 3's "str". + if version_info < (3, ): +- return basestring ++ return str + # The latter evaluates to "bytes" in Python 3 -- even after conversion by 2to3. +- return (unicode, type(u"a".encode('utf-8'))) ++ return (str, type("a".encode('utf-8'))) + + + _STRING_TYPES = _get_string_types() +--- a/src/3rdparty/chromium/third_party/pystache/defaults.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pystache/defaults.py 2025-01-16 02:26:08.605429303 +0800 +@@ -39,7 +39,7 @@ + FILE_ENCODING = sys.getdefaultencoding() + + # The delimiters to start with when parsing. +-DELIMITERS = (u'{{', u'}}') ++DELIMITERS = ('{{', '}}') + + # How to handle missing tags when rendering a template. 
+ MISSING_TAGS = MissingTags.ignore +--- a/src/3rdparty/chromium/third_party/pystache/loader.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pystache/loader.py 2025-01-16 02:26:08.605429303 +0800 +@@ -24,7 +24,7 @@ + """ + if encoding is None: + encoding = defaults.STRING_ENCODING +- return unicode(s, encoding, defaults.DECODE_ERRORS) ++ return str(s, encoding, defaults.DECODE_ERRORS) + return to_unicode + + +@@ -86,7 +86,7 @@ + def _make_locator(self): + return Locator(extension=self.extension) + +- def unicode(self, s, encoding=None): ++ def str(self, s, encoding=None): + """ + Convert a string to unicode using the given encoding, and return it. + +@@ -104,8 +104,8 @@ + Defaults to None. + + """ +- if isinstance(s, unicode): +- return unicode(s) ++ if isinstance(s, str): ++ return str(s) + + return self.to_unicode(s, encoding) + +@@ -119,7 +119,7 @@ + if encoding is None: + encoding = self.file_encoding + +- return self.unicode(b, encoding) ++ return self.str(b, encoding) + + def load_file(self, file_name): + """ +--- a/src/3rdparty/chromium/third_party/pystache/parsed.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pystache/parsed.py 2025-01-16 02:26:08.605429303 +0800 +@@ -41,10 +41,10 @@ + """ + # We avoid use of the ternary operator for Python 2.4 support. + def get_unicode(node): +- if type(node) is unicode: ++ if type(node) is str: + return node + return node.render(engine, context) +- parts = map(get_unicode, self._parse_tree) ++ parts = list(map(get_unicode, self._parse_tree)) + s = ''.join(parts) + +- return unicode(s) ++ return str(s) +--- a/src/3rdparty/chromium/third_party/pystache/parser.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pystache/parser.py 2025-01-16 02:26:08.605429303 +0800 +@@ -11,8 +11,8 @@ + from pystache.parsed import ParsedTemplate + + +-END_OF_LINE_CHARACTERS = [u'\r', u'\n'] +-NON_BLANK_RE = re.compile(ur'^(.)', re.M) ++END_OF_LINE_CHARACTERS = ['\r', '\n'] ++NON_BLANK_RE = re.compile(r'^(.)', re.M) + + + # TODO: add some unit tests for this. +@@ -35,7 +35,7 @@ + ['Hey ', _SectionNode(key='who', index_begin=12, index_end=21, parsed=[_EscapeNode(key='name'), '!'])] + + """ +- if type(template) is not unicode: ++ if type(template) is not str: + raise Exception("Template is not unicode: %s" % type(template)) + parser = _Parser(delimiters) + return parser.parse(template) +@@ -94,7 +94,7 @@ + return _format(self) + + def render(self, engine, context): +- return u'' ++ return '' + + + class _ChangeNode(object): +@@ -106,7 +106,7 @@ + return _format(self) + + def render(self, engine, context): +- return u'' ++ return '' + + + class _EscapeNode(object): +@@ -147,7 +147,7 @@ + def render(self, engine, context): + template = engine.resolve_partial(self.key) + # Indent before rendering. +- template = re.sub(NON_BLANK_RE, self.indent + ur'\1', template) ++ template = re.sub(NON_BLANK_RE, self.indent + r'\1', template) + + return engine.render(template, context) + +@@ -168,7 +168,7 @@ + # Note that lambdas are considered truthy for inverted sections + # per the spec. 
+ if data: +- return u'' ++ return '' + return self.parsed_section.render(engine, context) + + +@@ -218,7 +218,7 @@ + parts.append(self.parsed.render(engine, context)) + context.pop() + +- return unicode(''.join(parts)) ++ return str(''.join(parts)) + + + class _Parser(object): +--- a/src/3rdparty/chromium/third_party/pystache/renderengine.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pystache/renderengine.py 2025-01-16 02:26:08.605429303 +0800 +@@ -160,7 +160,7 @@ + if not is_string(val): + # In case the template is an integer, for example. + val = self.to_str(val) +- if type(val) is not unicode: ++ if type(val) is not str: + val = self.literal(val) + return self.render(val, context, delimiters) + +--- a/src/3rdparty/chromium/third_party/pystache/renderer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pystache/renderer.py 2025-01-16 02:26:08.605429303 +0800 +@@ -130,7 +130,7 @@ + if string_encoding is None: + string_encoding = defaults.STRING_ENCODING + +- if isinstance(search_dirs, basestring): ++ if isinstance(search_dirs, str): + search_dirs = [search_dirs] + + self._context = None +@@ -177,16 +177,16 @@ + """ + # We type-check to avoid "TypeError: decoding Unicode is not supported". + # We avoid the Python ternary operator for Python 2.4 support. +- if isinstance(s, unicode): ++ if isinstance(s, str): + return s +- return self.unicode(s) ++ return self.str(s) + + def _to_unicode_hard(self, s): + """ + Convert a basestring to a string with type unicode (not subclass). + + """ +- return unicode(self._to_unicode_soft(s)) ++ return str(self._to_unicode_soft(s)) + + def _escape_to_unicode(self, s): + """ +@@ -195,9 +195,9 @@ + Returns a unicode string (not subclass). + + """ +- return unicode(self.escape(self._to_unicode_soft(s))) ++ return str(self.escape(self._to_unicode_soft(s))) + +- def unicode(self, b, encoding=None): ++ def str(self, b, encoding=None): + """ + Convert a byte string to unicode, using string_encoding and decode_errors. + +@@ -222,7 +222,7 @@ + + # TODO: Wrap UnicodeDecodeErrors with a message about setting + # the string_encoding and decode_errors attributes. 
+- return unicode(b, encoding, self.decode_errors) ++ return str(b, encoding, self.decode_errors) + + def _make_loader(self): + """ +@@ -230,7 +230,7 @@ + + """ + return Loader(file_encoding=self.file_encoding, extension=self.file_extension, +- to_unicode=self.unicode, search_dirs=self.search_dirs) ++ to_unicode=self.str, search_dirs=self.search_dirs) + + def _make_load_template(self): + """ +@@ -299,7 +299,7 @@ + try: + return load_partial(name) + except TemplateNotFoundError: +- return u'' ++ return '' + + return resolve_partial + +@@ -316,7 +316,7 @@ + try: + return context_get(stack, name) + except KeyNotFoundError: +- return u'' ++ return '' + + return resolve_context + +--- a/src/3rdparty/chromium/third_party/pystache/specloader.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pystache/specloader.py 2025-01-16 02:26:08.605429303 +0800 +@@ -83,7 +83,7 @@ + + """ + if spec.template is not None: +- return self.loader.unicode(spec.template, spec.template_encoding) ++ return self.loader.str(spec.template, spec.template_encoding) + + path = self._find(spec) + +--- a/src/3rdparty/chromium/third_party/pywebsocket3/src/setup.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pywebsocket3/src/setup.py 2025-01-16 02:26:08.605429303 +0800 +@@ -31,8 +31,8 @@ + """Set up script for mod_pywebsocket. + """ + +-from __future__ import absolute_import +-from __future__ import print_function ++ ++ + from setuptools import setup, Extension + import sys + +--- a/src/3rdparty/chromium/third_party/pywebsocket3/src/example/abort_handshake_wsh.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pywebsocket3/src/example/abort_handshake_wsh.py 2025-01-16 02:26:08.605429303 +0800 +@@ -27,7 +27,7 @@ + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-from __future__ import absolute_import ++ + from mod_pywebsocket import handshake + + +--- a/src/3rdparty/chromium/third_party/pywebsocket3/src/example/abort_wsh.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pywebsocket3/src/example/abort_wsh.py 2025-01-16 02:26:08.605429303 +0800 +@@ -27,7 +27,7 @@ + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-from __future__ import absolute_import ++ + from mod_pywebsocket import handshake + + +--- a/src/3rdparty/chromium/third_party/pywebsocket3/src/example/bench_wsh.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pywebsocket3/src/example/bench_wsh.py 2025-01-16 02:26:08.605429303 +0800 +@@ -34,7 +34,7 @@ + value. must be an integer value. + """ + +-from __future__ import absolute_import ++ + import time + from six.moves import range + +--- a/src/3rdparty/chromium/third_party/pywebsocket3/src/example/benchmark_helper_wsh.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pywebsocket3/src/example/benchmark_helper_wsh.py 2025-01-16 02:26:08.605429303 +0800 +@@ -27,7 +27,7 @@ + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ """Handler for benchmark.html.""" +-from __future__ import absolute_import ++ + import six + + +--- a/src/3rdparty/chromium/third_party/pywebsocket3/src/example/close_wsh.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pywebsocket3/src/example/close_wsh.py 2025-01-16 02:26:08.605429303 +0800 +@@ -27,7 +27,7 @@ + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-from __future__ import absolute_import ++ + import struct + + from mod_pywebsocket import common +--- a/src/3rdparty/chromium/third_party/pywebsocket3/src/example/cookie_wsh.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pywebsocket3/src/example/cookie_wsh.py 2025-01-16 02:26:08.605429303 +0800 +@@ -26,7 +26,7 @@ + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-from __future__ import absolute_import ++ + from six.moves import urllib + + +--- a/src/3rdparty/chromium/third_party/pywebsocket3/src/example/echo_client.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pywebsocket3/src/example/echo_client.py 2025-01-16 02:26:08.605429303 +0800 +@@ -45,8 +45,8 @@ + -o http://localhost -r /echo -m test + """ + +-from __future__ import absolute_import +-from __future__ import print_function ++ ++ + import base64 + import codecs + from hashlib import sha1 +@@ -636,7 +636,7 @@ + '--message', + dest='message', + type=six.text_type, +- default=u'Hello,\u65e5\u672c', ++ default='Hello,\u65e5\u672c', + help=('comma-separated messages to send. ' + '%s will force close the connection from server.' % + _GOODBYE_MESSAGE)) +--- a/src/3rdparty/chromium/third_party/pywebsocket3/src/example/echo_noext_wsh.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pywebsocket3/src/example/echo_noext_wsh.py 2025-01-16 02:26:08.605429303 +0800 +@@ -29,7 +29,7 @@ + + import six + +-_GOODBYE_MESSAGE = u'Goodbye' ++_GOODBYE_MESSAGE = 'Goodbye' + + + def web_socket_do_extra_handshake(request): +--- a/src/3rdparty/chromium/third_party/pywebsocket3/src/example/echo_wsh.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pywebsocket3/src/example/echo_wsh.py 2025-01-16 02:26:08.605429303 +0800 +@@ -29,7 +29,7 @@ + + import six + +-_GOODBYE_MESSAGE = u'Goodbye' ++_GOODBYE_MESSAGE = 'Goodbye' + + + def web_socket_do_extra_handshake(request): +--- a/src/3rdparty/chromium/third_party/pywebsocket3/src/example/internal_error_wsh.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pywebsocket3/src/example/internal_error_wsh.py 2025-01-16 02:26:08.605429303 +0800 +@@ -27,7 +27,7 @@ + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-from __future__ import absolute_import ++ + from mod_pywebsocket import msgutil + + +--- a/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/common.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/common.py 2025-01-16 02:26:08.605429303 +0800 +@@ -29,7 +29,7 @@ + """This file must not depend on any module specific to the WebSocket protocol. + """ + +-from __future__ import absolute_import ++ + from mod_pywebsocket import http_header_util + + # Additional log level definitions. 
+--- a/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/dispatch.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/dispatch.py 2025-01-16 02:26:08.605429303 +0800 +@@ -29,7 +29,7 @@ + """Dispatch WebSocket request. + """ + +-from __future__ import absolute_import ++ + import logging + import os + import re +--- a/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/extensions.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/extensions.py 2025-01-16 02:26:08.605429303 +0800 +@@ -27,7 +27,7 @@ + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-from __future__ import absolute_import ++ + from mod_pywebsocket import common + from mod_pywebsocket import util + from mod_pywebsocket.http_header_util import quote_if_necessary +--- a/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/http_header_util.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/http_header_util.py 2025-01-16 02:26:08.605429303 +0800 +@@ -30,7 +30,7 @@ + in HTTP RFC http://www.ietf.org/rfc/rfc2616.txt. + """ + +-from __future__ import absolute_import ++ + import six.moves.urllib.parse + + _SEPARATORS = '()<>@,;:\\"/[]?={} \t' +--- a/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/memorizingfile.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/memorizingfile.py 2025-01-16 02:26:08.605429303 +0800 +@@ -33,7 +33,7 @@ + A memorizing file wraps a file and memorizes lines read by readline. + """ + +-from __future__ import absolute_import ++ + import sys + + +--- a/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/msgutil.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/msgutil.py 2025-01-16 02:26:08.605429303 +0800 +@@ -35,7 +35,7 @@ + bytes writing/reading. + """ + +-from __future__ import absolute_import ++ + import six.moves.queue + import threading + +--- a/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/standalone.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/standalone.py 2025-01-16 02:26:08.605429303 +0800 +@@ -153,7 +153,7 @@ + used outside a firewall. + """ + +-from __future__ import absolute_import ++ + from six.moves import configparser + import base64 + import logging +--- a/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/util.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/util.py 2025-01-16 02:26:08.609762563 +0800 +@@ -28,7 +28,7 @@ + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + """WebSocket utilities.""" + +-from __future__ import absolute_import ++ + import array + import errno + import logging +--- a/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/websocket_server.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/websocket_server.py 2025-01-16 02:26:08.609762563 +0800 +@@ -33,7 +33,7 @@ + to use standalone.py, since it is intended to act as a skeleton of this module. 
+ """ + +-from __future__ import absolute_import ++ + from six.moves import BaseHTTPServer + from six.moves import socketserver + import logging +--- a/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/handshake/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/handshake/__init__.py 2025-01-16 02:26:08.609762563 +0800 +@@ -31,7 +31,7 @@ + successfully established. + """ + +-from __future__ import absolute_import ++ + import logging + + from mod_pywebsocket import common +--- a/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/handshake/_base.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/handshake/_base.py 2025-01-16 02:26:08.609762563 +0800 +@@ -30,7 +30,7 @@ + processors. + """ + +-from __future__ import absolute_import ++ + from mod_pywebsocket import common + from mod_pywebsocket import http_header_util + +@@ -112,7 +112,7 @@ + + + def format_header(name, value): +- return u'%s: %s\r\n' % (name, value) ++ return '%s: %s\r\n' % (name, value) + + + def get_mandatory_header(request, key): +@@ -135,10 +135,10 @@ + def check_request_line(request): + # 5.1 1. The three character UTF-8 string "GET". + # 5.1 2. A UTF-8-encoded U+0020 SPACE character (0x20 byte). +- if request.method != u'GET': ++ if request.method != 'GET': + raise HandshakeException('Method is not GET: %r' % request.method) + +- if request.protocol != u'HTTP/1.1': ++ if request.protocol != 'HTTP/1.1': + raise HandshakeException('Version is not HTTP/1.1: %r' % + request.protocol) + +--- a/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/handshake/hybi.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/pywebsocket3/src/mod_pywebsocket/handshake/hybi.py 2025-01-16 02:26:08.609762563 +0800 +@@ -33,7 +33,7 @@ + http://tools.ietf.org/html/rfc6455 + """ + +-from __future__ import absolute_import ++ + import base64 + import logging + import os +@@ -354,7 +354,7 @@ + def _create_handshake_response(self, accept): + response = [] + +- response.append(u'HTTP/1.1 101 Switching Protocols\r\n') ++ response.append('HTTP/1.1 101 Switching Protocols\r\n') + + # WebSocket headers + response.append( +@@ -381,9 +381,9 @@ + for name, value in self._request.extra_headers: + response.append(format_header(name, value)) + +- response.append(u'\r\n') ++ response.append('\r\n') + +- return u''.join(response) ++ return ''.join(response) + + def _send_handshake(self, accept): + raw_response = self._create_handshake_response(accept) +--- a/src/3rdparty/chromium/third_party/re2/src/benchlog/benchplot.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/re2/src/benchlog/benchplot.py 2025-01-16 02:26:08.609762563 +0800 +@@ -63,7 +63,7 @@ + generate temporary csv files + """ + +- for name, data in self.benchdata.items(): ++ for name, data in list(self.benchdata.items()): + + with tempfile.NamedTemporaryFile(delete=False) as f: + +--- a/src/3rdparty/chromium/third_party/re2/src/re2/make_unicode_casefold.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/re2/src/re2/make_unicode_casefold.py 2025-01-16 02:26:08.609762563 +0800 +@@ -9,12 +9,12 @@ + + """Generate C++ table for Unicode case folding.""" + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import sys +-import unicode ++import str + + _header = """ + // 
GENERATED BY make_unicode_casefold.py; DO NOT EDIT. +@@ -62,7 +62,7 @@ + else: + return a-1 + print("Bad Delta:", delta, file=sys.stderr) +- raise unicode.Error("Bad Delta") ++ raise str.Error("Bad Delta") + + def _MakeRanges(pairs): + """Turn a list like [(65,97), (66, 98), ..., (90,122)] +@@ -113,20 +113,20 @@ + MaxCasefoldGroup = 4 + + def main(): +- lowergroups, casegroups = unicode.CaseGroups() ++ lowergroups, casegroups = str.CaseGroups() + foldpairs = [] + seen = {} + for c in casegroups: + if len(c) > MaxCasefoldGroup: +- raise unicode.Error("casefold group too long: %s" % (c,)) ++ raise str.Error("casefold group too long: %s" % (c,)) + for i in range(len(c)): + if c[i-1] in seen: +- raise unicode.Error("bad casegroups %d -> %d" % (c[i-1], c[i])) ++ raise str.Error("bad casegroups %d -> %d" % (c[i-1], c[i])) + seen[c[i-1]] = True + foldpairs.append([c[i-1], c[i]]) + + lowerpairs = [] +- for lower, group in lowergroups.items(): ++ for lower, group in list(lowergroups.items()): + for g in group: + if g != lower: + lowerpairs.append([g, lower]) +--- a/src/3rdparty/chromium/third_party/re2/src/re2/make_unicode_groups.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/re2/src/re2/make_unicode_groups.py 2025-01-16 02:26:08.609762563 +0800 +@@ -5,12 +5,12 @@ + + """Generate C++ tables for Unicode Script and Category groups.""" + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import sys +-import unicode ++import str + + _header = """ + // GENERATED BY make_unicode_groups.py; DO NOT EDIT. +@@ -96,8 +96,8 @@ + return ugroup + + def main(): +- categories = unicode.Categories() +- scripts = unicode.Scripts() ++ categories = str.Categories() ++ scripts = str.Scripts() + print(_header) + ugroups = [] + for name in sorted(categories): +--- a/src/3rdparty/chromium/third_party/re2/src/re2/unicode.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/re2/src/re2/unicode.py 2025-01-16 02:26:08.609762563 +0800 +@@ -4,9 +4,9 @@ + + """Parser for Unicode data files (as distributed by unicode.org).""" + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import os + import re +@@ -71,7 +71,7 @@ + lo = _UInt(a[0]) + hi = _UInt(a[1]) + if lo < hi: +- return range(lo, hi + 1) ++ return list(range(lo, hi + 1)) + raise InputError("invalid Unicode range %s" % (s,)) + + +@@ -197,7 +197,7 @@ + cont != "Last" or name != expect_last): + raise InputError("expected Last line for %s" % + (expect_last,)) +- codes = range(first, codes[0] + 1) ++ codes = list(range(first, codes[0] + 1)) + first = None + expect_last = None + fields[0] = "%04X..%04X" % (codes[0], codes[-1]) +--- a/src/3rdparty/chromium/third_party/shaderc/src/utils/add_copyright.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/shaderc/src/utils/add_copyright.py 2025-01-16 02:26:08.609762563 +0800 +@@ -124,7 +124,7 @@ + has_copyright = True + break + if not has_copyright: +- print(file, ' has no copyright message.') ++ print((file, ' has no copyright message.')) + printed_count += 1 + return printed_count + +--- a/src/3rdparty/chromium/third_party/simplejson/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/simplejson/__init__.py 2025-01-16 02:26:08.609762563 +0800 +@@ -108,14 +108,14 @@ + + from decimal import Decimal + +-from decoder import JSONDecoder, JSONDecodeError +-from 
encoder import JSONEncoder, JSONEncoderForHTML ++from .decoder import JSONDecoder, JSONDecodeError ++from .encoder import JSONEncoder, JSONEncoderForHTML + def _import_OrderedDict(): + import collections + try: + return collections.OrderedDict + except AttributeError: +- import ordered_dict ++ from . import ordered_dict + return ordered_dict.OrderedDict + OrderedDict = _import_OrderedDict() + +--- a/src/3rdparty/chromium/third_party/simplejson/decoder.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/simplejson/decoder.py 2025-01-16 02:26:08.609762563 +0800 +@@ -87,8 +87,8 @@ + + STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS) + BACKSLASH = { +- '"': u'"', '\\': u'\\', '/': u'/', +- 'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t', ++ '"': '"', '\\': '\\', '/': '/', ++ 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', + } + + DEFAULT_ENCODING = "utf-8" +@@ -117,8 +117,8 @@ + content, terminator = chunk.groups() + # Content is contains zero or more unescaped string characters + if content: +- if not isinstance(content, unicode): +- content = unicode(content, encoding) ++ if not isinstance(content, str): ++ content = str(content, encoding) + _append(content) + # Terminator is the end of string, a literal control character, + # or a backslash denoting that an escape sequence follows +@@ -164,11 +164,11 @@ + uni2 = int(esc2, 16) + uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00)) + next_end += 6 +- char = unichr(uni) ++ char = chr(uni) + end = next_end + # Append the unescaped character + _append(char) +- return u''.join(chunks), end ++ return ''.join(chunks), end + + + # Use speedup if available +@@ -177,10 +177,11 @@ + WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS) + WHITESPACE_STR = ' \t\n\r' + +-def JSONObject((s, end), encoding, strict, scan_once, object_hook, ++def JSONObject(xxx_todo_changeme, encoding, strict, scan_once, object_hook, + object_pairs_hook, memo=None, + _w=WHITESPACE.match, _ws=WHITESPACE_STR): + # Backwards compatibility ++ (s, end) = xxx_todo_changeme + if memo is None: + memo = {} + memo_get = memo.setdefault +@@ -273,7 +274,8 @@ + pairs = object_hook(pairs) + return pairs, end + +-def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR): ++def JSONArray(xxx_todo_changeme1, scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR): ++ (s, end) = xxx_todo_changeme1 + values = [] + nextchar = s[end:end + 1] + if nextchar in _ws: +--- a/src/3rdparty/chromium/third_party/simplejson/encoder.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/simplejson/encoder.py 2025-01-16 02:26:08.609762563 +0800 +@@ -13,7 +13,7 @@ + + from simplejson.decoder import PosInf + +-ESCAPE = re.compile(ur'[\x00-\x1f\\"\b\f\n\r\t\u2028\u2029]') ++ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t\u2028\u2029]') + ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') + HAS_UTF8 = re.compile(r'[\x80-\xff]') + ESCAPE_DCT = { +@@ -24,8 +24,8 @@ + '\n': '\\n', + '\r': '\\r', + '\t': '\\t', +- u'\u2028': '\\u2028', +- u'\u2029': '\\u2029', ++ '\u2028': '\\u2028', ++ '\u2029': '\\u2029', + } + for i in range(0x20): + #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i)) +@@ -41,7 +41,7 @@ + s = s.decode('utf-8') + def replace(match): + return ESCAPE_DCT[match.group(0)] +- return u'"' + ESCAPE.sub(replace, s) + u'"' ++ return '"' + ESCAPE.sub(replace, s) + '"' + + + def py_encode_basestring_ascii(s): +@@ -181,7 +181,7 @@ + self.tuple_as_array = tuple_as_array + self.bigint_as_string = bigint_as_string + 
self.item_sort_key = item_sort_key
+- if indent is not None and not isinstance(indent, basestring):
++ if indent is not None and not isinstance(indent, str):
+ indent = indent * ' '
+ self.indent = indent
+ if separators is not None:
+@@ -221,7 +221,7 @@
+
+ """
+ # This is for extremely simple cases and benchmarks.
+- if isinstance(o, basestring):
++ if isinstance(o, str):
+ if isinstance(o, str):
+ _encoding = self.encoding
+ if (_encoding is not None
+@@ -240,7 +240,7 @@
+ if self.ensure_ascii:
+ return ''.join(chunks)
+ else:
+- return u''.join(chunks)
++ return ''.join(chunks)
+
+ def iterencode(self, o, _one_shot=False):
+ """Encode the given object and yield each string
+@@ -329,7 +329,7 @@
+ if self.ensure_ascii:
+ return ''.join(chunks)
+ else:
+- return u''.join(chunks)
++ return ''.join(chunks)
+
+ def iterencode(self, o, _one_shot=False):
+ chunks = super(JSONEncoderForHTML, self).iterencode(o, _one_shot)
+@@ -348,7 +348,6 @@
+ False=False,
+ True=True,
+ ValueError=ValueError,
+- basestring=basestring,
+ Decimal=Decimal,
+ dict=dict,
+ float=float,
+@@ -356,7 +356,7 @@
+ int=int,
+ isinstance=isinstance,
+ list=list,
+- long=long,
++ long=int,
+ str=str,
+ tuple=tuple,
+ ):
+@@ -387,7 +387,7 @@
+ first = False
+ else:
+ buf = separator
+- if isinstance(value, basestring):
++ if isinstance(value, str):
+ yield buf + _encoder(value)
+ elif value is None:
+ yield buf + 'null'
+@@ -395,7 +395,7 @@
+ yield buf + 'true'
+ elif value is False:
+ yield buf + 'false'
+- elif isinstance(value, (int, long)):
++ elif isinstance(value, int):
+ yield ((buf + str(value))
+ if (not _bigint_as_string or
+ (-1 << 53) < value < (1 << 53))
+@@ -448,15 +448,15 @@
+ item_separator = _item_separator
+ first = True
+ if _item_sort_key:
+- items = dct.items()
++ items = list(dct.items())
+ items.sort(key=_item_sort_key)
+ elif _sort_keys:
+- items = dct.items()
++ items = list(dct.items())
+ items.sort(key=lambda kv: kv[0])
+ else:
+- items = dct.iteritems()
++ items = iter(dct.items())
+ for key, value in items:
+- if isinstance(key, basestring):
++ if isinstance(key, str):
+ pass
+ # JavaScript is weakly typed for these, so it makes sense to
+ # also allow them. Many encoders seem to do something like this. 
+@@ -468,7 +468,7 @@ + key = 'false' + elif key is None: + key = 'null' +- elif isinstance(key, (int, long)): ++ elif isinstance(key, int): + key = str(key) + elif _skipkeys: + continue +@@ -480,7 +480,7 @@ + yield item_separator + yield _encoder(key) + yield _key_separator +- if isinstance(value, basestring): ++ if isinstance(value, str): + yield _encoder(value) + elif value is None: + yield 'null' +@@ -488,7 +488,7 @@ + yield 'true' + elif value is False: + yield 'false' +- elif isinstance(value, (int, long)): ++ elif isinstance(value, int): + yield (str(value) + if (not _bigint_as_string or + (-1 << 53) < value < (1 << 53)) +@@ -521,7 +521,7 @@ + del markers[markerid] + + def _iterencode(o, _current_indent_level): +- if isinstance(o, basestring): ++ if isinstance(o, str): + yield _encoder(o) + elif o is None: + yield 'null' +@@ -529,7 +529,7 @@ + yield 'true' + elif o is False: + yield 'false' +- elif isinstance(o, (int, long)): ++ elif isinstance(o, int): + yield (str(o) + if (not _bigint_as_string or + (-1 << 53) < o < (1 << 53)) +--- a/src/3rdparty/chromium/third_party/simplejson/ordered_dict.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/simplejson/ordered_dict.py 2025-01-16 02:26:08.609762563 +0800 +@@ -66,9 +66,9 @@ + # Modified from original to support Python 2.4, see + # http://code.google.com/p/simplejson/issues/detail?id=53 + if last: +- key = reversed(self).next() ++ key = next(reversed(self)) + else: +- key = iter(self).next() ++ key = next(iter(self)) + value = self.pop(key) + return key, value + +@@ -97,7 +97,7 @@ + def __repr__(self): + if not self: + return '%s()' % (self.__class__.__name__,) +- return '%s(%r)' % (self.__class__.__name__, self.items()) ++ return '%s(%r)' % (self.__class__.__name__, list(self.items())) + + def copy(self): + return self.__class__(self) +@@ -112,7 +112,7 @@ + def __eq__(self, other): + if isinstance(other, OrderedDict): + return len(self)==len(other) and \ +- all(p==q for p, q in zip(self.items(), other.items())) ++ all(p==q for p, q in zip(list(self.items()), list(other.items()))) + return dict.__eq__(self, other) + + def __ne__(self, other): +--- a/src/3rdparty/chromium/third_party/simplejson/tool.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/simplejson/tool.py 2025-01-16 02:26:08.609762563 +0800 +@@ -29,7 +29,7 @@ + obj = json.load(infile, + object_pairs_hook=json.OrderedDict, + use_decimal=True) +- except ValueError, e: ++ except ValueError as e: + raise SystemExit(e) + json.dump(obj, outfile, sort_keys=True, indent=' ', use_decimal=True) + outfile.write('\n') +--- a/src/3rdparty/chromium/third_party/six/src/setup.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/six/src/setup.py 2025-01-16 02:26:08.609762563 +0800 +@@ -18,7 +18,7 @@ + # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + # SOFTWARE. + +-from __future__ import with_statement ++ + + # Six is a dependency of setuptools, so using setuptools creates a + # circular dependency when building a Python stack from source. 
We
+--- a/src/3rdparty/chromium/third_party/six/src/six.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/six/src/six.py 2025-01-16 02:26:08.609762563 +0800
+@@ -20,7 +20,7 @@
+ 
+ """Utilities for writing code that runs on Python 2 and 3"""
+ 
+-from __future__ import absolute_import
++
+ 
+ import functools
+ import itertools
+@@ -46,10 +46,10 @@
+ 
+ MAXSIZE = sys.maxsize
+ else:
+- string_types = basestring,
+- integer_types = (int, long)
+- class_types = (type, types.ClassType)
+- text_type = unicode
++ string_types = str,
++ integer_types = (int, int)
++ class_types = (type, type)
++ text_type = str
+ binary_type = str
+ 
+ if sys.platform.startswith("java"):
+@@ -529,7 +529,7 @@
+ advance_iterator = next
+ except NameError:
+ def advance_iterator(it):
+- return it.next()
++ return it.__next__()
+ next = advance_iterator
+ 
+ 
+@@ -552,7 +552,7 @@
+ Iterator = object
+ else:
+ def get_unbound_function(unbound):
+- return unbound.im_func
++ return unbound.__func__
+ 
+ def create_bound_method(func, obj):
+ return types.MethodType(func, obj, obj.__class__)
+@@ -562,7 +562,7 @@
+ 
+ class Iterator(object):
+ 
+- def next(self):
++ def __next__(self):
+ return type(self).__next__(self)
+ 
+ callable = callable
+@@ -629,7 +629,7 @@
+ 
+ def u(s):
+ return s
+- unichr = chr
++ unichr = chr
+ import struct
+ int2byte = struct.Struct(">B").pack
+ del struct
+@@ -655,8 +655,8 @@
+ # Workaround for standalone backslash
+ 
+ def u(s):
+- return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+- unichr = unichr
++ return str(s.replace(r'\\', r'\\\\'), "unicode_escape")
++ unichr = unichr
+ int2byte = chr
+ 
+ def byte2int(bs):
+@@ -665,8 +665,8 @@
+ def indexbytes(buf, i):
+ return ord(buf[i])
+ iterbytes = functools.partial(itertools.imap, ord)
+- import StringIO
+- StringIO = BytesIO = StringIO.StringIO
++ import io
++ StringIO = BytesIO = io.StringIO
+ _assertCountEqual = "assertItemsEqual"
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+@@ -747,11 +747,11 @@
+ return
+ 
+ def write(data):
+- if not isinstance(data, basestring):
++ if not isinstance(data, str):
+ data = str(data)
+ # If the file has an encoding, encode unicode with it. 
+ if (isinstance(fp, file) and
+- isinstance(data, unicode) and
++ isinstance(data, str) and
+ fp.encoding is not None):
+ errors = getattr(fp, "errors", None)
+ if errors is None:
+@@ -761,13 +761,13 @@
+ want_unicode = False
+ sep = kwargs.pop("sep", None)
+ if sep is not None:
+- if isinstance(sep, unicode):
++ if isinstance(sep, str):
+ want_unicode = True
+ elif not isinstance(sep, str):
+ raise TypeError("sep must be None or a string")
+ end = kwargs.pop("end", None)
+ if end is not None:
+- if isinstance(end, unicode):
++ if isinstance(end, str):
+ want_unicode = True
+ elif not isinstance(end, str):
+ raise TypeError("end must be None or a string")
+@@ -775,12 +775,12 @@
+ raise TypeError("invalid keyword arguments to print()")
+ if not want_unicode:
+ for arg in args:
+- if isinstance(arg, unicode):
++ if isinstance(arg, str):
+ want_unicode = True
+ break
+ if want_unicode:
+- newline = unicode("\n")
+- space = unicode(" ")
++ newline = str("\n")
++ space = str(" ")
+ else:
+ newline = "\n"
+ space = " "
+--- a/src/3rdparty/chromium/third_party/six/src/test_six.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/six/src/test_six.py 2025-01-16 02:26:08.609762563 +0800
+@@ -106,7 +106,7 @@
+ 
+ have_gdbm = True
+ try:
+- import gdbm
++ import dbm.gnu
+ except ImportError:
+ try:
+ import dbm.gnu
+@@ -202,20 +202,20 @@
+ 
+ def test_filter():
+ from six.moves import filter
+- f = filter(lambda x: x % 2, range(10))
++ f = filter(lambda x: x % 2, range(10))
+ assert six.advance_iterator(f) == 1
+ 
+ 
+ def test_filter_false():
+ from six.moves import filterfalse
+- f = filterfalse(lambda x: x % 3, range(10))
++ f = filterfalse(lambda x: x % 3, range(10))
+ assert six.advance_iterator(f) == 0
+ assert six.advance_iterator(f) == 3
+ assert six.advance_iterator(f) == 6
+ 
+ def test_map():
+ from six.moves import map
+- assert six.advance_iterator(map(lambda x: x + 1, range(2))) == 1
++ assert six.advance_iterator(map(lambda x: x + 1, range(2))) == 1
+ 
+ 
+ def test_getoutput():
+@@ -226,12 +226,12 @@
+ 
+ def test_zip():
+ from six.moves import zip
+- assert six.advance_iterator(zip(range(2), range(2))) == (0, 0)
++ assert six.advance_iterator(zip(range(2), range(2))) == (0, 0)
+ 
+ 
+ def test_zip_longest():
+ from six.moves import zip_longest
+- it = zip_longest(range(2), range(1))
++ it = zip_longest(range(2), range(1))
+ 
+ assert six.advance_iterator(it) == (0, 0)
+ assert six.advance_iterator(it) == (1, None)
+@@ -383,7 +383,7 @@
+ del MyDict.iterlists
+ setattr(MyDict, stock_method_name('lists'), f)
+ 
+- d = MyDict(zip(range(10), reversed(range(10))))
++ d = MyDict(zip(range(10), reversed(range(10))))
+ for name in "keys", "values", "items", "lists":
+ meth = getattr(six, "iter" + name)
+ it = meth(d)
+@@ -402,7 +402,7 @@
+ 
+ 
+ def test_dictionary_views():
+- d = dict(zip(range(10), (range(11, 20))))
++ d = dict(zip(range(10), (range(11, 20))))
+ for name in "keys", "values", "items":
+ meth = getattr(six, "view" + name)
+ view = meth(d)
+@@ -410,13 +410,13 @@
+ 
+ 
+ def test_advance_iterator():
+- assert six.next is six.advance_iterator
++ assert six.next is six.advance_iterator
+ l = [1, 2]
+ it = iter(l)
+ assert six.next(it) == 1
+ assert six.next(it) == 2
+- pytest.raises(StopIteration, six.next, it)
+- pytest.raises(StopIteration, six.next, it)
++ pytest.raises(StopIteration, six.next, it)
++ pytest.raises(StopIteration, six.next, it)
+ 
+ 
+ def test_iterator():
+@@ -481,9 +481,9 @@ 
+ 
+ 
+ def test_u():
+- s = six.u("hi \u0439 \U00000439 \\ \\\\ \n")
++ s = six.u("hi \u0439 \U00000439 \\ \\\\ \n")
+ assert isinstance(s, str)
+- assert s == "hi \u0439 \U00000439 \\ \\\\ \n"
++ assert s == "hi \u0439 \U00000439 \\ \\\\ \n"
+ 
+ else:
+ 
+@@ -495,19 +495,19 @@
+ 
+ 
+ def test_u():
+- s = six.u("hi \u0439 \U00000439 \\ \\\\ \n")
+- assert isinstance(s, unicode)
++ s = six.u("hi \u0439 \U00000439 \\ \\\\ \n")
++ assert isinstance(s, str)
+ assert s == "hi \xd0\xb9 \xd0\xb9 \\ \\\\ \n".decode("utf8")
+ 
+ 
+ def test_u_escapes():
+- s = six.u("\u1234")
++ s = six.u("\u1234")
+ assert len(s) == 1
+ 
+ 
+ def test_unichr():
+- assert six.u("\u1234") == six.unichr(0x1234)
+- assert type(six.u("\u1234")) is type(six.unichr(0x1234))
++ assert six.u("\u1234") == six.unichr(0x1234)
++ assert type(six.u("\u1234")) is type(six.unichr(0x1234))
+ 
+ 
+ def test_int2byte():
+@@ -529,7 +529,7 @@
+ it = six.iterbytes(six.b("hi"))
+ assert six.next(it) == ord("h")
+ assert six.next(it) == ord("i")
+- pytest.raises(StopIteration, six.next, it)
++ pytest.raises(StopIteration, six.next, it)
+ 
+ 
+ def test_StringIO():
+@@ -975,7 +975,7 @@
+ 
+ if six.PY2:
+ assert str(my_test) == six.b("hello")
+- assert unicode(my_test) == six.u("hello")
++ assert str(my_test) == six.u("hello")
+ elif six.PY3:
+ assert bytes(my_test) == six.b("hello")
+ assert str(my_test) == six.u("hello")
+@@ -986,7 +986,7 @@
+ class EnsureTests:
+ 
+ # grinning face emoji
+- UNICODE_EMOJI = six.u("\U0001F600")
++ UNICODE_EMOJI = six.u("\U0001F600")
+ BINARY_EMOJI = b"\xf0\x9f\x98\x80"
+ 
+ def test_ensure_binary_raise_type_error(self):
+@@ -1031,9 +1031,9 @@
+ converted_binary = six.ensure_text(self.BINARY_EMOJI, encoding="utf-8", errors='strict')
+ if six.PY2:
+ # PY2: unicode -> unicode
+- assert converted_unicode == self.UNICODE_EMOJI and isinstance(converted_unicode, unicode)
++ assert converted_unicode == self.UNICODE_EMOJI and isinstance(converted_unicode, str)
+ # PY2: str -> unicode
+- assert converted_binary == self.UNICODE_EMOJI and isinstance(converted_unicode, unicode)
++ assert converted_binary == self.UNICODE_EMOJI and isinstance(converted_unicode, str)
+ else:
+ # PY3: str -> str
+ assert converted_unicode == self.UNICODE_EMOJI and isinstance(converted_unicode, str)
+--- a/src/3rdparty/chromium/third_party/six/src/documentation/conf.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/six/src/documentation/conf.py 2025-01-16 02:26:08.609762563 +0800
+@@ -32,8 +32,8 @@
+ master_doc = "index"
+ 
+ # General information about the project.
+-project = u"six"
+-copyright = u"2010-2020, Benjamin Peterson"
++project = "six"
++copyright = "2010-2020, Benjamin Peterson"
+ 
+ sys.path.append(os.path.abspath(os.path.join(".", "..")))
+ from six import __version__ as six_version
+@@ -174,8 +174,8 @@
+ # Grouping the document tree into LaTeX files. List of tuples
+ # (source start file, target name, title, author, documentclass [howto/manual]).
+ latex_documents = [
+- ("index", "six.tex", u"six Documentation",
+- u"Benjamin Peterson", "manual"),
++ ("index", "six.tex", "six Documentation",
++ "Benjamin Peterson", "manual"),
+ ]
+ 
+ # The name of an image file (relative to this directory) to place at the top of
+@@ -207,8 +207,8 @@
+ # One entry per manual page. List of tuples
+ # (source start file, name, description, authors, manual section). 
+ man_pages = [ +- ("index", "six", u"six Documentation", +- [u"Benjamin Peterson"], 1) ++ ("index", "six", "six Documentation", ++ ["Benjamin Peterson"], 1) + ] + + # -- Intersphinx --------------------------------------------------------------- +--- a/src/3rdparty/chromium/third_party/skia/PRESUBMIT.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/PRESUBMIT.py 2025-01-16 02:26:08.609762563 +0800 +@@ -106,7 +106,7 @@ + affected_file_path = affected_file.LocalPath() + if affected_file_path.endswith('.cpp') or affected_file_path.endswith('.h'): + f = open(affected_file_path) +- for line in f.xreadlines(): ++ for line in f: + if is_comment(line) or is_empty_line(line): + continue + # The below will be the first real line after comments and newlines. +--- a/src/3rdparty/chromium/third_party/skia/bench/check_bench_regressions.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/bench/check_bench_regressions.py 2025-01-16 02:26:08.609762563 +0800 +@@ -5,14 +5,14 @@ + ''' + import bench_util + import getopt +-import httplib ++import http.client + import itertools + import json + import os + import re + import sys +-import urllib +-import urllib2 ++import urllib.request, urllib.parse, urllib.error ++import urllib.request, urllib.error, urllib.parse + import xml.sax.saxutils + + # Maximum expected number of characters we expect in an svn revision. +@@ -34,15 +34,15 @@ + def usage(): + """Prints simple usage information.""" + +- print '-a bench representation algorithm to use. ' +- print ' Defaults to "25th". See bench_util.py for details.' +- print '-b name of the builder whose bench data we are checking.' +- print '-d a directory containing bench__ files.' +- print '-e file containing expected bench builder values/ranges.' +- print ' Will raise exception if actual bench values are out of range.' +- print ' See bench_expectations_.txt for data format / examples.' +- print '-r the git commit hash or svn revision for checking ' +- print ' bench values.' ++ print('-a bench representation algorithm to use. ') ++ print(' Defaults to "25th". See bench_util.py for details.') ++ print('-b name of the builder whose bench data we are checking.') ++ print('-d a directory containing bench__ files.') ++ print('-e file containing expected bench builder values/ranges.') ++ print(' Will raise exception if actual bench values are out of range.') ++ print(' See bench_expectations_.txt for data format / examples.') ++ print('-r the git commit hash or svn revision for checking ') ++ print(' bench values.') + + + class Label: +@@ -81,7 +81,7 @@ + return (hash(self.bench) ^ + hash(self.config) ^ + hash(self.time_type) ^ +- hash(frozenset(self.settings.iteritems()))) ++ hash(frozenset(iter(self.settings.items())))) + + def create_bench_dict(revision_data_points): + """Convert current revision data into a dictionary of line data. 
+@@ -172,7 +172,7 @@
+ outputs = []
+ for i in [SLOWER, FASTER]:
+ if exceptions[i]:
+- ratios = exceptions[i].keys()
++ ratios = list(exceptions[i].keys())
+ ratios.sort(reverse=True)
+ li = []
+ for ratio in ratios:
+@@ -196,8 +196,8 @@
+ opts, _ = getopt.getopt(sys.argv[1:],
+ "a:b:d:e:r:",
+ "default-setting=")
+- except getopt.GetoptError, err:
+- print str(err)
++ except getopt.GetoptError as err:
++ print(str(err))
+ usage()
+ sys.exit(2)
+ 
+--- a/src/3rdparty/chromium/third_party/skia/bin/try.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/skia/bin/try.py 2025-01-16 02:26:08.609762563 +0800
+@@ -16,7 +16,7 @@
+ import subprocess
+ import sys
+ import tempfile
+-import urllib2
++import urllib.request, urllib.error, urllib.parse
+ 
+ 
+ BUCKET_SKIA_PRIMARY = 'skia/skia.primary'
+@@ -80,7 +80,7 @@
+ # Tools, this configuration will be present in the git config.
+ branch = subprocess.check_output(['git', 'branch', '--show-current']).rstrip()
+ if not branch:
+- print 'Not on any branch; cannot trigger try jobs.'
++ print('Not on any branch; cannot trigger try jobs.')
+ sys.exit(1)
+ branch_issue_config = 'branch.%s.gerritissue' % branch
+ try:
+@@ -89,7 +89,7 @@
+ except subprocess.CalledProcessError:
+ # Not using Depot Tools. Find the Change-Id line in the most recent commit
+ # and obtain the issue number using that.
+- print '"git cl issue" not set; searching for Change-Id footer.'
++ print('"git cl issue" not set; searching for Change-Id footer.')
+ msg = subprocess.check_output(['git', 'log', '-n1', branch])
+ m = re.search('Change-Id: (I[a-f0-9]+)', msg)
+ if not m:
+@@ -97,9 +97,9 @@
+ ' found in most recent commit message.')
+ sys.exit(1)
+ url = 'https://skia-review.googlesource.com/changes/%s' % m.groups()[0]
+- resp = urllib2.urlopen(url).read()
++ resp = urllib.request.urlopen(url).read().decode('utf-8')
+ issue = str(json.loads('\n'.join(resp.splitlines()[1:]))['_number'])
+- print 'Setting "git cl issue %s"' % issue
++ print('Setting "git cl issue %s"' % issue)
+ subprocess.check_call(['git', 'cl', 'issue', issue])
+ # Load and filter the list of jobs.
+ jobs = []
+@@ -107,7 +107,7 @@
+ with open(tasks_json) as f:
+ tasks_cfg = json.load(f)
+ skia_primary_jobs = []
+- for k, v in tasks_cfg['jobs'].iteritems():
++ for k, v in tasks_cfg['jobs'].items():
+ skia_primary_jobs.append(k)
+ skia_primary_jobs.sort()
+ 
+@@ -127,24 +127,24 @@
+ 
+ # Display the list of jobs.
+ if len(jobs) == 0:
+- print 'Found no jobs matching "%s"' % repr(args.job)
++ print('Found no jobs matching "%s"' % repr(args.job))
+ sys.exit(1)
+ count = 0
+ for bucket, job_list in jobs:
+ count += len(job_list)
+- print 'Found %d jobs:' % count
++ print('Found %d jobs:' % count)
+ for bucket, job_list in jobs:
+- print ' %s:' % bucket
++ print(' %s:' % bucket)
+ for j in job_list:
+- print ' %s' % j
++ print(' %s' % j)
+ if args.list:
+ return
+ 
+ if count > 1:
+ # Prompt before triggering jobs.
+- resp = raw_input('\nDo you want to trigger these jobs? (y/n or i for '
++ resp = input('\nDo you want to trigger these jobs? (y/n or i for '
+ 'interactive): ')
+- print ''
++ print('')
+ if resp != 'y' and resp != 'i':
+ sys.exit(1)
+ if resp == 'i':
+@@ -152,7 +152,7 @@
+ for bucket, job_list in jobs:
+ new_job_list = []
+ for j in job_list:
+- incl = raw_input(('Trigger %s? (y/n): ' % j))
++ incl = input(('Trigger %s? (y/n): ' % j))
+ if incl == 'y':
+ new_job_list.append(j)
+ if len(new_job_list) > 0:
+--- a/src/3rdparty/chromium/third_party/skia/experimental/skottiekit/serve.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/skia/experimental/skottiekit/serve.py 2025-01-16 02:26:08.609762563 +0800
+@@ -3,18 +3,18 @@
+ # Use of this source code is governed by a BSD-style license that can be
+ # found in the LICENSE file.
+ 
+-import SimpleHTTPServer
+-import SocketServer
++import http.server
++import socketserver
+ 
+ PORT = 8001
+ 
+-class Handler(SimpleHTTPServer.SimpleHTTPRequestHandler):
++class Handler(http.server.SimpleHTTPRequestHandler):
+ pass
+ 
+ Handler.extensions_map['.js'] = 'application/javascript'
+ # Without the correct MIME type, async compilation doesn't work
+ Handler.extensions_map['.wasm'] = 'application/wasm'
+ 
+-httpd = SocketServer.TCPServer(("", PORT), Handler)
++httpd = socketserver.TCPServer(("", PORT), Handler)
+ 
+ httpd.serve_forever()
+--- a/src/3rdparty/chromium/third_party/skia/experimental/tools/get_examples.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/skia/experimental/tools/get_examples.py 2025-01-16 02:26:08.609762563 +0800
+@@ -12,8 +12,8 @@
+ import sys
+ 
+ if sys.version_info[0] < 3:
+- from urllib2 import urlopen
+- from HTMLParser import HTMLParser
++ from urllib.request import urlopen
++ from html.parser import HTMLParser
+ def unescape(v): return HTMLParser().unescape(v)
+ else:
+ from urllib.request import urlopen
+--- a/src/3rdparty/chromium/third_party/skia/experimental/tools/mskp_parser.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/skia/experimental/tools/mskp_parser.py 2025-01-16 02:26:08.609762563 +0800
+@@ -7,7 +7,7 @@
+ 
+ # Experimental Skia Multi-Picture Doc parser.
+ 
+-from __future__ import print_function
++
+ 
+ import fileinput
+ import sys
+--- a/src/3rdparty/chromium/third_party/skia/experimental/tools/pdf-comparison.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/skia/experimental/tools/pdf-comparison.py 2025-01-16 02:26:08.609762563 +0800
+@@ -302,7 +302,7 @@
+ sys.stdout.flush()
+ scores[path] = s
+ shard(compare_differing_pngs, differing_pngs)
+- paths = sorted(scores.iterkeys(), key=lambda p: -scores[p])
++ paths = sorted(scores.keys(), key=lambda p: -scores[p])
+ out.write('\n\n')
+ for p in paths:
+ pdfpath = printable_path(tmpdir + '/*/' + p.replace('.0.png', ''))
+--- a/src/3rdparty/chromium/third_party/skia/experimental/wasm-skp-debugger/serve.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/skia/experimental/wasm-skp-debugger/serve.py 2025-01-16 02:26:08.609762563 +0800
+@@ -3,18 +3,18 @@
+ # Use of this source code is governed by a BSD-style license that can be
+ # found in the LICENSE file. 
+ +-import SimpleHTTPServer +-import SocketServer ++import http.server ++import socketserver + + PORT = 8000 + +-class Handler(SimpleHTTPServer.SimpleHTTPRequestHandler): ++class Handler(http.server.SimpleHTTPRequestHandler): + pass + + Handler.extensions_map['.js'] = 'application/javascript' + # Without the correct MIME type, async compilation doesn't work + Handler.extensions_map['.wasm'] = 'application/wasm' + +-httpd = SocketServer.TCPServer(("", PORT), Handler) ++httpd = socketserver.TCPServer(("", PORT), Handler) + + httpd.serve_forever() +--- a/src/3rdparty/chromium/third_party/skia/gn/checkdir.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/gn/checkdir.py 2025-01-16 02:26:08.609762563 +0800 +@@ -5,7 +5,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import os + import sys +--- a/src/3rdparty/chromium/third_party/skia/gn/checkpath.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/gn/checkpath.py 2025-01-16 02:26:08.609762563 +0800 +@@ -3,7 +3,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import os + import sys +--- a/src/3rdparty/chromium/third_party/skia/gn/compile_ib_files.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/gn/compile_ib_files.py 2025-01-16 02:26:08.609762563 +0800 +@@ -42,7 +42,7 @@ + try: + stdout = subprocess.check_output(ibtool_args) + except subprocess.CalledProcessError as e: +- print(e.output) ++ print((e.output)) + raise + current_section_header = None + for line in stdout.splitlines(): +--- a/src/3rdparty/chromium/third_party/skia/gn/compile_processors.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/gn/compile_processors.py 2025-01-16 02:26:08.609762563 +0800 +@@ -34,6 +34,6 @@ + subprocess.check_call(clangFormat + " --sort-includes=false -i \"" + + target + ".cpp\"", shell=True) + except subprocess.CalledProcessError as err: +- print("### Error compiling " + p + ":") +- print(err.output) ++ print(("### Error compiling " + p + ":")) ++ print((err.output)) + exit(1) +--- a/src/3rdparty/chromium/third_party/skia/gn/dehydrate_sksl.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/gn/dehydrate_sksl.py 2025-01-16 02:26:08.609762563 +0800 +@@ -14,7 +14,7 @@ + includes = sys.argv[3:] + + for inc in includes: +- print("Recompiling " + inc + "...") ++ print(("Recompiling " + inc + "...")) + try: + noExt, _ = os.path.splitext(inc) + head, tail = os.path.split(noExt) +@@ -23,6 +23,6 @@ + target = os.path.join(targetDir, tail) + subprocess.check_output([skslc, inc, target + ".dehydrated.sksl"]) + except subprocess.CalledProcessError as err: +- print("### Error compiling " + inc + ":") +- print(err.output) ++ print(("### Error compiling " + inc + ":")) ++ print((err.output)) + exit(1) +--- a/src/3rdparty/chromium/third_party/skia/gn/find_headers.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/gn/find_headers.py 2025-01-16 02:26:08.609762563 +0800 +@@ -5,7 +5,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ +-from __future__ import print_function ++ + + import collections + import json +@@ -48,7 +48,7 @@ + + sources = set() + +-for target in desc_json.values(): ++for target in list(desc_json.values()): + # We'll use `public` headers if they're listed, or pull them from `sources` + # if not. GN sneaks in a default "public": "*" into the JSON if you don't + # set one explicitly. +@@ -76,7 +76,7 @@ + if key not in headers or len(include_path) < len(headers[key].include): + headers[key] = Header(source, include_path) + +-headers = sorted(headers.values(), key=lambda x: x.include) ++headers = sorted(list(headers.values()), key=lambda x: x.include) + + with open(skia_h, 'w') as f: + f.write('// skia.h generated by GN.\n') +--- a/src/3rdparty/chromium/third_party/skia/gn/find_xcode_sysroot.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/gn/find_xcode_sysroot.py 2025-01-16 02:26:08.609762563 +0800 +@@ -5,7 +5,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import subprocess + import sys +--- a/src/3rdparty/chromium/third_party/skia/gn/gn_meta_sln.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/gn/gn_meta_sln.py 2025-01-16 02:26:08.609762563 +0800 +@@ -2,7 +2,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import os + import glob +@@ -30,7 +30,7 @@ + if "') ++ print(('Usage: ' + sys.argv[0] + ' ')) + exit(1) + + json_path = sys.argv[1] +--- a/src/3rdparty/chromium/third_party/skia/gn/highest_version_dir.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/gn/highest_version_dir.py 2025-01-16 02:26:08.609762563 +0800 +@@ -5,7 +5,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import os + import re +--- a/src/3rdparty/chromium/third_party/skia/gn/is_clang.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/gn/is_clang.py 2025-01-16 02:26:08.609762563 +0800 +@@ -5,7 +5,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import subprocess + import sys +--- a/src/3rdparty/chromium/third_party/skia/gn/run_sksllex.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/gn/run_sksllex.py 2025-01-16 02:26:08.609762563 +0800 +@@ -30,5 +30,5 @@ + "/sksl/SkSLLexer.cpp\"", shell=True) + except subprocess.CalledProcessError as err: + print("### Lexer error:") +- print(err.output) ++ print((err.output)) + exit(1) +--- a/src/3rdparty/chromium/third_party/skia/gn/toolchain/num_cpus.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/gn/toolchain/num_cpus.py 2025-01-16 02:26:08.609762563 +0800 +@@ -5,7 +5,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ +-from __future__ import print_function ++ + + import multiprocessing + +--- a/src/3rdparty/chromium/third_party/skia/modules/canvaskit/serve.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/modules/canvaskit/serve.py 2025-01-16 02:26:08.609762563 +0800 +@@ -3,18 +3,18 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-import SimpleHTTPServer +-import SocketServer ++import http.server ++import socketserver + + PORT = 8000 + +-class Handler(SimpleHTTPServer.SimpleHTTPRequestHandler): ++class Handler(http.server.SimpleHTTPRequestHandler): + pass + + Handler.extensions_map['.js'] = 'application/javascript' + # Without the correct MIME type, async compilation doesn't work + Handler.extensions_map['.wasm'] = 'application/wasm' + +-httpd = SocketServer.TCPServer(("", PORT), Handler) ++httpd = socketserver.TCPServer(("", PORT), Handler) + + httpd.serve_forever() +--- a/src/3rdparty/chromium/third_party/skia/modules/pathkit/serve.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/modules/pathkit/serve.py 2025-01-16 02:26:08.609762563 +0800 +@@ -3,18 +3,18 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-import SimpleHTTPServer +-import SocketServer ++import http.server ++import socketserver + + PORT = 8000 + +-class Handler(SimpleHTTPServer.SimpleHTTPRequestHandler): ++class Handler(http.server.SimpleHTTPRequestHandler): + pass + + Handler.extensions_map['.js'] = 'application/javascript' + # Without the correct MIME type, async compilation doesn't work + Handler.extensions_map['.wasm'] = 'application/wasm' + +-httpd = SocketServer.TCPServer(("", PORT), Handler) ++httpd = socketserver.TCPServer(("", PORT), Handler) + + httpd.serve_forever() +--- a/src/3rdparty/chromium/third_party/skia/platform_tools/android/skp_gen/android_skp_capture.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/platform_tools/android/skp_gen/android_skp_capture.py 2025-01-16 02:26:08.609762563 +0800 +@@ -6,7 +6,7 @@ + # found in the LICENSE file. + + +-from __future__ import with_statement ++ + + # Imports the monkeyrunner modules used by this program + from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice +@@ -156,21 +156,21 @@ + + for app_file in app_files: + app = load_app(app_file) +- print app.name +- print ' Package %s' % app.package ++ print(app.name) ++ print(' Package %s' % app.package) + app.launch(device) +- print ' Launched activity %s' % app.activity ++ print(' Launched activity %s' % app.activity) + + for action in app.actions: +- print ' %s' % action.__class__.__name__ ++ print(' %s' % action.__class__.__name__) + action.run(device) + + time.sleep(WAIT_FOR_SKP_CAPTURE) +- print ' Capturing SKP.' ++ print(' Capturing SKP.') + skp_file = '%s.skp' % app.name + capture_skp(skp_file, app.package, device) +- print ' Wrote SKP to %s' % skp_file +- print ++ print(' Wrote SKP to %s' % skp_file) ++ print() + app.kill() + + +--- a/src/3rdparty/chromium/third_party/skia/specs/web-img-decode/proposed/serve.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/specs/web-img-decode/proposed/serve.py 2025-01-16 02:26:08.609762563 +0800 +@@ -3,18 +3,18 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ 
+-import SimpleHTTPServer
+-import SocketServer
++import http.server
++import socketserver
+ 
+ PORT = 8005
+ 
+-class Handler(SimpleHTTPServer.SimpleHTTPRequestHandler):
++class Handler(http.server.SimpleHTTPRequestHandler):
+ pass
+ 
+ Handler.extensions_map['.js'] = 'application/javascript'
+ # Without the correct MIME type, async compilation doesn't work
+ Handler.extensions_map['.wasm'] = 'application/wasm'
+ 
+-httpd = SocketServer.TCPServer(("", PORT), Handler)
++httpd = socketserver.TCPServer(("", PORT), Handler)
+ 
+ httpd.serve_forever()
+--- a/src/3rdparty/chromium/third_party/skia/third_party/icu/make_data_cpp.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/skia/third_party/icu/make_data_cpp.py 2025-01-16 02:26:08.609762563 +0800
+@@ -10,7 +10,7 @@
+ Output type is C++.
+ '''
+ 
+-from __future__ import print_function
++
+ 
+ import os
+ import struct
+@@ -35,7 +35,7 @@
+ with open(dst_path, 'w') as o:
+ o.write(header.format(name))
+ while True:
+- line = ','.join('%d' % v for _, v in zip(range(8), src))
++ line = ','.join('%d' % v for _, v in zip(range(8), src))
+ if not line:
+ break
+ o.write('%s%s%s\n' % (line_begin, line, line_end))
+--- a/src/3rdparty/chromium/third_party/skia/tools/BUILD_simulator.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/skia/tools/BUILD_simulator.py 2025-01-16 02:26:08.609762563 +0800
+@@ -87,11 +87,11 @@
+ 'EXTERNAL_INCLUDES': [],
+ }
+ local_names = {}
+-execfile('BUILD.public', global_names, local_names)
++exec(compile(open('BUILD.public', "rb").read(), 'BUILD.public', 'exec'), global_names, local_names)
+ 
+ with open('tools/BUILD.public.expected', 'w') as out:
+- print >>out, "This file is auto-generated by tools/BUILD_simulator.py."
+- print >>out, "It expands BUILD.public to make it easy to see changes."
++ print("This file is auto-generated by tools/BUILD_simulator.py.", file=out)
++ print("It expands BUILD.public to make it easy to see changes.", file=out)
+ for name, value in sorted(local_names.items()):
+- print >>out, name, '= ',
++ print(name, '= ', end=' ', file=out)
+ pprint.pprint(value, out)
+--- a/src/3rdparty/chromium/third_party/skia/tools/abandon_gerrit_cls.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/skia/tools/abandon_gerrit_cls.py 2025-01-16 02:26:08.609762563 +0800
+@@ -15,8 +15,8 @@
+ import subprocess
+ import sys
+ 
+-from infra import git
+-from infra import go
++from infra import git
++from infra import go
+ 
+ 
+ def run_abandon_cls(args):
+--- a/src/3rdparty/chromium/third_party/skia/tools/add_codereview_message.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/skia/tools/add_codereview_message.py 2025-01-16 02:26:08.609762563 +0800
+@@ -23,7 +23,7 @@
+ import optparse
+ import sys
+ 
+-import fix_pythonpath # pylint: disable=W0611
++import fix_pythonpath # pylint: disable=W0611
+ from common.py.utils import find_depot_tools # pylint: disable=W0611
+ import rietveld
+ 
+--- a/src/3rdparty/chromium/third_party/skia/tools/build_command_buffer.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/skia/tools/build_command_buffer.py 2025-01-16 02:26:08.609762563 +0800
+@@ -159,7 +159,7 @@
+ if not os.path.isfile(shared_lib_dst):
+ sys.exit('Command buffer library not copied to ' + shared_lib_dst)
+ 
+- print('Command buffer library copied to ' + shared_lib_dst)
++ print('Command buffer library copied to ' + shared_lib_dst)
+ 
+ 
+ if __name__ == '__main__':
+--- a/src/3rdparty/chromium/third_party/skia/tools/chrome_release_branch.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/skia/tools/chrome_release_branch.py 2025-01-16 02:26:08.609762563 +0800
+@@ -11,8 +11,8 @@
+ import subprocess
+ import sys
+ 
+-from infra import git
+-from infra import go
++from infra import git
++from infra import go
+ 
+ _TOOLS_DIR = os.path.dirname(os.path.abspath(__file__))
+ _REPO_ROOT = os.path.realpath(os.path.join(_TOOLS_DIR, os.pardir))
+@@ -40,8 +40,8 @@
+ m = re.match(SK_MILESTONE_RE, line)
+ if m:
+ return int(m.groups()[0])
+- print >> sys.stderr, (
+- 'Failed to parse %s; has the format changed?' % SK_MILESTONE_H)
++ print((
++ 'Failed to parse %s; has the format changed?' % SK_MILESTONE_H), file=sys.stderr)
+ sys.exit(1)
+ 
+ 
+@@ -72,8 +72,8 @@
+ '''Create a CL to add infra support for the new branch and remove the old.'''
+ owner = git.git('config', 'user.email').rstrip()
+ if not owner:
+- print >> sys.stderr, ('No configured git user; please run '
+- '"git config user.email ".')
++ print(('No configured git user; please run '
++ '"git config user.email ".'), file=sys.stderr)
+ sys.exit(1)
+ go.mod_download()
+ go.install(go.INFRA_GO+'/go/supported_branches/cmd/new-branch')
+@@ -89,15 +89,15 @@
+ 
+ def main():
+ if len(sys.argv) != 2 or '--help' in sys.argv or '-h' in sys.argv:
+- print >> sys.stderr, 'Usage: %s ' % sys.argv[0]
++ print('Usage: %s ' % sys.argv[0], file=sys.stderr)
+ sys.exit(1)
+ go.check()
+ branch_at = sys.argv[1]
+ m = get_current_milestone()
+ new_branch = '%s%d' % (CHROME_REF_PREFIX, m)
+ old_branch = '%s%d' % (CHROME_REF_PREFIX, m-SUPPORTED_CHROME_BRANCHES)
+- print 'Creating branch %s and removing support (eg. CQ) for %s' % (
+- new_branch, old_branch)
++ print('Creating branch %s and removing support (eg. CQ) for %s' % (
++ new_branch, old_branch))
+ create_new_branch(new_branch, branch_at)
+ update_milestone(m+1)
+ update_infra_config(old_branch, new_branch)
+--- a/src/3rdparty/chromium/third_party/skia/tools/compare_codereview.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/skia/tools/compare_codereview.py 2025-01-16 02:26:08.609762563 +0800
+@@ -18,11 +18,11 @@
+ import os
+ import re
+ import sys
+-import urllib2
+-import HTMLParser
++import urllib.request, urllib.error, urllib.parse
++import html.parser
+ 
+ 
+-class CodeReviewHTMLParser(HTMLParser.HTMLParser):
++class CodeReviewHTMLParser(html.parser.HTMLParser):
+ """Parses CodeReview web page. 
+ 
+ Use the CodeReviewHTMLParser.parse static function to make use of
+@@ -48,9 +48,9 @@
+ """
+ parser = CodeReviewHTMLParser()
+ try:
+- parser.feed(urllib2.urlopen(url).read())
+- except (urllib2.URLError,):
+- print >> sys.stderr, 'Error getting', url
++ parser.feed(urllib.request.urlopen(url).read().decode('utf-8'))
++ except (urllib.error.URLError,):
++ print('Error getting', url, file=sys.stderr)
+ return None
+ parser.close()
+ return parser.statuses
+@@ -60,7 +60,7 @@
+ Status = collections.namedtuple('Status', ['status', 'url'])
+ 
+ def __init__(self):
+- HTMLParser.HTMLParser.__init__(self)
++ html.parser.HTMLParser.__init__(self)
+ self._id = None
+ self._status = None
+ self._href = None
+@@ -144,7 +144,7 @@
+ self._href = None
+ 
+ 
+-class BuilderHTMLParser(HTMLParser.HTMLParser):
++class BuilderHTMLParser(html.parser.HTMLParser):
+ """parses Trybot web pages.
+ 
+ Use the BuilderHTMLParser.parse static function to make use of
+@@ -169,9 +169,9 @@
+ """
+ parser = BuilderHTMLParser()
+ try:
+- parser.feed(urllib2.urlopen(url).read())
+- except (urllib2.URLError,):
+- print >> sys.stderr, 'Error getting', url
++ parser.feed(urllib.request.urlopen(url).read().decode('utf-8'))
++ except (urllib.error.URLError,):
++ print('Error getting', url, file=sys.stderr)
+ return []
+ parser.close()
+ return parser.failure_results
+@@ -179,7 +179,7 @@
+ Result = collections.namedtuple('Result', ['text', 'url'])
+ 
+ def __init__(self):
+- HTMLParser.HTMLParser.__init__(self)
++ html.parser.HTMLParser.__init__(self)
+ self.failure_results = []
+ self._current_failure_result = None
+ self._divlevel = None
+@@ -327,9 +327,9 @@
+ roll = CodeReviewHTMLParser.parse(roll_url)
+ all_bots = set(control) & set(roll) # Set intersection.
+ if not all_bots:
+- print >> sys.stderr, (
++ print((
+ 'Error: control %s and roll %s have no common trybots.' 
+- % (list(control), list(roll))) ++ % (list(control), list(roll))), file=sys.stderr) + return + + control_name = '[control %s]' % control_url.split('/')[-1] +@@ -407,7 +407,7 @@ + + if __name__ == '__main__': + if len(sys.argv) < 3: +- print >> sys.stderr, __doc__ ++ print(__doc__, file=sys.stderr) + exit(1) + main(sys.argv[1], sys.argv[2], + int(os.environ.get('COMPARE_CODEREVIEW_VERBOSITY', 1))) +--- a/src/3rdparty/chromium/third_party/skia/tools/jsondiff.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/jsondiff.py 2025-01-16 02:26:08.609762563 +0800 +@@ -20,7 +20,7 @@ + import json + import os + import sys +-import urllib2 ++import urllib.request, urllib.error, urllib.parse + + # Imports from within Skia + # +@@ -53,7 +53,7 @@ + if filepath is None: + return None + elif filepath.startswith('http:') or filepath.startswith('https:'): +- return urllib2.urlopen(filepath).read() ++ return urllib.request.urlopen(filepath).read() + else: + return open(filepath, 'r').read() + +@@ -85,7 +85,7 @@ + if not all_expectations: + return result_dict + +- for test_name in all_expectations.keys(): ++ for test_name in list(all_expectations.keys()): + test_expectations = all_expectations[test_name] + allowed_digests = test_expectations[ + gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS] +@@ -125,10 +125,10 @@ + result_dict = {} + json_dict = gm_json.LoadFromString(contents) + all_result_types = json_dict[gm_json.JSONKEY_ACTUALRESULTS] +- for result_type in all_result_types.keys(): ++ for result_type in list(all_result_types.keys()): + results_of_this_type = all_result_types[result_type] + if results_of_this_type: +- for test_name in results_of_this_type.keys(): ++ for test_name in list(results_of_this_type.keys()): + digest_pair = results_of_this_type[test_name] + if (digest_pair[0] != + gm_json.JSONKEY_HASHTYPE_BITMAP_64BITMD5): +@@ -142,7 +142,7 @@ + """Generate a dictionary showing diffs between old_dict and new_dict. 
Any entries which are identical across them will be left out."""
+ diff_dict = {}
+- all_keys = set(old_dict.keys() + new_dict.keys())
++ all_keys = set(list(old_dict.keys()) + list(new_dict.keys()))
+ for key in all_keys:
+ if old_dict.get(key) != new_dict.get(key):
+ new_entry = {}
+--- a/src/3rdparty/chromium/third_party/skia/tools/merge_static_libs.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/skia/tools/merge_static_libs.py 2025-01-16 02:26:08.609762563 +0800
+@@ -10,7 +10,7 @@
+ import tempfile
+ 
+ def _Usage():
+- print 'Usage: merge_static_libs OUTPUT_LIB INPUT_LIB [INPUT_LIB]*'
++ print('Usage: merge_static_libs OUTPUT_LIB INPUT_LIB [INPUT_LIB]*')
+ sys.exit(1)
+ 
+ def MergeLibs(in_libs, out_lib):
+--- a/src/3rdparty/chromium/third_party/skia/tools/parse_llvm_coverage.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/skia/tools/parse_llvm_coverage.py 2025-01-16 02:26:08.609762563 +0800
+@@ -49,8 +49,8 @@
+ if len(matched) == 1:
+ return matched[0]
+ elif len(matched) > 1:
+- print >> sys.stderr, ('WARNING: multiple matches for %s; skipping:\n\t%s'
+- % (new_file, '\n\t'.join(matched)))
++ print(('WARNING: multiple matches for %s; skipping:\n\t%s'
++ % (new_file, '\n\t'.join(matched))), file=sys.stderr)
+ return None
+ 
+ 
+@@ -143,7 +143,7 @@
+ raise Exception('Invalid key/value pairs: %s' % kv_list)
+ 
+ rv = {}
+- for i in xrange(len(kv_list) / 2):
++ for i in range(len(kv_list) // 2):
+ rv[kv_list[i*2]] = kv_list[i*2+1]
+ return rv
+ 
+@@ -151,7 +151,7 @@
+ def _get_per_file_summaries(line_by_line):
+ """Summarize the full line-by-line coverage report by file."""
+ per_file = []
+- for filepath, lines in line_by_line.iteritems():
++ for filepath, lines in line_by_line.items():
+ total_lines = 0
+ covered_lines = 0
+ for _, cov, _ in lines:
+--- a/src/3rdparty/chromium/third_party/skia/tools/reformat-json.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/skia/tools/reformat-json.py 2025-01-16 02:26:08.609762563 +0800
+@@ -39,7 +39,7 @@
+ import gm_json
+ 
+ def Reformat(filename):
+- print 'Reformatting file %s...' % filename
++ print('Reformatting file %s...' % filename)
+ gm_json.WriteToFile(gm_json.LoadFromFile(filename), filename)
+ 
+ def _Main():
+--- a/src/3rdparty/chromium/third_party/skia/tools/retrieve_from_googlesource.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/skia/tools/retrieve_from_googlesource.py 2025-01-16 02:26:08.609762563 +0800
+@@ -12,7 +12,7 @@
+ 
+ import base64
+ import sys
+-import urllib2
++import urllib.request, urllib.error, urllib.parse
+ 
+ 
+ def get(repo_url, filepath):
+@@ -26,12 +26,12 @@
+ string; the contents of the given file.
+ """
+ base64_url = '/'.join((repo_url, '+', 'master', filepath)) + '?format=TEXT'
+- with closing(urllib2.urlopen(base64_url)) as f:
++ with closing(urllib.request.urlopen(base64_url)) as f:
+ return base64.b64decode(f.read())
+ 
+ 
+ if __name__ == '__main__':
+ if len(sys.argv) != 3:
+- print >> sys.stderr, 'Usage: %s ' % sys.argv[0]
++ print('Usage: %s ' % sys.argv[0], file=sys.stderr)
+ sys.exit(1)
+ sys.stdout.write(get(sys.argv[1], sys.argv[2]))
+--- a/src/3rdparty/chromium/third_party/skia/tools/rewrite_includes.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/skia/tools/rewrite_includes.py 2025-01-16 02:26:08.609762563 +0800
+@@ -5,7 +5,7 @@
+ # Use of this source code is governed by a BSD-style license that can be
+ # found in the LICENSE file. 
+ 
+-import StringIO
++import io
+ import argparse
+ import os
+ import sys
+@@ -53,7 +53,7 @@
+ for file_name in files:
+ if file_name.endswith('.h'):
+ if file_name in headers:
+- print path, file_name, headers[file_name]
++ print(path, file_name, headers[file_name])
+ assert file_name not in headers
+ headers[file_name] = os.path.abspath(os.path.join(path, file_name))
+ 
+@@ -86,7 +86,7 @@
+ lines = open(file_path).readlines()
+ 
+ # Write it back out again line by line with substitutions for #includes.
+- output = StringIO.StringIO() if args.dry_run else open(file_path, 'wb')
++ output = io.StringIO() if args.dry_run else open(file_path, 'w')
+ 
+ includes = []
+ for line in lines:
+@@ -109,9 +109,9 @@
+ output.close()
+ 
+ if need_rewriting:
+- print 'Some files need rewritten #includes:'
++ print('Some files need rewritten #includes:')
+ for path in need_rewriting:
+- print '\t' + path
+- print 'To do this automatically, run'
+- print 'python tools/rewrite_includes.py ' + ' '.join(need_rewriting)
++ print('\t' + path)
++ print('To do this automatically, run')
++ print('python tools/rewrite_includes.py ' + ' '.join(need_rewriting))
+ sys.exit(1)
+--- a/src/3rdparty/chromium/third_party/skia/tools/sanitize_source_files.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/skia/tools/sanitize_source_files.py 2025-01-16 02:26:08.609762563 +0800
+@@ -6,7 +6,7 @@
+ """Module that sanitizes source files with specified modifiers."""
+ 
+ 
+-import commands
++import subprocess
+ import os
+ import sys
+ 
+@@ -72,7 +72,7 @@
+ f.write(new_content)
+ finally:
+ f.close()
+- print 'Made changes to %s' % full_item_path
++ print('Made changes to %s' % full_item_path)
+ 
+ elif item not in _SUBDIRS_TO_IGNORE:
+ # Item is a directory recursively call the method. 
+@@ -86,21 +86,21 @@ + """Strips out trailing whitespaces from the specified line.""" + stripped_line = line.rstrip() + '\n' + if line != stripped_line: +- print 'Removing trailing whitespace in %s:%s' % (file_path, line_number) ++ print('Removing trailing whitespace in %s:%s' % (file_path, line_number)) + return stripped_line + + + def CrlfReplacer(line, file_path, line_number): + """Replaces CRLF with LF.""" + if '\r\n' in line: +- print 'Replacing CRLF with LF in %s:%s' % (file_path, line_number) ++ print('Replacing CRLF with LF in %s:%s' % (file_path, line_number)) + return line.replace('\r\n', '\n') + + + def TabReplacer(line, file_path, line_number): + """Replaces Tabs with 4 whitespaces.""" + if '\t' in line: +- print 'Replacing Tab with whitespace in %s:%s' % (file_path, line_number) ++ print('Replacing Tab with whitespace in %s:%s' % (file_path, line_number)) + return line.replace('\t', ' ') + + +@@ -119,16 +119,16 @@ + if file_content and (file_content[-1] != '\n' or file_content[-2:-1] == '\n'): + file_content = file_content.rstrip() + file_content += '\n' +- print 'Added exactly one newline to %s' % file_path ++ print('Added exactly one newline to %s' % file_path) + return file_content + + + def SvnEOLChecker(file_content, file_path): + """Sets svn:eol-style property to LF.""" +- output = commands.getoutput( ++ output = subprocess.getoutput( + 'svn propget svn:eol-style %s' % file_path) + if output != 'LF': +- print 'Setting svn:eol-style property to LF in %s' % file_path ++ print('Setting svn:eol-style property to LF in %s' % file_path) + os.system('svn ps svn:eol-style LF %s' % file_path) + return file_content + +--- a/src/3rdparty/chromium/third_party/skia/tools/test_all.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/test_all.py 2025-01-16 02:26:08.609762563 +0800 +@@ -17,7 +17,7 @@ + suite = unittest.TestLoader().discover(os.path.dirname(__file__), + pattern='*_test.py') + results = unittest.TextTestRunner(verbosity=2).run(suite) +- print repr(results) ++ print(repr(results)) + if not results.wasSuccessful(): + raise Exception('failed one or more unittests') + +--- a/src/3rdparty/chromium/third_party/skia/tools/android/measure_fps.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/android/measure_fps.py 2025-01-16 02:26:08.609762563 +0800 +@@ -35,7 +35,7 @@ + endframe = query_surfaceflinger_frame_count() + endtime = time.time() + fps = (endframe - startframe) / (endtime - starttime) +- print "%.2f" % fps ++ print("%.2f" % fps) + + startframe = endframe + starttime = endtime +--- a/src/3rdparty/chromium/third_party/skia/tools/android/upload_to_android.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/android/upload_to_android.py 2025-01-16 02:26:08.609762563 +0800 +@@ -33,7 +33,7 @@ + import os + import subprocess + import stat +-import urllib2 ++import urllib.request, urllib.error, urllib.parse + + + REPO_TOOL_URL = 'https://storage.googleapis.com/git-repo-downloads/repo' +@@ -45,7 +45,7 @@ + + + def get_change_details(change_num): +- response = urllib2.urlopen('%s/changes/%s/detail?o=ALL_REVISIONS' % ( ++ response = urllib.request.urlopen('%s/changes/%s/detail?o=ALL_REVISIONS' % ( + SKIA_GERRIT_INSTANCE, change_num), timeout=5) + content = response.read() + # Remove the first line which contains ")]}'\n". 
+@@ -54,18 +54,18 @@ + + def init_work_dir(work_dir): + if not os.path.isdir(work_dir): +- print 'Creating %s' % work_dir ++ print('Creating %s' % work_dir) + os.makedirs(work_dir) + + # Ensure the repo tool exists in the work_dir. + repo_dir = os.path.join(work_dir, 'bin') + repo_binary = os.path.join(repo_dir, 'repo') + if not os.path.isdir(repo_dir): +- print 'Creating %s' % repo_dir ++ print('Creating %s' % repo_dir) + os.makedirs(repo_dir) + if not os.path.exists(repo_binary): +- print 'Downloading %s from %s' % (repo_binary, REPO_TOOL_URL) +- response = urllib2.urlopen(REPO_TOOL_URL, timeout=5) ++ print('Downloading %s from %s' % (repo_binary, REPO_TOOL_URL)) ++ response = urllib.request.urlopen(REPO_TOOL_URL, timeout=5) + content = response.read() + with open(repo_binary, 'w') as f: + f.write(content) +@@ -76,24 +76,24 @@ + # Create android-repo directory in the work_dir. + android_dir = os.path.join(work_dir, 'android-repo') + if not os.path.isdir(android_dir): +- print 'Creating %s' % android_dir ++ print('Creating %s' % android_dir) + os.makedirs(android_dir) + +- print """ ++ print(""" + + About to run repo init. If it hangs asking you to run glogin then please: + * Exit the script (ctrl-c). + * Run 'glogin'. + * Re-run the script. + +-""" ++""") + os.chdir(android_dir) + subprocess.check_call( + '%s init -u %s/a/platform/manifest -g "all,-notdefault,-darwin" ' + '-b master --depth=1' + % (repo_binary, ANDROID_REPO_URL), shell=True) + +- print 'Syncing the Android checkout at %s' % android_dir ++ print('Syncing the Android checkout at %s' % android_dir) + subprocess.check_call('%s sync %s tools/repohooks -j 32 -c' % ( + repo_binary, SKIA_PATH_IN_ANDROID), shell=True) + +@@ -214,7 +214,7 @@ + # Upload to Android Gerrit. + subprocess.check_call('%s upload --verify' % repo_binary, shell=True) + +- print modifier.get_user_msg() ++ print(modifier.get_user_msg()) + finally: + # Abandon repo branch. + subprocess.call('%s abandon %s' % (repo_binary, REPO_BRANCH_NAME), +--- a/src/3rdparty/chromium/third_party/skia/tools/calmbench/ab.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/calmbench/ab.py 2025-01-16 02:26:08.609762563 +0800 +@@ -188,7 +188,7 @@ + + if len(exceptions): + for exc in exceptions: +- print exc ++ print(exc) + raise exceptions[0] + + +@@ -214,7 +214,7 @@ + if should_run: + if i > 0: + timesLock.acquire() +- print "Init run %d for %s..." % (i, name) ++ print("Init run %d for %s..." % (i, name)) + timesLock.release() + subprocess.check_call(["touch", file_i]) + with open(file_i, 'w') as f: +@@ -250,7 +250,7 @@ + # test in the future. 
+ def get_suspects(): + suspects = [] +- for bench in timesA.keys(): ++ for bench in list(timesA.keys()): + if bench not in timesB: + continue + lowerA, upperA = get_lower_upper(timesA[bench]) +@@ -268,7 +268,7 @@ + + + def suspects_arg(suspects): +- patterns = map(process_bench_pattern, suspects) ++ patterns = list(map(process_bench_pattern, suspects)) + return " --match " + (" ".join(patterns)) + + +@@ -316,7 +316,7 @@ + if (len(suspects) == 0 or it - last_unchanged_iter >= TERM): + break + +- print "Number of suspects at iteration %d: %d" % (it, len(suspects)) ++ print("Number of suspects at iteration %d: %d" % (it, len(suspects))) + threadRunner = ThreadRunner(args) + for j in range(1, max(1, args.threads / 2) + 1): + run(args, threadRunner, args.a, args.nano_a, +@@ -328,19 +328,19 @@ + + suspects = get_suspects() + if len(suspects) == 0: +- print ("%s and %s does not seem to have significant " + \ +- "performance differences.") % (args.a, args.b) ++ print(("%s and %s does not seem to have significant " + \ ++ "performance differences.") % (args.a, args.b)) + else: + suspects.sort(key = regression) +- print "%s (compared to %s) is likely" % (args.a, args.b) ++ print("%s (compared to %s) is likely" % (args.a, args.b)) + for suspect in suspects: + r = regression(suspect) + if r < 1: +- print "\033[31m %s slower in %s\033[0m" % \ +- (format_r(1/r), suspect) ++ print("\033[31m %s slower in %s\033[0m" % \ ++ (format_r(1/r), suspect)) + else: +- print "\033[32m %s faster in %s\033[0m" % \ +- (format_r(r), suspect) ++ print("\033[32m %s faster in %s\033[0m" % \ ++ (format_r(r), suspect)) + + with open("%s/bench_%s_%s.json" % (args.outdir, args.a, args.b), 'w') as f: + results = {} +@@ -371,13 +371,13 @@ + keys[args.keys[i * 2]] = args.keys[i * 2 + 1] + output["key"] = keys + f.write(json.dumps(output, indent=4)) +- print ("\033[36mJSON results available in %s\033[0m" % f.name) ++ print(("\033[36mJSON results available in %s\033[0m" % f.name)) + + with open("%s/bench_%s_%s.csv" % (args.outdir, args.a, args.b), 'w') as out: + out.write(("bench, significant?, raw regresion, " + + "%(A)s quantile (ns), %(B)s quantile (ns), " + + "%(A)s (ns), %(B)s (ns)\n") % {'A': args.a, 'B': args.b}) +- for bench in suspects + timesA.keys(): ++ for bench in suspects + list(timesA.keys()): + if (bench not in timesA or bench not in timesB): + continue + ta = timesA[bench] +@@ -388,20 +388,20 @@ + ' '.join(map(str, get_lower_upper(tb))) + ", " + + ("%s, %s\n" % (' '.join(map(str, ta)), ' '.join(map(str, tb)))) + ) +- print (("\033[36m" + ++ print((("\033[36m" + + "Compared %d benches. " + + "%d of them seem to be significantly differrent." 
+ + "\033[0m") % +- (len([x for x in timesA if x in timesB]), len(suspects))) +- print ("\033[36mPlease see detailed bench results in %s\033[0m" % +- out.name) ++ (len([x for x in timesA if x in timesB]), len(suspects)))) ++ print(("\033[36mPlease see detailed bench results in %s\033[0m" % ++ out.name)) + + + if __name__ == "__main__": + try: + test() + except Exception as e: +- print e +- print HELP ++ print(e) ++ print(HELP) + traceback.print_exc() + raise e +--- a/src/3rdparty/chromium/third_party/skia/tools/calmbench/calmbench.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/calmbench/calmbench.py 2025-01-16 02:26:08.609762563 +0800 +@@ -38,7 +38,7 @@ + + def parse_args(): + if len(sys.argv) <= 1 or sys.argv[1] == '-h' or sys.argv[1] == '--help': +- print README ++ print(README) + + parser = ArgumentParser( + description='Noiselessly (hence calm) becnhmark a git branch against ' + +@@ -125,7 +125,7 @@ + + + def compile_branch(args, branch): +- print "Compiling branch %s" % args.branch ++ print("Compiling branch %s" % args.branch) + + commands = [ + ['git', 'checkout', branch], +@@ -137,14 +137,14 @@ + + + def compile_modified(args): +- print "Compiling modified code" ++ print("Compiling modified code") + subprocess.check_call( + ['ninja', '-C', args.ninjadir, 'nanobench'], cwd=args.skiadir) + subprocess.check_call( + ['cp', args.ninjadir + '/nanobench', nano_path(args, args.branch)], + cwd=args.skiadir) + +- print "Compiling stashed code" ++ print("Compiling stashed code") + stash_output = subprocess.check_output(['git', 'stash'], cwd=args.skiadir) + if 'No local changes to save' in stash_output: + subprocess.check_call(['git', 'reset', 'HEAD^', '--soft']) +@@ -209,7 +209,7 @@ + try: + p.terminate() + except OSError as e: +- print e ++ print(e) + + + if __name__ == "__main__": +--- a/src/3rdparty/chromium/third_party/skia/tools/copyright/main.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/copyright/main.py 2025-01-16 02:26:08.609762563 +0800 +@@ -75,7 +75,7 @@ + def ReportWarning(text): + """Report a warning, but continue. + """ +- print 'warning: %s' % text ++ print('warning: %s' % text) + + def ReportError(text): + """Report an error and raise an exception. +--- a/src/3rdparty/chromium/third_party/skia/tools/fonts/generate_fir_coeff.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/fonts/generate_fir_coeff.py 2025-01-16 02:26:08.609762563 +0800 +@@ -98,7 +98,7 @@ + delta = 1 + + if delta: +- print "Initial sum is 0x%0.2X, adjusting." % (coeffs_rounded_sum,) ++ print("Initial sum is 0x%0.2X, adjusting." % (coeffs_rounded_sum,)) + coeff_diff = [(coeff_rounded - coeff) * delta + for coeff, coeff_rounded in zip(coeffs, coeffs_rounded)] + +@@ -119,15 +119,15 @@ + # * an awful lot of the curve is out side our sample + # either is pretty bad, and probably means the results will not be useful. + num_elements_to_force_round = abs(coeffs_rounded_sum - target_sum) +- for i in xrange(num_elements_to_force_round): +- print "Adding %d to index %d to force round %f." % ( +- delta, coeff_pkg[i].index, coeffs[coeff_pkg[i].index]) ++ for i in range(num_elements_to_force_round): ++ print("Adding %d to index %d to force round %f." % ( ++ delta, coeff_pkg[i].index, coeffs[coeff_pkg[i].index])) + coeffs_rounded[coeff_pkg[i].index] += delta + +- print "Prepending %d 0x00 for allignment." % (sample_align,) ++ print("Prepending %d 0x00 for allignment." 
% (sample_align,)) + coeffs_rounded_aligned = ([0] * int(sample_align)) + coeffs_rounded + +- print ', '.join(["0x%0.2X" % coeff_rounded +- for coeff_rounded in coeffs_rounded_aligned]) +- print sum(coeffs), hex(sum(coeffs_rounded)) +- print ++ print(', '.join(["0x%0.2X" % coeff_rounded ++ for coeff_rounded in coeffs_rounded_aligned])) ++ print(sum(coeffs), hex(sum(coeffs_rounded))) ++ print() +--- a/src/3rdparty/chromium/third_party/skia/tools/gdb/bitmap.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/gdb/bitmap.py 2025-01-16 02:26:08.609762563 +0800 +@@ -80,9 +80,9 @@ + # Fails on premultiplied alpha, it cannot convert to RGB. + image.show() + else: +- print ("Need to add support for %s %s." % ( ++ print(("Need to add support for %s %s." % ( + str(ColorType(int(color_type))), + str(AlphaType(int(alpha_type))) +- )) ++ ))) + + sk_bitmap() +--- a/src/3rdparty/chromium/third_party/skia/tools/infra/go.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/infra/go.py 2025-01-16 02:26:08.609762563 +0800 +@@ -18,7 +18,7 @@ + def check(): + '''Verify that golang is properly installed. If not, exit with an error.''' + def _fail(msg): +- print >> sys.stderr, msg ++ print(msg, file=sys.stderr) + sys.exit(1) + + try: +--- a/src/3rdparty/chromium/third_party/skia/tools/malisc/malisc.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/malisc/malisc.py 2025-01-16 02:26:08.609762563 +0800 +@@ -8,7 +8,7 @@ + import sys + + if len(sys.argv) != 3: +- print sys.argv[0], ' ' ++ print(sys.argv[0], ' ') + sys.exit(1) + + compiler = sys.argv[1] +@@ -34,7 +34,7 @@ + inst = line.split(':')[1].split() + stats[basename][ext] = inst + +-for k, v in stats.iteritems(): ++for k, v in stats.items(): + gl = v.get('.frag', ['', '', '']) + vk = v.get('.spv', ['', '', '']) +- print '{0},{1},{2},{3},{4},{5},{6}'.format(k, gl[0], gl[1], gl[2], vk[0], vk[1], vk[2]) ++ print('{0},{1},{2},{3},{4},{5},{6}'.format(k, gl[0], gl[1], gl[2], vk[0], vk[1], vk[2])) +--- a/src/3rdparty/chromium/third_party/skia/tools/rebaseline/toggle_legacy_flag.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/rebaseline/toggle_legacy_flag.py 2025-01-16 02:26:08.609762563 +0800 +@@ -48,8 +48,8 @@ + stash_output = subprocess.check_output(['git', 'stash']).strip() + + if branch != "master" or stash_output != EXPECTED_STASH_OUT: +- print ("Please checkout a clean master branch at your chromium repo (%s) " +- "before running this script") % args.chromium_dir ++ print(("Please checkout a clean master branch at your chromium repo (%s) " ++ "before running this script") % args.chromium_dir) + if stash_output != EXPECTED_STASH_OUT: + subprocess.check_call(['git', 'stash', 'pop']) + exit(1) +@@ -113,7 +113,7 @@ + + def main(): + if len(sys.argv) <= 1 or sys.argv[1] == '-h' or sys.argv[1] == '--help': +- print README ++ print(README) + + parser = argparse.ArgumentParser() + parser.add_argument( +@@ -131,12 +131,12 @@ + args = parser.parse_args() + + if not args.android_dir and not args.chromium_dir and not args.google3: +- print """ ++ print(""" + Nothing to do. Please give me at least one of these three arguments: + -a (--android-dir) + -c (--chromium-dir) + -g (--google3) +-""" ++""") + exit(1) + + end_message = "CLs generated. 
Now go review and land them:\n" +@@ -152,7 +152,7 @@ + toggle_android(args) + end_message += " * http://goto.google.com/androidcl\n" + +- print end_message ++ print(end_message) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/third_party/skia/tools/skottie-wasm-perf/parse_perf_csvs.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/skottie-wasm-perf/parse_perf_csvs.py 2025-01-16 02:26:08.613012508 +0800 +@@ -16,6 +16,7 @@ + import optparse + import sys + import re ++from functools import reduce + + + MISSING_STR = 'N/A' +@@ -27,7 +28,7 @@ + with open(csv_file, 'rb') as f: + csv_reader = csv.reader(f, delimiter=',') + # First row should contain headers. Validate that it does. +- header_row = csv_reader.next() ++ header_row = next(csv_reader) + if header_row[0] != 'id': + raise Exception('%s in unexpected format' % csv_file) + p = re.compile('^.*,test=(.*),$') +@@ -48,7 +49,7 @@ + + def combine_results(d1, d2): + test_to_result = {} +- for test1, v1 in d1.items(): ++ for test1, v1 in list(d1.items()): + v2 = d2.get(test1, MISSING_STR) + perc_diff = MISSING_STR + if v2 != MISSING_STR: +@@ -64,7 +65,7 @@ + test_to_result[test1] = result + + # Also add keys in d2 and not d1. +- for test2, v2 in d2.items(): ++ for test2, v2 in list(d2.items()): + if test2 in test_to_result: + continue + test_to_result[test2] = { +@@ -82,7 +83,7 @@ + fieldnames = ['test_name', 'csv1', 'csv2', 'perc_diff'] + writer = csv.DictWriter(f, fieldnames=fieldnames) + writer.writeheader() +- tests = output_dict.keys() ++ tests = list(output_dict.keys()) + tests.sort() + for test in tests: + writer.writerow(output_dict[test]) +--- a/src/3rdparty/chromium/third_party/skia/tools/skp/generate_page_set.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/skp/generate_page_set.py 2025-01-16 02:26:08.613012508 +0800 +@@ -16,10 +16,10 @@ + def main(): + created_page_sets = [] + while True: +- user_agent = raw_input('user agent? (mobile/desktop/tablet): ') +- url_name = raw_input('URL name? (eg: google): ') +- url = raw_input('URL? (eg: http://www.google.com): ') +- comment = raw_input('Reason for adding the URL? (eg: go/skia-skps-3-2019): ') ++ user_agent = input('user agent? (mobile/desktop/tablet): ') ++ url_name = input('URL name? (eg: google): ') ++ url = input('URL? (eg: http://www.google.com): ') ++ comment = input('Reason for adding the URL? (eg: go/skia-skps-3-2019): ') + + with open(PAGE_SET_TEMPLATE) as f: + t = jinja2.Template(f.read()) +@@ -35,15 +35,15 @@ + with open(page_set_path, 'w') as f: + f.write(t.render(**subs)) + created_page_sets.append(page_set_path) +- print '\nPage set has been created in %s\n\n' % page_set_path ++ print('\nPage set has been created in %s\n\n' % page_set_path) + +- keep_going = raw_input('Do you have more page sets to create? (y/n)') ++ keep_going = input('Do you have more page sets to create? 
(y/n)') + if keep_going != 'y': + break + +- print '\n\nSummarizing all created page sets:' ++ print('\n\nSummarizing all created page sets:') + for page_set_path in created_page_sets: +- print '* %s' % page_set_path ++ print('* %s' % page_set_path) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/third_party/skia/tools/skp/webpages_playback.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/skp/webpages_playback.py 2025-01-16 02:26:08.613012508 +0800 +@@ -241,7 +241,7 @@ + page_set_dir = os.path.dirname(page_set) + + if self._IsChromiumPageSet(page_set): +- print 'Using Chromium\'s captured archives for Chromium\'s page sets.' ++ print('Using Chromium\'s captured archives for Chromium\'s page sets.') + elif self._record: + # Create an archive of the specified webpages if '--record=True' is + # specified. +@@ -306,12 +306,12 @@ + + for _ in range(RETRY_RUN_MEASUREMENT_COUNT): + try: +- print '\n\n=======Capturing SKP of %s=======\n\n' % page_set ++ print('\n\n=======Capturing SKP of %s=======\n\n' % page_set) + subprocess.check_call(' '.join(run_benchmark_cmd), shell=True) + except subprocess.CalledProcessError: + # There was a failure continue with the loop. + traceback.print_exc() +- print '\n\n=======Retrying %s=======\n\n' % page_set ++ print('\n\n=======Retrying %s=======\n\n' % page_set) + time.sleep(10) + continue + +@@ -321,7 +321,7 @@ + except InvalidSKPException: + # There was a failure continue with the loop. + traceback.print_exc() +- print '\n\n=======Retrying %s=======\n\n' % page_set ++ print('\n\n=======Retrying %s=======\n\n' % page_set) + time.sleep(10) + continue + +@@ -332,8 +332,8 @@ + # break out of the loop. + raise Exception('run_benchmark failed for page_set: %s' % page_set) + +- print '\n\n=======Capturing SKP files took %s seconds=======\n\n' % ( +- time.time() - start_time) ++ print('\n\n=======Capturing SKP files took %s seconds=======\n\n' % ( ++ time.time() - start_time)) + + if self._skia_tools: + render_pictures_cmd = [ +@@ -346,18 +346,18 @@ + ] + + for tools_cmd in (render_pictures_cmd, render_pdfs_cmd): +- print '\n\n=======Running %s=======' % ' '.join(tools_cmd) ++ print('\n\n=======Running %s=======' % ' '.join(tools_cmd)) + subprocess.check_call(tools_cmd) + + if not self._non_interactive: +- print '\n\n=======Running debugger=======' ++ print('\n\n=======Running debugger=======') + os.system('%s %s' % (os.path.join(self._skia_tools, 'debugger'), + self._local_skp_dir)) + +- print '\n\n' ++ print('\n\n') + + if self._upload: +- print '\n\n=======Uploading to %s=======\n\n' % self.gs.target_type() ++ print('\n\n=======Uploading to %s=======\n\n' % self.gs.target_type()) + # Copy the directory structure in the root directory into Google Storage. 
+ dest_dir_name = ROOT_PLAYBACK_DIR_NAME + if self._alternate_upload_dir: +@@ -366,29 +366,29 @@ + self.gs.upload_dir_contents( + self._local_skp_dir, dest_dir=dest_dir_name) + +- print '\n\n=======New SKPs have been uploaded to %s =======\n\n' % ( ++ print('\n\n=======New SKPs have been uploaded to %s =======\n\n' % ( + posixpath.join(self.gs.target_name(), dest_dir_name, +- SKPICTURES_DIR_NAME)) ++ SKPICTURES_DIR_NAME))) + + else: +- print '\n\n=======Not Uploading to %s=======\n\n' % self.gs.target_type() +- print 'Generated resources are available in %s\n\n' % ( +- self._local_skp_dir) ++ print('\n\n=======Not Uploading to %s=======\n\n' % self.gs.target_type()) ++ print('Generated resources are available in %s\n\n' % ( ++ self._local_skp_dir)) + + if self._upload_to_partner_bucket: +- print '\n\n=======Uploading to Partner bucket %s =======\n\n' % ( +- PARTNERS_GS_BUCKET) ++ print('\n\n=======Uploading to Partner bucket %s =======\n\n' % ( ++ PARTNERS_GS_BUCKET)) + partner_gs = GoogleStorageDataStore(PARTNERS_GS_BUCKET) + timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%d') + upload_dir = posixpath.join(SKPICTURES_DIR_NAME, timestamp) + try: + partner_gs.delete_path(upload_dir) + except subprocess.CalledProcessError: +- print 'Cannot delete %s because it does not exist yet.' % upload_dir +- print 'Uploading %s to %s' % (self._local_skp_dir, upload_dir) ++ print('Cannot delete %s because it does not exist yet.' % upload_dir) ++ print('Uploading %s to %s' % (self._local_skp_dir, upload_dir)) + partner_gs.upload_dir_contents(self._local_skp_dir, upload_dir) +- print '\n\n=======New SKPs have been uploaded to %s =======\n\n' % ( +- posixpath.join(partner_gs.target_name(), upload_dir)) ++ print('\n\n=======New SKPs have been uploaded to %s =======\n\n' % ( ++ posixpath.join(partner_gs.target_name(), upload_dir))) + + return 0 + +@@ -442,7 +442,7 @@ + largest_skp = max(glob.glob(os.path.join(site, '*.skp')), + key=lambda path: os.stat(path).st_size) + dest = os.path.join(self._local_skp_dir, filename) +- print 'Moving', largest_skp, 'to', dest ++ print('Moving', largest_skp, 'to', dest) + shutil.move(largest_skp, dest) + self._skp_files.append(filename) + shutil.rmtree(site) +--- a/src/3rdparty/chromium/third_party/skia/tools/skpbench/_adb.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/skpbench/_adb.py 2025-01-16 02:26:08.613012508 +0800 +@@ -3,7 +3,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + import re + import time + import subprocess +--- a/src/3rdparty/chromium/third_party/skia/tools/skpbench/_adb_path.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/skpbench/_adb_path.py 2025-01-16 02:26:08.613012508 +0800 +@@ -3,7 +3,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ +-from _adb import Adb ++from ._adb import Adb + import re + import subprocess + +--- a/src/3rdparty/chromium/third_party/skia/tools/skpbench/_benchresult.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/skpbench/_benchresult.py 2025-01-16 02:26:08.613012508 +0800 +@@ -5,7 +5,7 @@ + + """Parses an skpbench result from a line of output text.""" + +-from __future__ import print_function ++ + import re + import sys + +--- a/src/3rdparty/chromium/third_party/skia/tools/skpbench/_hardware_android.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/skpbench/_hardware_android.py 2025-01-16 02:26:08.613012508 +0800 +@@ -3,8 +3,8 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function +-from _hardware import Hardware ++ ++from ._hardware import Hardware + import sys + import time + +--- a/src/3rdparty/chromium/third_party/skia/tools/skpbench/_hardware_nexus_6p.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/skpbench/_hardware_nexus_6p.py 2025-01-16 02:26:08.613012508 +0800 +@@ -3,8 +3,8 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from _hardware import HardwareException, Expectation +-from _hardware_android import HardwareAndroid ++from ._hardware import HardwareException, Expectation ++from ._hardware_android import HardwareAndroid + + CPU_CLOCK_RATE = 1728000 + GPU_CLOCK_RATE = 510000000 +@@ -82,9 +82,9 @@ + Expectation(str, exact_value='4-6', name='online cpus'), + Expectation(int, max_value=88, name='tsens_tz_sensor13'), + Expectation(int, max_value=88, name='tsens_tz_sensor14'), +- Expectation(long, min_value=(GPU_CLOCK_RATE - 5000), ++ Expectation(int, min_value=(GPU_CLOCK_RATE - 5000), + max_value=(GPU_CLOCK_RATE + 5000), name='gpu clock rate'), +- Expectation(long, min_value=647995000, max_value=648007500, ++ Expectation(int, min_value=647995000, max_value=648007500, + name='ddr clock rate', sleeptime=10)] + \ + [Expectation(int, exact_value=CPU_CLOCK_RATE, name='cpu_%i clock rate' %i) + for i in range(4, 7)] +--- a/src/3rdparty/chromium/third_party/skia/tools/skpbench/_hardware_pixel.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/skpbench/_hardware_pixel.py 2025-01-16 02:26:08.613012508 +0800 +@@ -3,8 +3,8 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ +-from _hardware import Expectation +-from _hardware_android import HardwareAndroid ++from ._hardware import Expectation ++from ._hardware_android import HardwareAndroid + + CPU_CLOCK_RATE = 1670400 + GPU_CLOCK_RATE = 315000000 +@@ -82,7 +82,7 @@ + Expectation(str, exact_value='2-3', name='online cpus')] + \ + [Expectation(int, exact_value=CPU_CLOCK_RATE, name='cpu_%i clock rate' %i) + for i in range(2, 4)] + \ +- [Expectation(long, min_value=902390000, max_value=902409999, ++ [Expectation(int, min_value=902390000, max_value=902409999, + name='measured ddr clock', sleeptime=10), + Expectation(int, max_value=41000, name='pm8994_tz temperature'), + Expectation(int, max_value=40, name='msm_therm temperature')] +--- a/src/3rdparty/chromium/third_party/skia/tools/skpbench/_hardware_pixel2.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/skpbench/_hardware_pixel2.py 2025-01-16 02:26:08.613012508 +0800 +@@ -3,8 +3,8 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from _hardware import Expectation +-from _hardware_android import HardwareAndroid ++from ._hardware import Expectation ++from ._hardware_android import HardwareAndroid + + CPU_CLOCK_RATE = 2035200 + MEM_CLOCK_RATE = 13763 +@@ -109,7 +109,7 @@ + Expectation(str, exact_value='4-6', name='online cpus')] + \ + [Expectation(int, exact_value=CPU_CLOCK_RATE, name='cpu_%i clock rate' %i) + for i in range(4, 7)] + \ +- [Expectation(long, min_value=902390000, max_value=902409999, ++ [Expectation(int, min_value=902390000, max_value=902409999, + name='measured ddr clock', sleeptime=10), + Expectation(int, max_value=750, name='gpu temperature'), + Expectation(int, exact_value=1, name='gpu throttling'), +--- a/src/3rdparty/chromium/third_party/skia/tools/skpbench/_hardware_pixel_c.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/skpbench/_hardware_pixel_c.py 2025-01-16 02:26:08.613012508 +0800 +@@ -3,8 +3,8 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from _hardware import HardwareException, Expectation +-from _hardware_android import HardwareAndroid ++from ._hardware import HardwareException, Expectation ++from ._hardware_android import HardwareAndroid + + CPU_CLOCK_RATE = 1326000 + # If you run adb cat /sys/devices/57000000.gpu/pstate it shows all +--- a/src/3rdparty/chromium/third_party/skia/tools/skpbench/sheet.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/skpbench/sheet.py 2025-01-16 02:26:08.613012508 +0800 +@@ -5,8 +5,8 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ 
+-from __future__ import print_function
+-from _benchresult import BenchResult
++
++from ._benchresult import BenchResult
+ from argparse import ArgumentParser
+ from collections import defaultdict, namedtuple
+ from datetime import datetime
+@@ -14,9 +14,10 @@
+ import os
+ import sys
+ import tempfile
+-import urllib
+-import urlparse
++import urllib.request
++import urllib.parse
+ import webbrowser
++from functools import reduce
+ 
+ __argparse = ArgumentParser(description="""
+ 
+@@ -60,7 +61,7 @@
+   if not qualifiers:
+     return name
+   else:
+-    args = ('%s=%s' % (k,v) for k,v in qualifiers.iteritems())
++    args = ('%s=%s' % (k,v) for k,v in qualifiers.items())
+     return '%s (%s)' % (name, ' '.join(args))
+ 
+ class Parser:
+@@ -82,7 +83,7 @@
+     if not fullconfig in self.fullconfigs:
+       self.fullconfigs.append(fullconfig)
+ 
+-    for qualifier, value in self.sheet_qualifiers.items():
++    for qualifier, value in list(self.sheet_qualifiers.items()):
+       if value is None:
+         self.sheet_qualifiers[qualifier] = match.get_string(qualifier)
+       elif value != match.get_string(qualifier):
+@@ -103,7 +104,7 @@
+       outfile.write('\n')
+ 
+     # Write the rows.
+-    for bench, row in self.rows.iteritems():
++    for bench, row in self.rows.items():
+       outfile.write('%s,' % bench)
+       for fullconfig in self.fullconfigs:
+         if fullconfig in row:
+@@ -120,10 +121,10 @@
+     if len(self.rows) > 1:
+       outfile.write('\n')
+       self._print_computed_row('MEAN',
+-        lambda col: reduce(operator.add, col.values()) / len(col),
++        lambda col: reduce(operator.add, list(col.values())) / len(col),
+         outfile=outfile)
+       self._print_computed_row('GEOMEAN',
+-        lambda col: reduce(operator.mul, col.values()) ** (1.0 / len(col)),
++        lambda col: reduce(operator.mul, list(col.values())) ** (1.0 / len(col)),
+         outfile=outfile)
+ 
+   def _print_computed_row(self, name, func, outfile=sys.stdout):
+@@ -157,7 +158,7 @@
+     pathname = os.path.join(dirname, basename)
+     with open(pathname, mode='w') as tmpfile:
+       parser.print_csv(outfile=tmpfile)
+-    fileuri = urlparse.urljoin('file:', urllib.pathname2url(pathname))
++    fileuri = urllib.parse.urljoin('file:', urllib.request.pathname2url(pathname))
+     print('opening %s' % fileuri)
+     webbrowser.open(fileuri)
+ 
+--- a/src/3rdparty/chromium/third_party/skia/tools/skpbench/skiaperf.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/skia/tools/skpbench/skiaperf.py	2025-01-16 02:26:08.613012508 +0800
+@@ -5,8 +5,8 @@
+ # Use of this source code is governed by a BSD-style license that can be
+ # found in the LICENSE file.
+ 
+-from __future__ import print_function
+-from _benchresult import BenchResult
++
++from ._benchresult import BenchResult
+ from argparse import ArgumentParser
+ from collections import defaultdict
+ import json
+--- a/src/3rdparty/chromium/third_party/skia/tools/skpbench/skpbench.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/skia/tools/skpbench/skpbench.py	2025-01-16 02:26:08.613012508 +0800
+@@ -5,10 +5,10 @@
+ # Use of this source code is governed by a BSD-style license that can be
+ # found in the LICENSE file. 
+ +-from __future__ import print_function +-from _adb import Adb +-from _benchresult import BenchResult +-from _hardware import HardwareException, Hardware ++ ++from ._adb import Adb ++from ._benchresult import BenchResult ++from ._hardware import HardwareException, Hardware + from argparse import ArgumentParser + from multiprocessing import Queue + from threading import Thread, Timer +@@ -91,10 +91,10 @@ + + FLAGS = __argparse.parse_args() + if FLAGS.adb: +- import _adb_path as _path ++ from . import _adb_path as _path + _path.init(FLAGS.device_serial, FLAGS.adb_binary) + else: +- import _os_path as _path ++ from . import _os_path as _path + + def dump_commandline_if_verbose(commandline): + if FLAGS.verbosity >= 5: +@@ -340,19 +340,19 @@ + echo=(FLAGS.verbosity >= 5)) + model = adb.check('getprop ro.product.model').strip() + if model == 'Pixel C': +- from _hardware_pixel_c import HardwarePixelC ++ from ._hardware_pixel_c import HardwarePixelC + hardware = HardwarePixelC(adb) + elif model == 'Pixel': +- from _hardware_pixel import HardwarePixel ++ from ._hardware_pixel import HardwarePixel + hardware = HardwarePixel(adb) + elif model == 'Pixel 2': +- from _hardware_pixel2 import HardwarePixel2 ++ from ._hardware_pixel2 import HardwarePixel2 + hardware = HardwarePixel2(adb) + elif model == 'Nexus 6P': +- from _hardware_nexus_6p import HardwareNexus6P ++ from ._hardware_nexus_6p import HardwareNexus6P + hardware = HardwareNexus6P(adb) + else: +- from _hardware_android import HardwareAndroid ++ from ._hardware_android import HardwareAndroid + print("WARNING: %s: don't know how to monitor this hardware; results " + "may be unreliable." % model, file=sys.stderr) + hardware = HardwareAndroid(adb) +--- a/src/3rdparty/chromium/third_party/skia/tools/skqp/create_apk.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/skqp/create_apk.py 2025-01-16 02:26:08.613012508 +0800 +@@ -148,7 +148,7 @@ + for arch in opts.architectures: + build = os.path.join(build_dir, arch) + gn_args = opts.gn_args(arch) +- args = ' '.join('%s=%s' % (k, v) for k, v in gn_args.items()) ++ args = ' '.join('%s=%s' % (k, v) for k, v in list(gn_args.items())) + check_call(['bin/gn', 'gen', build, '--args=' + args]) + try: + check_call(['ninja', '-C', build, lib]) +@@ -207,8 +207,8 @@ + for arg in args: + if arg not in skia_to_android_arch_name_map: + self.error += ('Argument %r is not in %r\n' % +- (arg, skia_to_android_arch_name_map.keys())) +- self.architectures = args if args else skia_to_android_arch_name_map.keys() ++ (arg, list(skia_to_android_arch_name_map.keys()))) ++ self.architectures = args if args else list(skia_to_android_arch_name_map.keys()) + default_build = os.path.dirname(__file__) + '/../../out/skqp' + self.build_dir = os.path.abspath(os.environ.get('SKQP_BUILD_DIR', default_build)) + self.final_output_dir = os.path.abspath(os.environ.get('SKQP_OUTPUT_DIR', default_build)) +--- a/src/3rdparty/chromium/third_party/skia/tools/skqp/cut_release.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/skqp/cut_release.py 2025-01-16 02:26:08.613012508 +0800 +@@ -9,8 +9,8 @@ + import shutil + import sys + import tempfile +-import urllib +-import urllib2 ++import urllib.request, urllib.parse, urllib.error ++import urllib.request, urllib.error, urllib.parse + + from subprocess import check_call, check_output + +@@ -21,13 +21,13 @@ + + def urlopen(url): + cookie = os.environ.get('SKIA_GOLD_COOKIE', '') +- return urllib2.urlopen(urllib2.Request(url, 
headers={'Cookie': cookie})) ++ return urllib.request.urlopen(urllib.request.Request(url, headers={'Cookie': cookie})) + + def make_skqp_model(arg): + name, urls, exe = arg + tmp = tempfile.mkdtemp() + for url in urls: +- urllib.urlretrieve(url, tmp + '/' + url[url.rindex('/') + 1:]) ++ urllib.request.urlretrieve(url, tmp + '/' + url[url.rindex('/') + 1:]) + check_call([exe, tmp, ASSETS + '/gmkb/' + name]) + shutil.rmtree(tmp) + sys.stdout.write(name + ' ') +@@ -50,7 +50,7 @@ + def gold(first_commit, last_commit): + c1, c2 = (check_output(['git', 'rev-parse', c]).strip() + for c in (first_commit, last_commit)) +- f = urlopen('https://public-gold.skia.org/json/export?' + urllib.urlencode([ ++ f = urlopen('https://public-gold.skia.org/json/export?' + urllib.parse.urlencode([ + ('fbegin', c1), + ('fend', c2), + ('query', 'config=gles&config=vk&source_type=gm'), +--- a/src/3rdparty/chromium/third_party/skia/tools/skqp/download_model.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/skqp/download_model.py 2025-01-16 02:26:08.613012508 +0800 +@@ -10,7 +10,7 @@ + import shutil + import sys + import tempfile +-import urllib2 ++import urllib.request, urllib.error, urllib.parse + + def checksum(path): + if not os.path.exists(path): +@@ -33,7 +33,7 @@ + pass # ignore race condition + url = 'https://storage.googleapis.com/skia-skqp-assets/' + md5 + with open(path, 'wb') as o: +- shutil.copyfileobj(urllib2.urlopen(url), o) ++ shutil.copyfileobj(urllib.request.urlopen(url), o) + + def tmp(prefix): + fd, path = tempfile.mkstemp(prefix=prefix) +--- a/src/3rdparty/chromium/third_party/skia/tools/skqp/find_commit_with_best_gold_results.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/skqp/find_commit_with_best_gold_results.py 2025-01-16 02:26:08.613012508 +0800 +@@ -9,8 +9,8 @@ + import subprocess + import sys + import threading +-import urllib +-import urllib2 ++import urllib.request, urllib.parse, urllib.error ++import urllib.request, urllib.error, urllib.parse + + + assert '/' in [os.sep, os.altsep] +@@ -45,18 +45,18 @@ + query = [ + ('fbegin', first_commit), + ('fend', last_commit), +- ('query', urllib.urlencode(qq)), ++ ('query', urllib.parse.urlencode(qq)), + ('pos', 'true'), + ('neg', 'false'), + ('unt', 'false'), + ('head', 'true') + ] +- return 'https://public-gold.skia.org/json/export?' + urllib.urlencode(query) ++ return 'https://public-gold.skia.org/json/export?' + urllib.parse.urlencode(query) + + + def urlopen(url): + cookie = os.environ.get('SKIA_GOLD_COOKIE', '') +- return urllib2.urlopen(urllib2.Request(url, headers={'Cookie': cookie})) ++ return urllib.request.urlopen(urllib.request.Request(url, headers={'Cookie': cookie})) + + + def get_results_for_commit(commit, jobs): +@@ -67,7 +67,7 @@ + def process(url): + try: + testResults = json.load(urlopen(url)) +- except urllib2.URLError: ++ except urllib.error.URLError: + sys.stderr.write('\nerror "%s":\n' % url) + return + sys.stderr.write('.') +--- a/src/3rdparty/chromium/third_party/skia/tools/skqp/gn_to_bp.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/skia/tools/skqp/gn_to_bp.py 2025-01-16 02:26:08.613012508 +0800 +@@ -132,7 +132,7 @@ + 'skia_use_system_zlib': 'true', + } + +-for k, v in SkqpGnArgs.iteritems(): ++for k, v in SkqpGnArgs.items(): + gn_args[k] = v + + js = gn_to_bp_utils.GenerateJSONFromGN(gn_args) +@@ -185,7 +185,7 @@ + + # OK! We have everything to fill in Android.bp... 
+ with open('Android.bp', 'w') as f:
+-    print >>f, bp.substitute({
++    print(bp.substitute({
+         'local_includes': bpfmt(8, local_includes),
+         'srcs': bpfmt(8, srcs),
+         'cflags': bpfmt(8, cflags, False),
+@@ -202,5 +202,5 @@
+                    defs['sse42'] +
+                    defs['avx'] +
+                    defs['hsw']),
+-    })
++    }), file=f)
+ 
+--- a/src/3rdparty/chromium/third_party/skia/tools/skqp/make_apk_list.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/skia/tools/skqp/make_apk_list.py	2025-01-16 02:26:08.613012508 +0800
+@@ -62,8 +62,8 @@
+     return None
+ 
+ def nowrap(s):
+-    return (s.replace(' ', u'\u00A0'.encode('utf-8'))
+-                .replace('-', u'\u2011'.encode('utf-8')))
++    return (s.replace(' ', '\u00A0')  # py3: str.replace() needs str, not utf-8 bytes
++                .replace('-', '\u2011'))
+ 
+ def rev_parse(arg):
+     if isinstance(arg, tuple):
+@@ -122,10 +122,10 @@
+     table(o, origin, 'skqp/release', [(origin, 'master'), '09ab171c5c0'])
+     table(o, aosp_skqp, 'pie-cts-dev', ['f084c17322'])
+     o.write(FOOTER)
+-    print path
++    print(path)
+     call([sys.executable, 'bin/sysopen', path])
+     gscmd = 'gsutil -h "Content-Type:text/html" cp "%s" gs://skia-skqp/apklist'
+-    print gscmd % path
++    print(gscmd % path)
+ 
+ if __name__ == '__main__':
+     main()
+--- a/src/3rdparty/chromium/third_party/skia/tools/svg/svg_downloader.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/skia/tools/svg/svg_downloader.py	2025-01-16 02:26:08.613012508 +0800
+@@ -9,7 +9,7 @@
+ import optparse
+ import os
+ import sys
+-import urllib
++import urllib.request, urllib.parse, urllib.error
+ 
+ 
+ PARENT_DIR = os.path.dirname(os.path.realpath(__file__))
+@@ -17,11 +17,11 @@
+ 
+ def downloadSVGs(svgs_file, output_dir, prefix):
+   with open(svgs_file, 'r') as f:
+-    for url in f.xreadlines():
++    for url in f:
+       svg_url = url.strip()
+       dest_file = os.path.join(output_dir, prefix + os.path.basename(svg_url))
+-      print 'Downloading %s' % svg_url
+-      urllib.urlretrieve(svg_url, dest_file)
++      print('Downloading %s' % svg_url)
++      urllib.request.urlretrieve(svg_url, dest_file)
+ 
+ 
+ if '__main__' == __name__:
+--- a/src/3rdparty/chromium/third_party/sqlite/scripts/sqlite_cherry_picker.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/sqlite/scripts/sqlite_cherry_picker.py	2025-01-16 02:26:08.613012508 +0800
+@@ -1,6 +1,6 @@
+ #!/usr/bin/env python3
+ 
+-from __future__ import print_function
++
+ 
+ import argparse
+ import generate_amalgamation
+--- a/src/3rdparty/chromium/third_party/tint/src/PRESUBMIT.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/tint/src/PRESUBMIT.py	2025-01-16 02:26:08.613012508 +0800
+@@ -21,7 +21,7 @@
+     """Returns the license header regexp."""
+     # Accept any year number from 2019 to the current year
+     current_year = int(input_api.time.strftime('%Y'))
+-    allowed_years = (str(s) for s in reversed(xrange(2019, current_year + 1)))
++    allowed_years = (str(s) for s in reversed(range(2019, current_year + 1)))
+     years_re = '(' + '|'.join(allowed_years) + ')'
+     license_header = (
+         r'.*? Copyright( \(c\))? 
%(year)s The Tint [Aa]uthors\n ' +--- a/src/3rdparty/chromium/third_party/tint/src/tools/run_tests.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/tint/src/tools/run_tests.py 2025-01-16 02:26:08.613012508 +0800 +@@ -65,7 +65,7 @@ + class TestRunner: + def RunTest(self, tc): + """Runs a single test.""" +- print("Testing {}".format(tc.GetInputPath())) ++ print(("Testing {}".format(tc.GetInputPath()))) + + cmd = [self.options.test_prog_path] + if tc.IsParseOnly(): +@@ -82,7 +82,7 @@ + + except Exception as e: + if not tc.IsExpectedFail(): +- print("{}".format("".join(map(chr, bytearray(e.output))))) ++ print(("{}".format("".join(map(chr, bytearray(e.output)))))) + print(e) + return False + +@@ -96,8 +96,8 @@ + if not tc.IsExpectedFail() and not result: + self.failures.append(tc.GetInputPath()) + elif tc.IsExpectedFail() and result: +- print("Expected: " + tc.GetInputPath() + +- " to fail but passed.") ++ print(("Expected: " + tc.GetInputPath() + ++ " to fail but passed.")) + self.failures.append(tc.GetInputPath()) + + def SummarizeResults(self): +@@ -111,10 +111,10 @@ + print(failure) + + print('') +- print('Test cases executed: {}'.format(len(self.test_cases))) +- print(' Successes: {}'.format( +- (len(self.test_cases) - len(self.failures)))) +- print(' Failures: {}'.format(len(self.failures))) ++ print(('Test cases executed: {}'.format(len(self.test_cases)))) ++ print((' Successes: {}'.format( ++ (len(self.test_cases) - len(self.failures))))) ++ print((' Failures: {}'.format(len(self.failures)))) + print('') + + def Run(self): +@@ -148,14 +148,14 @@ + test_prog = os.path.abspath( + os.path.join(self.options.build_dir, 'tint')) + if not os.path.isfile(test_prog): +- print("Cannot find test program {}".format(test_prog)) ++ print(("Cannot find test program {}".format(test_prog))) + return 1 + + self.options.test_prog_path = test_prog + + if not os.path.isfile(self.options.test_prog_path): +- print("Cannot find test program '{}'".format( +- self.options.test_prog_path)) ++ print(("Cannot find test program '{}'".format( ++ self.options.test_prog_path))) + return 1 + + input_file_re = re.compile('^.+[\.]wgsl') +@@ -165,7 +165,7 @@ + for filename in self.args: + input_path = os.path.join(self.options.test_dir, filename) + if not os.path.isfile(input_path): +- print("Cannot find test file '{}'".format(filename)) ++ print(("Cannot find test file '{}'".format(filename))) + return 1 + + self.test_cases.append( +--- a/src/3rdparty/chromium/third_party/tlslite/scripts/tls.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/tlslite/scripts/tls.py 2025-01-16 02:26:08.613012508 +0800 +@@ -6,7 +6,7 @@ + # Martin von Loewis - python 3 port + # + # See the LICENSE file for legal information regarding use of this file. +-from __future__ import print_function ++ + import sys + import os + import os.path +@@ -14,10 +14,10 @@ + import time + import getopt + try: +- import httplib +- from SocketServer import * +- from BaseHTTPServer import * +- from SimpleHTTPServer import * ++ import http.client ++ from socketserver import * ++ from http.server import * ++ from http.server import * + except ImportError: + # Python 3.x + from http import client as httplib +--- a/src/3rdparty/chromium/third_party/tlslite/scripts/tlsdb.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/tlslite/scripts/tlsdb.py 2025-01-16 02:26:08.613012508 +0800 +@@ -6,7 +6,7 @@ + # + # See the LICENSE file for legal information regarding use of this file. 
+ +-from __future__ import print_function ++ + import sys + import os + import socket +@@ -141,7 +141,7 @@ + if n==0: + return 0 + return int(math.floor(math.log(n, 2))+1) +- for username in db.keys(): ++ for username in list(db.keys()): + N, g, s, v = db[username] + print(numBits(N), username) + else: +--- a/src/3rdparty/chromium/third_party/tlslite/tlslite/basedb.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/tlslite/tlslite/basedb.py 2025-01-16 02:26:08.613012508 +0800 +@@ -7,10 +7,10 @@ + """Base class for SharedKeyDB and VerifierDB.""" + + try: +- import anydbm ++ import dbm + except ImportError: + # Python 3 +- import dbm as anydbm ++ import dbm.ndbm as anydbm + import threading + + class BaseDB(object): +@@ -29,7 +29,7 @@ + @raise anydbm.error: If there's a problem creating the database. + """ + if self.filename: +- self.db = anydbm.open(self.filename, "n") #raises anydbm.error ++ self.db = dbm.open(self.filename, "n") #raises anydbm.error + self.db["--Reserved--type"] = self.type + self.db.sync() + else: +@@ -43,7 +43,7 @@ + """ + if not self.filename: + raise ValueError("Can only open on-disk databases") +- self.db = anydbm.open(self.filename, "w") #raises anydbm.error ++ self.db = dbm.open(self.filename, "w") #raises anydbm.error + try: + if self.db["--Reserved--type"] != self.type: + raise ValueError("Not a %s database" % self.type) +@@ -104,7 +104,7 @@ + + self.lock.acquire() + try: +- return self.db.has_key(username) ++ return username in self.db + finally: + self.lock.release() + +@@ -123,7 +123,7 @@ + + self.lock.acquire() + try: +- usernames = self.db.keys() ++ usernames = list(self.db.keys()) + finally: + self.lock.release() + usernames = [u for u in usernames if not u.startswith("--Reserved--")] +--- a/src/3rdparty/chromium/third_party/tlslite/tlslite/tlsrecordlayer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/tlslite/tlslite/tlsrecordlayer.py 2025-01-16 02:26:08.613012508 +0800 +@@ -7,7 +7,7 @@ + # See the LICENSE file for legal information regarding use of this file. 
+ + """Helper class for TLSConnection.""" +-from __future__ import generators ++ + + from .utils.compat import * + from .utils.cryptomath import * +--- a/src/3rdparty/chromium/third_party/tlslite/tlslite/integration/asyncstatemachine.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/tlslite/tlslite/integration/asyncstatemachine.py 2025-01-16 02:26:08.613012508 +0800 +@@ -157,7 +157,7 @@ + + def _doHandshakeOp(self): + try: +- self.result = self.handshaker.next() ++ self.result = next(self.handshaker) + except StopIteration: + self.handshaker = None + self.result = None +@@ -165,14 +165,14 @@ + + def _doCloseOp(self): + try: +- self.result = self.closer.next() ++ self.result = next(self.closer) + except StopIteration: + self.closer = None + self.result = None + self.outCloseEvent() + + def _doReadOp(self): +- self.result = self.reader.next() ++ self.result = next(self.reader) + if not self.result in (0,1): + readBuffer = self.result + self.reader = None +@@ -181,7 +181,7 @@ + + def _doWriteOp(self): + try: +- self.result = self.writer.next() ++ self.result = next(self.writer) + except StopIteration: + self.writer = None + self.result = None +--- a/src/3rdparty/chromium/third_party/tlslite/tlslite/integration/httptlsconnection.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/tlslite/tlslite/integration/httptlsconnection.py 2025-01-16 02:26:08.613012508 +0800 +@@ -10,7 +10,7 @@ + + import socket + try: +- import httplib ++ import http.client + except ImportError: + # Python 3 + from http import client as httplib +@@ -18,7 +18,7 @@ + from tlslite.integration.clienthelper import ClientHelper + + +-class HTTPTLSConnection(httplib.HTTPConnection, ClientHelper): ++class HTTPTLSConnection(http.client.HTTPConnection, ClientHelper): + """This class extends L{httplib.HTTPConnection} to support TLS.""" + + def __init__(self, host, port=None, strict=None, +@@ -92,10 +92,10 @@ + unexpected hangup. 
+         """
+         if source_address:
+-            httplib.HTTPConnection.__init__(self, host, port, strict,
++            http.client.HTTPConnection.__init__(self, host, port, strict,
+                                             timeout, source_address)
+         if not source_address:
+-            httplib.HTTPConnection.__init__(self, host, port, strict,
++            http.client.HTTPConnection.__init__(self, host, port, strict,
+                                             timeout)
+         self.ignoreAbruptClose = ignoreAbruptClose
+         ClientHelper.__init__(self,
+@@ -106,7 +106,7 @@
+                               anon)
+ 
+     def connect(self):
+-        httplib.HTTPConnection.connect(self)
++        http.client.HTTPConnection.connect(self)
+         self.sock = TLSConnection(self.sock)
+         self.sock.ignoreAbruptClose = self.ignoreAbruptClose
+         ClientHelper._handshake(self, self.sock)
+--- a/src/3rdparty/chromium/third_party/tlslite/tlslite/integration/xmlrpcserver.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/tlslite/tlslite/integration/xmlrpcserver.py	2025-01-16 02:26:08.613012508 +0800
+@@ -6,7 +6,7 @@
+ 
+ """xmlrpcserver.py - simple XML RPC server supporting TLS"""
+ try:
+-    from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
++    from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
+ except ImportError:
+     # Python 3
+     from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
+--- a/src/3rdparty/chromium/third_party/tlslite/tlslite/integration/xmlrpctransport.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/tlslite/tlslite/integration/xmlrpctransport.py	2025-01-16 02:26:08.613012508 +0800
+@@ -9,8 +9,8 @@
+ """TLS Lite + xmlrpclib."""
+ 
+ try:
+-    import xmlrpclib
+-    import httplib
++    import xmlrpc.client
++    import http.client
+ except ImportError:
+     # Python 3
+     from xmlrpc import client as xmlrpclib
+@@ -20,11 +20,11 @@
+ import tlslite.errors
+ 
+ 
+-class XMLRPCTransport(xmlrpclib.Transport, ClientHelper):
++class XMLRPCTransport(xmlrpc.client.Transport, ClientHelper):
+     """Handles an HTTPS transaction to an XML-RPC server."""
+ 
+     # Pre python 2.7, the make_connection returns a HTTP class
+-    transport = xmlrpclib.Transport()
++    transport = xmlrpc.client.Transport()
+     conn_class_is_http = not hasattr(transport, '_connection')
+     del(transport)
+ 
+@@ -101,7 +101,7 @@
+         # self._connection is new in python 2.7, since we're using it here,
+         # we'll add this ourselves too, just in case we're pre-2.7
+         self._connection = (None, None)
+-        xmlrpclib.Transport.__init__(self, use_datetime)
++        xmlrpc.client.Transport.__init__(self, use_datetime)
+         self.ignoreAbruptClose = ignoreAbruptClose
+         ClientHelper.__init__(self,
+                               username, password,
+@@ -128,6 +128,6 @@
+         self._connection = host, http
+         if not self.conn_class_is_http:
+             return http
+-        http2 = httplib.HTTP()
++        http2 = http.client.HTTP()
+         http2._setup(http)
+         return http2
+--- a/src/3rdparty/chromium/third_party/tlslite/tlslite/utils/compat.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/tlslite/tlslite/utils/compat.py	2025-01-16 02:26:08.613012508 +0800
+@@ -19,7 +19,7 @@
+ def compatHMAC(x): return bytes(x)
+ 
+ def raw_input(s):
+-    return input(s)
++    return input(s)  # py3 input() == py2 raw_input(); must not be wrapped in eval()
+ 
+ # So, the python3 binascii module deals with bytearrays, and python2
+ # deals with strings... 
I would rather deal with the "a" part as +@@ -82,6 +82,6 @@ + + import traceback + def formatExceptionTrace(e): +- newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)) ++ newStr = "".join(traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])) + return newStr + +--- a/src/3rdparty/chromium/third_party/tlslite/tlslite/utils/cryptomath.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/tlslite/tlslite/utils/cryptomath.py 2025-01-16 02:26:08.613012508 +0800 +@@ -8,7 +8,7 @@ + """cryptomath module + + This module has basic math/crypto code.""" +-from __future__ import print_function ++ + import os + import math + import base64 +@@ -210,7 +210,7 @@ + power = gmpy.mpz(power) + modulus = gmpy.mpz(modulus) + result = pow(base, power, modulus) +- return long(result) ++ return int(result) + + else: + def powMod(base, power, modulus): +--- a/src/3rdparty/chromium/third_party/tlslite/tlslite/utils/pycrypto_rsakey.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/tlslite/tlslite/utils/pycrypto_rsakey.py 2025-01-16 02:26:08.613012508 +0800 +@@ -15,9 +15,9 @@ + class PyCrypto_RSAKey(RSAKey): + def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0): + if not d: +- self.rsa = RSA.construct( (long(n), long(e)) ) ++ self.rsa = RSA.construct( (int(n), int(e)) ) + else: +- self.rsa = RSA.construct( (long(n), long(e), long(d), long(p), long(q)) ) ++ self.rsa = RSA.construct( (int(n), int(e), int(d), int(p), int(q)) ) + + def __getattr__(self, name): + return getattr(self.rsa, name) +--- a/src/3rdparty/chromium/third_party/usrsctp/usrsctplib/gen-def.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/usrsctp/usrsctplib/gen-def.py 2025-01-16 02:26:08.613012508 +0800 +@@ -23,4 +23,4 @@ + for line in dumpbin_cmd.stdout.decode('utf-8').splitlines(): + match = pattern.match(line) + if match: +- print(match.group('functionname')) ++ print((match.group('functionname'))) +--- a/src/3rdparty/chromium/third_party/vulkan_headers/registry/cgenerator.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/vulkan_headers/registry/cgenerator.py 2025-01-16 02:26:08.613012508 +0800 +@@ -310,12 +310,12 @@ + + # Everyone with an explicit mayalias="true" + self.may_alias = set(typeName +- for typeName, data in self.registry.typedict.items() ++ for typeName, data in list(self.registry.typedict.items()) + if data.elem.get('mayalias') == 'true') + + # Every type mentioned in some other type's parentstruct attribute. + parent_structs = (otherType.elem.get('parentstruct') +- for otherType in self.registry.typedict.values()) ++ for otherType in list(self.registry.typedict.values())) + self.may_alias.update(set(x for x in parent_structs + if x is not None)) + return typeName in self.may_alias +--- a/src/3rdparty/chromium/third_party/vulkan_headers/registry/generator.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/vulkan_headers/registry/generator.py 2025-01-16 02:26:08.613012508 +0800 +@@ -15,7 +15,7 @@ + # limitations under the License. 
+ """Base class for source/header/doc generators, as well as some utility functions.""" + +-from __future__ import unicode_literals ++ + + import io + import os +@@ -325,7 +325,7 @@ + declared first when emitting this enum.""" + name = elem.get('name') + numVal = None +- if 'value' in elem.keys(): ++ if 'value' in list(elem.keys()): + value = elem.get('value') + # print('About to translate value =', value, 'type =', type(value)) + if needsNum: +@@ -337,7 +337,7 @@ + # value += enuminfo.type + self.logMsg('diag', 'Enum', name, '-> value [', numVal, ',', value, ']') + return [numVal, value] +- if 'bitpos' in elem.keys(): ++ if 'bitpos' in list(elem.keys()): + value = elem.get('bitpos') + bitpos = int(value, 0) + numVal = 1 << bitpos +@@ -349,13 +349,13 @@ + value = value + 'ULL' + self.logMsg('diag', 'Enum', name, '-> bitpos [', numVal, ',', value, ']') + return [numVal, value] +- if 'offset' in elem.keys(): ++ if 'offset' in list(elem.keys()): + # Obtain values in the mapping from the attributes + enumNegative = False + offset = int(elem.get('offset'), 0) + extnumber = int(elem.get('extnumber'), 0) + extends = elem.get('extends') +- if 'dir' in elem.keys(): ++ if 'dir' in list(elem.keys()): + enumNegative = True + self.logMsg('diag', 'Enum', name, 'offset =', offset, + 'extnumber =', extnumber, 'extends =', extends, +@@ -369,7 +369,7 @@ + # More logic needed! + self.logMsg('diag', 'Enum', name, '-> offset [', numVal, ',', value, ']') + return [numVal, value] +- if 'alias' in elem.keys(): ++ if 'alias' in list(elem.keys()): + return [None, elem.get('alias')] + return [None, None] + +--- a/src/3rdparty/chromium/third_party/vulkan_headers/registry/reg.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/vulkan_headers/registry/reg.py 2025-01-16 02:26:08.613012508 +0800 +@@ -111,7 +111,7 @@ + If 'required' is not True, also returns True if neither element + has an attribute value for key.""" + +- if required and key not in self.elem.keys(): ++ if required and key not in list(self.elem.keys()): + return False + return self.elem.get(key) == info.elem.get(key) + +@@ -1187,7 +1187,7 @@ + # being generated. 
Add extensions matching the pattern specified in + # regExtensions, then remove extensions matching the pattern + # specified in regRemoveExtensions +- for (extName, ei) in sorted(self.extdict.items(), key=lambda x: x[1].number if x[1].number is not None else '0'): ++ for (extName, ei) in sorted(list(self.extdict.items()), key=lambda x: x[1].number if x[1].number is not None else '0'): + extName = ei.name + include = False + +--- a/src/3rdparty/chromium/third_party/vulkan_memory_allocator/tools/VmaDumpVis/VmaDumpVis.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/vulkan_memory_allocator/tools/VmaDumpVis/VmaDumpVis.py 2025-01-16 02:26:08.613012508 +0800 +@@ -68,13 +68,13 @@ + + def IsDataEmpty(): + global data +- for dictMemType in data.values(): ++ for dictMemType in list(data.values()): + if 'DedicatedAllocations' in dictMemType and len(dictMemType['DedicatedAllocations']) > 0: + return False + if 'DefaultPoolBlocks' in dictMemType and len(dictMemType['DefaultPoolBlocks']) > 0: + return False + if 'CustomPools' in dictMemType: +- for lBlockList in dictMemType['CustomPools'].values(): ++ for lBlockList in list(dictMemType['CustomPools'].values()): + if len(lBlockList) > 0: + return False + return True +@@ -88,7 +88,7 @@ + iImgSizeY = IMG_MARGIN + iImgSizeY += FONT_SIZE + IMG_MARGIN # Grid lines legend - sizes + iMaxBlockSize = 0 +- for dictMemType in data.values(): ++ for dictMemType in list(data.values()): + iImgSizeY += IMG_MARGIN + FONT_SIZE + lDedicatedAllocations = dictMemType['DedicatedAllocations'] + iImgSizeY += len(lDedicatedAllocations) * (IMG_MARGIN * 2 + FONT_SIZE + MAP_SIZE) +@@ -99,7 +99,7 @@ + for objBlock in lDefaultPoolBlocks: + iMaxBlockSize = max(iMaxBlockSize, objBlock['Size']) + dCustomPools = dictMemType['CustomPools'] +- for lBlocks in dCustomPools.values(): ++ for lBlocks in list(dCustomPools.values()): + iImgSizeY += len(lBlocks) * (IMG_MARGIN * 2 + FONT_SIZE + MAP_SIZE) + for objBlock in lBlocks: + iMaxBlockSize = max(iMaxBlockSize, objBlock['Size']) +@@ -186,7 +186,7 @@ + + jsonSrc = json.load(args.DumpFile) + if 'DedicatedAllocations' in jsonSrc: +- for tType in jsonSrc['DedicatedAllocations'].items(): ++ for tType in list(jsonSrc['DedicatedAllocations'].items()): + sType = tType[0] + assert sType[:5] == 'Type ' + iType = int(sType[5:]) +@@ -194,16 +194,16 @@ + for objAlloc in tType[1]: + typeData['DedicatedAllocations'].append((objAlloc['Type'], int(objAlloc['Size']), int(objAlloc['Usage']) if ('Usage' in objAlloc) else 0)) + if 'DefaultPools' in jsonSrc: +- for tType in jsonSrc['DefaultPools'].items(): ++ for tType in list(jsonSrc['DefaultPools'].items()): + sType = tType[0] + assert sType[:5] == 'Type ' + iType = int(sType[5:]) + typeData = GetDataForMemoryType(iType) +- for sBlockId, objBlock in tType[1]['Blocks'].items(): ++ for sBlockId, objBlock in list(tType[1]['Blocks'].items()): + ProcessBlock(typeData['DefaultPoolBlocks'], int(sBlockId), objBlock, '') + if 'Pools' in jsonSrc: + objPools = jsonSrc['Pools'] +- for sPoolId, objPool in objPools.items(): ++ for sPoolId, objPool in list(objPools.items()): + iType = int(objPool['MemoryTypeIndex']) + typeData = GetDataForMemoryType(iType) + objBlocks = objPool['Blocks'] +@@ -215,7 +215,7 @@ + sFullName = sPoolId + dstBlockArray = [] + typeData['CustomPools'][sFullName] = dstBlockArray +- for sBlockId, objBlock in objBlocks.items(): ++ for sBlockId, objBlock in list(objBlocks.items()): + ProcessBlock(dstBlockArray, int(sBlockId), objBlock, sAlgorithm) + + if IsDataEmpty(): +@@ 
-272,7 +272,7 @@ + DrawBlock(draw, y, objBlock) + y += MAP_SIZE + IMG_MARGIN + index = 0 +- for sPoolName, listPool in dictMemType['CustomPools'].items(): ++ for sPoolName, listPool in list(dictMemType['CustomPools'].items()): + for objBlock in listPool: + if 'Algorithm' in objBlock and objBlock['Algorithm']: + sAlgorithm = ' (Algorithm: %s)' % (objBlock['Algorithm']) +--- a/src/3rdparty/chromium/third_party/webdriver/pylib/docs/source/conf.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/docs/source/conf.py 2025-01-16 02:26:08.613012508 +0800 +@@ -41,8 +41,8 @@ + master_doc = 'index' + + # General information about the project. +-project = u'Selenium' +-copyright = u'2011, plightbo, simon.m.stewart, hbchai, jrhuggins, et al.' ++project = 'Selenium' ++copyright = '2011, plightbo, simon.m.stewart, hbchai, jrhuggins, et al.' + + # The version info for the project you're documenting, acts as replacement for + # |version| and |release|, also used in various other places throughout the +@@ -179,8 +179,8 @@ + # Grouping the document tree into LaTeX files. List of tuples + # (source start file, target name, title, author, documentclass [howto/manual]). + latex_documents = [ +- ('index', 'Selenium.tex', u'Selenium Documentation', +- u'plightbo, simon.m.stewart, hbchai, jrhuggins, et al.', 'manual'), ++ ('index', 'Selenium.tex', 'Selenium Documentation', ++ 'plightbo, simon.m.stewart, hbchai, jrhuggins, et al.', 'manual'), + ] + + # The name of an image file (relative to this directory) to place at the top of +@@ -212,18 +212,18 @@ + # One entry per manual page. List of tuples + # (source start file, name, description, authors, manual section). + man_pages = [ +- ('index', 'selenium', u'Selenium Documentation', +- [u'plightbo, simon.m.stewart, hbchai, jrhuggins, et al.'], 1) ++ ('index', 'selenium', 'Selenium Documentation', ++ ['plightbo, simon.m.stewart, hbchai, jrhuggins, et al.'], 1) + ] + + + # -- Options for Epub output --------------------------------------------------- + + # Bibliographic Dublin Core info. +-epub_title = u'Selenium' +-epub_author = u'plightbo, simon.m.stewart, hbchai, jrhuggins, et al.' +-epub_publisher = u'plightbo, simon.m.stewart, hbchai, jrhuggins, et al.' +-epub_copyright = u'2011, plightbo, simon.m.stewart, hbchai, jrhuggins, et al.' ++epub_title = 'Selenium' ++epub_author = 'plightbo, simon.m.stewart, hbchai, jrhuggins, et al.' ++epub_publisher = 'plightbo, simon.m.stewart, hbchai, jrhuggins, et al.' ++epub_copyright = '2011, plightbo, simon.m.stewart, hbchai, jrhuggins, et al.' + + # The language of the text. It defaults to the language option + # or en if the language is not set. +--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/__init__.py 2025-01-16 02:26:08.613012508 +0800 +@@ -13,7 +13,7 @@ + # See the License for the specific language governing permissions and + # limitations under the License. 
+ +-from selenium import selenium ++from .selenium import selenium + + + __version__ = "2.28.0" +--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/selenium.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/selenium.py 2025-01-16 02:26:08.613012508 +0800 +@@ -16,8 +16,8 @@ + """ + __docformat__ = "restructuredtext en" + +-import httplib +-import urllib ++import http.client ++import urllib.request, urllib.parse, urllib.error + + class selenium(object): + """ +@@ -190,21 +190,21 @@ + try: + self.sessionId = result + except ValueError: +- raise Exception, result ++ raise Exception(result) + + def stop(self): + self.do_command("testComplete", []) + self.sessionId = None + + def do_command(self, verb, args): +- conn = httplib.HTTPConnection(self.host, self.port) ++ conn = http.client.HTTPConnection(self.host, self.port) + try: +- body = u'cmd=' + urllib.quote_plus(unicode(verb).encode('utf-8')) ++ body = 'cmd=' + urllib.parse.quote_plus(str(verb).encode('utf-8')) + for i in range(len(args)): +- body += '&' + unicode(i+1) + '=' + \ +- urllib.quote_plus(unicode(args[i]).encode('utf-8')) ++ body += '&' + str(i+1) + '=' + \ ++ urllib.parse.quote_plus(str(args[i]).encode('utf-8')) + if (None != self.sessionId): +- body += "&sessionId=" + unicode(self.sessionId) ++ body += "&sessionId=" + str(self.sessionId) + headers = { + "Content-Type": + "application/x-www-form-urlencoded; charset=utf-8" +@@ -212,9 +212,9 @@ + conn.request("POST", "/selenium-server/driver/", body, headers) + + response = conn.getresponse() +- data = unicode(response.read(), "UTF-8") ++ data = str(response.read(), "UTF-8") + if (not data.startswith('OK')): +- raise Exception, data ++ raise Exception(data) + return data + finally: + conn.close() +@@ -263,7 +263,7 @@ + return True + if ("false" == boolstr): + return False +- raise ValueError, "result is neither 'true' nor 'false': " + boolstr ++ raise ValueError("result is neither 'true' nor 'false': " + boolstr) + + def get_boolean_array(self, verb, args): + boolarr = self.get_string_array(verb, args) +@@ -274,7 +274,7 @@ + if ("false" == boolstr): + boolarr[i] = False + continue +- raise ValueError, "result is neither 'true' nor 'false': " + boolarr[i] ++ raise ValueError("result is neither 'true' nor 'false': " + boolarr[i]) + return boolarr + + +--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/common/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/common/__init__.py 2025-01-16 02:26:08.613012508 +0800 +@@ -13,4 +13,4 @@ + # See the License for the specific language governing permissions and + # limitations under the License. + +-import exceptions +\ No newline at end of file ++from . import exceptions +\ No newline at end of file +--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/__init__.py 2025-01-16 02:26:08.613012508 +0800 +@@ -15,17 +15,17 @@ + # See the License for the specific language governing permissions and + # limitations under the License. 
+ +-from firefox.webdriver import WebDriver as Firefox +-from firefox.firefox_profile import FirefoxProfile +-from chrome.webdriver import WebDriver as Chrome +-from chrome.options import Options as ChromeOptions +-from ie.webdriver import WebDriver as Ie +-from opera.webdriver import WebDriver as Opera +-from phantomjs.webdriver import WebDriver as PhantomJS +-from remote.webdriver import WebDriver as Remote +-from common.desired_capabilities import DesiredCapabilities +-from common.action_chains import ActionChains +-from common.touch_actions import TouchActions +-from common.proxy import Proxy ++from .firefox.webdriver import WebDriver as Firefox ++from .firefox.firefox_profile import FirefoxProfile ++from .chrome.webdriver import WebDriver as Chrome ++from .chrome.options import Options as ChromeOptions ++from .ie.webdriver import WebDriver as Ie ++from .opera.webdriver import WebDriver as Opera ++from .phantomjs.webdriver import WebDriver as PhantomJS ++from .remote.webdriver import WebDriver as Remote ++from .common.desired_capabilities import DesiredCapabilities ++from .common.action_chains import ActionChains ++from .common.touch_actions import TouchActions ++from .common.proxy import Proxy + + __version__ = '2.28.0' +--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/chrome/service.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/chrome/service.py 2025-01-16 02:26:08.613012508 +0800 +@@ -85,8 +85,8 @@ + return + + #Tell the Server to die! +- import urllib2 +- urllib2.urlopen("http://127.0.0.1:%d/shutdown" % self.port) ++ import urllib.request, urllib.error, urllib.parse ++ urllib.request.urlopen("http://127.0.0.1:%d/shutdown" % self.port) + count = 0 + while utils.is_connectable(self.port): + if count == 30: +--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/chrome/webdriver.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/chrome/webdriver.py 2025-01-16 02:26:08.613012508 +0800 +@@ -16,12 +16,12 @@ + # limitations under the License. 
+ 
+ import base64
+-import httplib
++import http.client
+ from selenium.webdriver.remote.command import Command
+ from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
+ from selenium.common.exceptions import WebDriverException
+-from service import Service
+-from options import Options
++from .service import Service
++from .options import Options
+ 
+ class WebDriver(RemoteWebDriver):
+     """
+--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/common/keys.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/common/keys.py	2025-01-16 02:26:08.613012508 +0800
+@@ -16,69 +16,69 @@
+ 
+ class Keys(object):
+ 
+-    NULL = u'\ue000'
+-    CANCEL = u'\ue001' # ^break
+-    HELP = u'\ue002'
+-    BACK_SPACE = u'\ue003'
+-    TAB = u'\ue004'
+-    CLEAR = u'\ue005'
+-    RETURN = u'\ue006'
+-    ENTER = u'\ue007'
+-    SHIFT = u'\ue008' # alias
+-    LEFT_SHIFT = u'\ue008' # alias
+-    CONTROL = u'\ue009'
+-    LEFT_CONTROL = u'\ue009' # alias
+-    ALT = u'\ue00a'
+-    LEFT_ALT = u'\ue00a' # alias
+-    PAUSE = u'\ue00b'
+-    ESCAPE = u'\ue00c'
+-    SPACE = u'\ue00d'
+-    PAGE_UP = u'\ue00e'
+-    PAGE_DOWN = u'\ue00f'
+-    END = u'\ue010'
+-    HOME = u'\ue011'
+-    LEFT = u'\ue012'
+-    ARROW_LEFT = u'\ue012' # alias
+-    UP = u'\ue013'
+-    ARROW_UP = u'\ue013' # alias
+-    RIGHT = u'\ue014'
+-    ARROW_RIGHT = u'\ue014' # alias
+-    DOWN = u'\ue015'
+-    ARROW_DOWN = u'\ue015' # alias
+-    INSERT = u'\ue016'
+-    DELETE = u'\ue017'
+-    SEMICOLON = u'\ue018'
+-    EQUALS = u'\ue019'
++    NULL = '\ue000'
++    CANCEL = '\ue001' # ^break
++    HELP = '\ue002'
++    BACK_SPACE = '\ue003'
++    TAB = '\ue004'
++    CLEAR = '\ue005'
++    RETURN = '\ue006'
++    ENTER = '\ue007'
++    SHIFT = '\ue008'
++    LEFT_SHIFT = '\ue008' # alias
++    CONTROL = '\ue009'
++    LEFT_CONTROL = '\ue009' # alias
++    ALT = '\ue00a'
++    LEFT_ALT = '\ue00a' # alias
++    PAUSE = '\ue00b'
++    ESCAPE = '\ue00c'
++    SPACE = '\ue00d'
++    PAGE_UP = '\ue00e'
++    PAGE_DOWN = '\ue00f'
++    END = '\ue010'
++    HOME = '\ue011'
++    LEFT = '\ue012'
++    ARROW_LEFT = '\ue012' # alias
++    UP = '\ue013'
++    ARROW_UP = '\ue013' # alias
++    RIGHT = '\ue014'
++    ARROW_RIGHT = '\ue014' # alias
++    DOWN = '\ue015'
++    ARROW_DOWN = '\ue015' # alias
++    INSERT = '\ue016'
++    DELETE = '\ue017'
++    SEMICOLON = '\ue018'
++    EQUALS = '\ue019'
+ 
+-    NUMPAD0 = u'\ue01a' # numbe pad keys
+-    NUMPAD1 = u'\ue01b'
+-    NUMPAD2 = u'\ue01c'
+-    NUMPAD3 = u'\ue01d'
+-    NUMPAD4 = u'\ue01e'
+-    NUMPAD5 = u'\ue01f'
+-    NUMPAD6 = u'\ue020'
+-    NUMPAD7 = u'\ue021'
+-    NUMPAD8 = u'\ue022'
+-    NUMPAD9 = u'\ue023'
+-    MULTIPLY = u'\ue024'
+-    ADD = u'\ue025'
+-    SEPARATOR = u'\ue026'
+-    SUBTRACT = u'\ue027'
+-    DECIMAL = u'\ue028'
+-    DIVIDE = u'\ue029'
++    NUMPAD0 = '\ue01a' # number pad keys
++    NUMPAD1 = '\ue01b'
++    NUMPAD2 = '\ue01c'
++    NUMPAD3 = '\ue01d'
++    NUMPAD4 = '\ue01e'
++    NUMPAD5 = '\ue01f'
++    NUMPAD6 = '\ue020'
++    NUMPAD7 = '\ue021'
++    NUMPAD8 = '\ue022'
++    NUMPAD9 = '\ue023'
++    MULTIPLY = '\ue024'
++    ADD = '\ue025'
++    SEPARATOR = '\ue026'
++    SUBTRACT = '\ue027'
++    DECIMAL = '\ue028'
++    DIVIDE = '\ue029'
+ 
+-    F1 = u'\ue031' # function keys
+-    F2 = u'\ue032'
+-    F3 = u'\ue033'
+-    F4 = u'\ue034'
+-    F5 = u'\ue035'
+-    F6 = u'\ue036'
+-    F7 = u'\ue037'
+-    F8 = u'\ue038'
+-    F9 = u'\ue039'
+-    F10 = u'\ue03a'
+-    F11 = u'\ue03b'
+-    F12 = u'\ue03c'
++    F1 = '\ue031' # function keys
++    F2 = '\ue032'
++    F3 = '\ue033'
++    F4 = '\ue034'
++    F5 = '\ue035'
++    F6 = '\ue036'
++    F7 = '\ue037'
++    F8 = '\ue038'
++    F9 = '\ue039'
++    F10 = '\ue03a'
++    F11 = '\ue03b'
++    F12 = '\ue03c'
+ 
+-    META = u'\ue03d'
+-    COMMAND = u'\ue03d'
++    META = '\ue03d'
++    COMMAND = '\ue03d'
+--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/common/proxy.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/common/proxy.py	2025-01-16 02:26:08.613012508 +0800
+@@ -26,19 +26,19 @@
+ 
+     def __init__(self, raw=None):
+         if raw is not None:
+-            if raw.has_key('proxyType') and raw['proxyType'] is not None:
++            if 'proxyType' in raw and raw['proxyType'] is not None:
+                 self.proxy_type = raw['proxyType']
+-            if raw.has_key('ftpProxy') and raw['ftpProxy'] is not None:
++            if 'ftpProxy' in raw and raw['ftpProxy'] is not None:
+                 self.ftp_proxy = raw['ftpProxy']
+-            if raw.has_key('httpProxy') and raw['httpProxy'] is not None:
++            if 'httpProxy' in raw and raw['httpProxy'] is not None:
+                 self.http_proxy = raw['httpProxy']
+-            if raw.has_key('noProxy') and raw['noProxy'] is not None:
++            if 'noProxy' in raw and raw['noProxy'] is not None:
+                 self.no_proxy = raw['noProxy']
+-            if raw.has_key('proxyAutoconfigUrl') and raw['proxyAutoconfigUrl'] is not None:
++            if 'proxyAutoconfigUrl' in raw and raw['proxyAutoconfigUrl'] is not None:
+                 self.proxy_autoconfig_url = raw['proxyAutoconfigUrl']
+-            if raw.has_key('sslProxy') and raw['sslProxy'] is not None:
++            if 'sslProxy' in raw and raw['sslProxy'] is not None:
+                 self.sslProxy = raw['sslProxy']
+-            if raw.has_key('autodetect') and raw['autodetect'] is not None:
++            if 'autodetect' in raw and raw['autodetect'] is not None:
+                 self.auto_detect = raw['autodetect']
+ 
+     @property
+--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/common/utils.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/common/utils.py	2025-01-16 02:26:08.613012508 +0800
+@@ -35,9 +35,9 @@
+         return False
+ 
+ def is_url_connectable(port):
+-    import urllib2
++    import urllib.request, urllib.error, urllib.parse
+     try:
+-        res = urllib2.urlopen("http://localhost:%s/status" % port)
++        res = urllib.request.urlopen("http://localhost:%s/status" % port)
+         if res.getcode() == 200:
+             return True
+         else:
+--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/firefox/firefox_binary.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/firefox/firefox_binary.py	2025-01-16 02:26:08.613012508 +0800
+@@ -98,7 +98,7 @@
+         return True
+ 
+     def _find_exe_in_registry(self):
+-        from _winreg import OpenKey, QueryValue, HKEY_LOCAL_MACHINE
++        from winreg import OpenKey, QueryValue, HKEY_LOCAL_MACHINE
+         import shlex
+         keys = (
+            r"SOFTWARE\Classes\FirefoxHTML\shell\open\command",
+--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/firefox/firefox_profile.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/firefox/firefox_profile.py	2025-01-16 02:26:08.613012508 +0800
+@@ -12,7 +12,7 @@
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+-from __future__ import with_statement ++ + + import base64 + import copy +@@ -21,7 +21,7 @@ + import shutil + import tempfile + import zipfile +-from cStringIO import StringIO ++from io import StringIO + from xml.dom import minidom + from distutils import dir_util + from selenium.webdriver.common.proxy import ProxyType +@@ -126,7 +126,7 @@ + clean_value = 'false' + elif isinstance(value, str): + clean_value = '"%s"' % value +- elif isinstance(value, unicode): ++ elif isinstance(value, str): + clean_value = '"%s"' % value + else: + clean_value = str(int(value)) +@@ -261,7 +261,7 @@ + writes the current user prefs dictionary to disk + """ + with open(self.userPrefs, "w") as f: +- for key, value in user_prefs.items(): ++ for key, value in list(user_prefs.items()): + f.write('user_pref("%s", %s);\n' % (key, value)) + + def _read_existing_userjs(self): +@@ -370,7 +370,7 @@ + for node in description.childNodes: + # Remove the namespace prefix from the tag for comparison + entry = node.nodeName.replace(em, "") +- if entry in details.keys(): ++ if entry in list(details.keys()): + details.update({ entry: get_text(node) }) + + return details +--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/firefox/webdriver.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/firefox/webdriver.py 2025-01-16 02:26:08.613012508 +0800 +@@ -15,11 +15,11 @@ + + + import base64 +-import httplib ++import http.client + import shutil + import sys +-import urllib2 +-from firefox_binary import FirefoxBinary ++import urllib.request, urllib.error, urllib.parse ++from .firefox_binary import FirefoxBinary + from selenium.common.exceptions import ErrorInResponseException + from selenium.webdriver.common.desired_capabilities import DesiredCapabilities + from selenium.webdriver.firefox.extension_connection import ExtensionConnection +@@ -67,7 +67,7 @@ + """Quits the driver and close every associated window.""" + try: + RemoteWebDriver.quit(self) +- except httplib.BadStatusLine: ++ except http.client.BadStatusLine: + # Happens if Firefox shutsdown before we've read the response from + # the socket. + pass +@@ -76,8 +76,8 @@ + shutil.rmtree(self.profile.path) + if self.profile.tempfolder is not None: + shutil.rmtree(self.profile.tempfolder) +- except Exception, e: +- print str(e) ++ except Exception as e: ++ print(str(e)) + + @property + def firefox_profile(self): +--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/ie/service.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/ie/service.py 2025-01-16 02:26:08.613012508 +0800 +@@ -87,8 +87,8 @@ + return + + #Tell the Server to die! 
+- import urllib2 +- urllib2.urlopen("http://127.0.0.1:%d/shutdown" % self.port) ++ import urllib.request, urllib.error, urllib.parse ++ urllib.request.urlopen("http://127.0.0.1:%d/shutdown" % self.port) + count = 0 + while utils.is_connectable(self.port): + if count == 30: +--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/ie/webdriver.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/ie/webdriver.py 2025-01-16 02:26:08.613012508 +0800 +@@ -21,7 +21,7 @@ + from selenium.webdriver.remote.command import Command + from selenium.common.exceptions import WebDriverException + import base64 +-from service import Service ++from .service import Service + + DEFAULT_TIMEOUT = 30 + DEFAULT_PORT = 0 +--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/opera/webdriver.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/opera/webdriver.py 2025-01-16 02:26:08.613012508 +0800 +@@ -16,12 +16,12 @@ + # limitations under the License. + + import base64 +-import httplib ++import http.client + import os + from selenium.webdriver.common.desired_capabilities import DesiredCapabilities + from selenium.webdriver.remote.command import Command + from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver +-from service import Service ++from .service import Service + + class WebDriver(RemoteWebDriver): + """ +@@ -62,7 +62,7 @@ + """ + try: + RemoteWebDriver.quit(self) +- except httplib.BadStatusLine: ++ except http.client.BadStatusLine: + pass + finally: + self.service.stop() +--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/phantomjs/service.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/phantomjs/service.py 2025-01-16 02:26:08.613012508 +0800 +@@ -56,7 +56,7 @@ + try: + self.process = subprocess.Popen(self.service_args, + stdout=self._log, stderr=self._log) +- except Exception, e: ++ except Exception as e: + raise WebDriverException("Unable to start phantomjs with ghostdriver.", e) + count = 0 + while not utils.is_connectable(self.port): +--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/phantomjs/webdriver.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/phantomjs/webdriver.py 2025-01-16 02:26:08.613012508 +0800 +@@ -15,12 +15,12 @@ + # limitations under the License. 
+ + import base64 +-import httplib ++import http.client + from selenium.webdriver.remote.command import Command + from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver + from selenium.webdriver.common.desired_capabilities import DesiredCapabilities + from selenium.common.exceptions import WebDriverException +-from service import Service ++from .service import Service + + class WebDriver(RemoteWebDriver): + """ +--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/remote/errorhandler.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/remote/errorhandler.py 2025-01-16 02:26:08.613012508 +0800 +@@ -140,7 +140,7 @@ + zeroeth = value['stackTrace'][0] + except: + pass +- if zeroeth.has_key('methodName'): ++ if 'methodName' in zeroeth: + stacktrace = "Method %s threw an error in %s" % \ + (zeroeth['methodName'], + self._value_or_default(zeroeth, 'fileName', '[No file name]')) +@@ -149,4 +149,4 @@ + raise exception_class(message, screen, stacktrace) + + def _value_or_default(self, obj, key, default): +- return obj[key] if obj.has_key(key) else default ++ return obj[key] if key in obj else default +--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/remote/remote_connection.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/remote/remote_connection.py 2025-01-16 02:26:08.613012508 +0800 +@@ -16,15 +16,15 @@ + import logging + import socket + import string +-import urllib2 +-import urlparse ++import urllib.request, urllib.error, urllib.parse ++import urllib.parse + +-from command import Command +-import utils ++from .command import Command ++from . import utils + + LOGGER = logging.getLogger(__name__) + +-class Request(urllib2.Request): ++class Request(urllib.request.Request): + """ + Extends the urllib2.Request to support all HTTP request types. + """ +@@ -42,7 +42,7 @@ + elif method != 'POST' and method != 'PUT': + data = None + self._method = method +- urllib2.Request.__init__(self, url, data=data) ++ urllib.request.Request.__init__(self, url, data=data) + + def get_method(self): + """ +@@ -92,7 +92,7 @@ + return self.url + + +-class HttpErrorHandler(urllib2.HTTPDefaultErrorHandler): ++class HttpErrorHandler(urllib.request.HTTPDefaultErrorHandler): + """ + A custom HTTP error handler. + +@@ -126,7 +126,7 @@ + + def __init__(self, remote_server_addr): + # Attempt to resolve the hostname and get an IP address. 
+- parsed_url = urlparse.urlparse(remote_server_addr) ++ parsed_url = urllib.parse.urlparse(remote_server_addr) + if parsed_url.hostname: + try: + netloc = socket.gethostbyname(parsed_url.hostname) +@@ -137,7 +137,7 @@ + if parsed_url.password: + auth += ':%s' % parsed_url.password + netloc = '%s@%s' % (auth, netloc) +- remote_server_addr = urlparse.urlunparse( ++ remote_server_addr = urllib.parse.urlunparse( + (parsed_url.scheme, netloc, parsed_url.path, + parsed_url.params, parsed_url.query, parsed_url.fragment)) + except socket.gaierror: +@@ -352,16 +352,16 @@ + """ + LOGGER.debug('%s %s %s' % (method, url, data)) + +- parsed_url = urlparse.urlparse(url) ++ parsed_url = urllib.parse.urlparse(url) + auth = None + password_manager = None + if parsed_url.username: + netloc = parsed_url.hostname + if parsed_url.port: + netloc += ":%s" % parsed_url.port +- cleaned_url = urlparse.urlunparse((parsed_url.scheme, netloc, parsed_url.path, ++ cleaned_url = urllib.parse.urlunparse((parsed_url.scheme, netloc, parsed_url.path, + parsed_url.params, parsed_url.query, parsed_url.fragment)) +- password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm() ++ password_manager = urllib.request.HTTPPasswordMgrWithDefaultRealm() + password_manager.add_password(None, "%s://%s" % (parsed_url.scheme, netloc), parsed_url.username, parsed_url.password) + request = Request(cleaned_url, data=data, method=method) + else: +@@ -372,11 +372,11 @@ + request.add_header('Content-Type', 'application/json;charset=UTF-8') + + if password_manager: +- opener = urllib2.build_opener(urllib2.HTTPRedirectHandler(), ++ opener = urllib.request.build_opener(urllib.request.HTTPRedirectHandler(), + HttpErrorHandler(), +- urllib2.HTTPBasicAuthHandler(password_manager)) ++ urllib.request.HTTPBasicAuthHandler(password_manager)) + else: +- opener = urllib2.build_opener(urllib2.HTTPRedirectHandler(), ++ opener = urllib.request.build_opener(urllib.request.HTTPRedirectHandler(), + HttpErrorHandler()) + response = opener.open(request) + try: +--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/remote/utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/remote/utils.py 2025-01-16 02:26:08.613012508 +0800 +@@ -107,6 +107,6 @@ + LOGGER.info("Unzipped file can be found at %s" % tempdir) + return tempdir + +- except IOError, err: ++ except IOError as err: + LOGGER.error("Error in extracting webdriver.xpi: %s" % err) + return None +--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/remote/webdriver.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/remote/webdriver.py 2025-01-16 02:26:08.613012508 +0800 +@@ -16,10 +16,10 @@ + The WebDriver implementation. 
+ """
+ import base64
+-from command import Command
+-from webelement import WebElement
+-from remote_connection import RemoteConnection
+-from errorhandler import ErrorHandler
++from .command import Command
++from .webelement import WebElement
++from .remote_connection import RemoteConnection
++from .errorhandler import ErrorHandler
+ from selenium.common.exceptions import WebDriverException
+ from selenium.common.exceptions import InvalidSelectorException
+ from selenium.webdriver.common.by import By
+@@ -55,7 +55,7 @@
+         if not isinstance(desired_capabilities, dict):
+             raise WebDriverException("Desired Capabilities must be a dictionary")
+         self.command_executor = command_executor
+-        if type(self.command_executor) is str or type(self.command_executor) is unicode:
++        if type(self.command_executor) is str:
+             self.command_executor = RemoteConnection(command_executor)
+         self.session_id = None
+         self.capabilities = {}
+@@ -111,7 +111,7 @@
+     def _wrap_value(self, value):
+         if isinstance(value, dict):
+             converted = {}
+-            for key, val in value.items():
++            for key, val in list(value.items()):
+                 converted[key] = self._wrap_value(val)
+             return converted
+         elif isinstance(value, WebElement):
+--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/remote/webelement.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/remote/webelement.py	2025-01-16 02:26:08.613012508 +0800
+@@ -17,11 +17,11 @@
+ """WebElement implementation."""
+ import os
+ import zipfile
+-from StringIO import StringIO
++from io import StringIO  # note: code that feeds zipfile binary data may need io.BytesIO on Python 3
+ import base64
+ 
+ 
+-from command import Command
++from .command import Command
+ from selenium.common.exceptions import WebDriverException
+ from selenium.common.exceptions import InvalidSelectorException
+ from selenium.webdriver.common.by import By
+@@ -66,7 +66,7 @@
+         if resp['value'] is None:
+             attributeValue = None
+         else:
+-            attributeValue = unicode(resp['value'])
++            attributeValue = str(resp['value'])
+             if type(resp['value']) is bool:
+                 attributeValue = attributeValue.lower()
+ 
+--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/support/color.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/support/color.py	2025-01-16 02:26:08.613012508 +0800
+@@ -75,7 +75,7 @@
+             return Color(*rgb)
+         elif m.match(HSL_PATTERN, str_) or m.match(HSLA_PATTERN, str_):
+             return Color._from_hsl(*m.groups)
+-        elif str_.upper() in Colors.keys():
++        elif str_.upper() in list(Colors.keys()):
+             return Colors[str_.upper()]
+         else:
+             raise ValueError("Could not convert %s into color" % str_)
+--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/support/event_firing_webdriver.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/support/event_firing_webdriver.py	2025-01-16 02:26:08.613012508 +0800
+@@ -18,7 +18,7 @@
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.remote.webdriver import WebDriver
+ from selenium.webdriver.remote.webelement import WebElement
+-from abstract_event_listener import AbstractEventListener
++from .abstract_event_listener import AbstractEventListener
+ 
+ 
+ def _wrap_elements(result, ef_driver):
+@@ -152,7 +152,7 @@
+         getattr(self._listener, "before_%s" % l_call)(*l_args)
+         try:
+             result = getattr(self._driver, d_call)(*d_args)
+-        except Exception, e:
++        except Exception as e:
+             self._listener.on_exception(e, self._driver)
+             raise e
+         getattr(self._listener, "after_%s" % l_call)(*l_args)
+@@ -174,7 +174,7 @@
+         else:
+             try:
+                 object.__setattr__(self._driver, item, value)
+-            except Exception, e:
++            except Exception as e:
+                 self._listener.on_exception(e, self._driver)
+                 raise e
+ 
+@@ -184,7 +184,7 @@
+             try:
+                 result = attrib(*args)
+                 return _wrap_elements(result, self)
+-            except Exception, e:
++            except Exception as e:
+                 self._listener.on_exception(e, self._driver)
+                 raise e
+ 
+@@ -193,7 +193,7 @@
+             attrib = getattr(self._driver, name)
+             if not callable(attrib):
+                 return attrib
+-        except Exception, e:
++        except Exception as e:
+             self._listener.on_exception(e, self._driver)
+             raise e
+         return _wrap
+@@ -287,7 +287,7 @@
+         getattr(self._listener, "before_%s" % l_call)(*l_args)
+         try:
+             result = getattr(self._webelement, d_call)(*d_args)
+-        except Exception, e:
++        except Exception as e:
+             self._listener.on_exception(e, self._driver)
+             raise e
+         getattr(self._listener, "after_%s" % l_call)(*l_args)
+@@ -299,7 +299,7 @@
+         else:
+             try:
+                 object.__setattr__(self._webelement, item, value)
+-            except Exception, e:
++            except Exception as e:
+                 self._listener.on_exception(e, self._driver)
+                 raise e
+ 
+@@ -309,7 +309,7 @@
+             try:
+                 result = attrib(*args)
+                 return _wrap_elements(result, self._ef_driver)
+-            except Exception, e:
++            except Exception as e:
+                 self._listener.on_exception(e, self._driver)
+                 raise e
+ 
+@@ -318,7 +318,7 @@
+             attrib = getattr(self._webelement, name)
+             if not callable(attrib):
+                 return attrib
+-        except Exception, e:
++        except Exception as e:
+             self._listener.on_exception(e, self._driver)
+             raise e
+         return _wrap
+--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/support/events.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/support/events.py	2025-01-16 02:26:08.613012508 +0800
+@@ -14,5 +14,5 @@
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ 
+-from abstract_event_listener import AbstractEventListener
+-from event_firing_webdriver import EventFiringWebDriver
++from .abstract_event_listener import AbstractEventListener
++from .event_firing_webdriver import EventFiringWebDriver
+--- a/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/support/ui.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webdriver/pylib/selenium/webdriver/support/ui.py	2025-01-16 02:26:08.613012508 +0800
+@@ -14,6 +14,6 @@
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ +-from select import Select +-from wait import WebDriverWait ++from .select import Select ++from .wait import WebDriverWait + +--- a/src/3rdparty/chromium/third_party/webrtc/PRESUBMIT.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/PRESUBMIT.py 2025-01-16 02:26:08.616262452 +0800 +@@ -441,7 +441,7 @@ + 'Mixed sources: \n' + '%s\n' + 'Violating GN files:\n%s\n' % (json.dumps(errors, indent=2), +- '\n'.join(errors.keys())))] ++ '\n'.join(list(errors.keys()))))] + return [] + + +@@ -827,7 +827,7 @@ + """Returns the license header regexp.""" + # Accept any year number from 2003 to the current year + current_year = int(input_api.time.strftime('%Y')) +- allowed_years = (str(s) for s in reversed(xrange(2003, current_year + 1))) ++ allowed_years = (str(s) for s in reversed(range(2003, current_year + 1))) + years_re = '(' + '|'.join(allowed_years) + ')' + license_header = ( + r'.*? Copyright( \(c\))? %(year)s The WebRTC [Pp]roject [Aa]uthors\. ' +@@ -1154,7 +1154,7 @@ + if rule.startswith('+') or rule.startswith('!') + ]) + for _, rules in parsed_deps.get('specific_include_rules', +- {}).iteritems(): ++ {}).items(): + add_rules.update([ + rule[1:] for rule in rules + if rule.startswith('+') or rule.startswith('!') +@@ -1181,7 +1181,7 @@ + global_scope = { + 'Var': VarImpl(local_scope).Lookup, + } +- exec contents in global_scope, local_scope ++ exec(contents, global_scope, local_scope) + return local_scope + + +--- a/src/3rdparty/chromium/third_party/webrtc/presubmit_test_mocks.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/presubmit_test_mocks.py 2025-01-16 02:26:08.616262452 +0800 +@@ -48,7 +48,7 @@ + with open(filename, mode) as f: + return f.read() + # Otherwise, file is not in our mock API. +- raise IOError, "No such file or directory: '%s'" % filename ++ raise IOError("No such file or directory: '%s'" % filename) + + + class MockOutputApi(object): +--- a/src/3rdparty/chromium/third_party/webrtc/audio/test/low_bandwidth_audio_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/audio/test/low_bandwidth_audio_test.py 2025-01-16 02:26:08.616262452 +0800 +@@ -306,13 +306,13 @@ + + analyzer_results = analyzer.func(analyzer.executable, + reference_file, degraded_file) +- for metric, (value, units) in analyzer_results.items(): ++ for metric, (value, units) in list(analyzer_results.items()): + hist = histograms.CreateHistogram(metric, units, [value]) + user_story = generic_set.GenericSet([test_name]) + hist.diagnostics[reserved_infos.STORIES.name] = user_story + + # Output human readable results. 
+-      print 'RESULT %s: %s= %s %s' % (metric, test_name, value, units)
++      print('RESULT %s: %s= %s %s' % (metric, test_name, value, units))
+ 
+   if args.remove:
+     os.remove(reference_file)
+--- a/src/3rdparty/chromium/third_party/webrtc/examples/androidapp/start_loopback_stubbed_camera_saved_video_out.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webrtc/examples/androidapp/start_loopback_stubbed_camera_saved_video_out.py	2025-01-16 02:26:08.616262452 +0800
+@@ -55,7 +55,7 @@
+ 
+   (options, args) = parser.parse_args()
+ 
+-  print (options, args)
++  print((options, args))
+ 
+   devname = options.devname
+ 
+@@ -97,18 +97,18 @@
+       'org.appspot.apprtc.SAVE_REMOTE_VIDEO_TO_FILE_WIDTH': videoout_width,
+       'org.appspot.apprtc.SAVE_REMOTE_VIDEO_TO_FILE_HEIGHT': videoout_height})
+ 
+-  print extras
++  print(extras)
+ 
+   device.startActivity(data='https://appr.tc',
+       action='android.intent.action.VIEW',
+       component='org.appspot.apprtc/.ConnectActivity', extras=extras)
+ 
+-  print 'Running a call for %d seconds' % call_length
+-  for _ in xrange(call_length):
++  print('Running a call for %d seconds' % call_length)
++  for _ in range(call_length):
+     sys.stdout.write('.')
+     sys.stdout.flush()
+     time.sleep(1)
+-  print '\nEnding call.'
++  print('\nEnding call.')
+ 
+   # Press back to end the call. Will end on both sides.
+   device.press('KEYCODE_BACK', MonkeyDevice.DOWN_AND_UP)
+--- a/src/3rdparty/chromium/third_party/webrtc/modules/audio_coding/audio_network_adaptor/parse_ana_dump.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webrtc/modules/audio_coding/audio_network_adaptor/parse_ana_dump.py	2025-01-16 02:26:08.616262452 +0800
+@@ -38,7 +38,7 @@
+     event = debug_dump_pb2.Event()
+     event.ParseFromString(file_to_parse.read(message_size))
+   except IOError:
+-    print 'Invalid message in file'
++    print('Invalid message in file')
+     return None
+   return event
+ 
+@@ -111,7 +111,7 @@
+ 
+   options = parser.parse_args()[0]
+   if options.dump_file_to_parse is None:
+-    print "No dump file to parse is set.\n"
++    print("No dump file to parse is set.\n")
+     parser.print_help()
+     exit()
+   (metrics, decisions) = ParseAnaDump(options.dump_file_to_parse)
+@@ -119,7 +119,7 @@
+   decision_keys = options.decision_keys
+   plot_count = len(metric_keys) + len(decision_keys)
+   if plot_count == 0:
+-    print "You have to set at least one metric or decision to plot.\n"
++    print("You have to set at least one metric or decision to plot.\n")
+     parser.print_help()
+     exit()
+   plots = []
+--- a/src/3rdparty/chromium/third_party/webrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment.py	2025-01-16 02:26:08.616262452 +0800
+@@ -37,9 +37,9 @@
+     echo_path_simulation.EchoPathSimulator.REGISTERED_CLASSES)
+ _TEST_DATA_GENERATOR_CLASSES = (
+     test_data_generation.TestDataGenerator.REGISTERED_CLASSES)
+-_TEST_DATA_GENERATORS_NAMES = _TEST_DATA_GENERATOR_CLASSES.keys()
++_TEST_DATA_GENERATORS_NAMES = list(_TEST_DATA_GENERATOR_CLASSES.keys())
+ _EVAL_SCORE_WORKER_CLASSES = eval_scores.EvaluationScore.REGISTERED_CLASSES
+-_EVAL_SCORE_WORKER_NAMES = _EVAL_SCORE_WORKER_CLASSES.keys()
++_EVAL_SCORE_WORKER_NAMES = list(_EVAL_SCORE_WORKER_CLASSES.keys())
+ 
+ _DEFAULT_CONFIG_FILE = 'apm_configs/default.json'
+ 
+--- a/src/3rdparty/chromium/third_party/webrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_optimize.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webrtc/modules/audio_processing/test/py_quality_assessment/apm_quality_assessment_optimize.py	2025-01-16 02:26:08.616262452 +0800
+@@ -11,7 +11,7 @@
+ parsing the output generated apm_quality_assessment.py.
+ """
+ 
+-from __future__ import division
++
+ 
+ import collections
+ import logging
+--- a/src/3rdparty/chromium/third_party/webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/annotations.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/annotations.py	2025-01-16 02:26:08.616262452 +0800
+@@ -9,7 +9,7 @@
+ """Extraction of annotations from audio files.
+ """
+ 
+-from __future__ import division
++
+ import logging
+ import os
+ import shutil
+@@ -98,7 +98,7 @@
+ 
+     assert len(self._external_vads) == len(external_vads), (
+         'The external VAD names must be unique.')
+-    for vad in external_vads.values():
++    for vad in list(external_vads.values()):
+       if not isinstance(vad, external_vad.ExternalVad):
+         raise exceptions.InitializationException(
+             'Invalid vad type: ' + str(type(vad)))
+--- a/src/3rdparty/chromium/third_party/webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/annotations_unittest.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/annotations_unittest.py	2025-01-16 02:26:08.616262452 +0800
+@@ -9,7 +9,7 @@
+ """Unit tests for the annotations module.
+ """
+ 
+-from __future__ import division
++
+ import logging
+ import os
+ import shutil
+--- a/src/3rdparty/chromium/third_party/webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/eval_scores.py	2025-01-16 02:26:08.616262452 +0800
+@@ -9,7 +9,7 @@
+ """Evaluation score abstract class and implementations.
+ """
+ 
+-from __future__ import division
++
+ import logging
+ import os
+ import re
+--- a/src/3rdparty/chromium/third_party/webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/export.py	2025-01-16 02:26:08.616262452 +0800
+@@ -316,7 +316,7 @@
+                '').format(score_tuple.score, anchor)]
+ 
+     # Add all the available file paths as hidden data.
+-    for field_name in score_tuple.keys():
++    for field_name in list(score_tuple.keys()):
+       if field_name.endswith('_filepath'):
+         html.append(''.format(
+             field_name, score_tuple[field_name]))
+--- a/src/3rdparty/chromium/third_party/webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/external_vad.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/external_vad.py	2025-01-16 02:26:08.616262452 +0800
+@@ -6,7 +6,7 @@
+ # in the file PATENTS. All contributing project authors may
+ # be found in the AUTHORS file in the root of the source tree.
+ +-from __future__ import division ++ + + import logging + import os +--- a/src/3rdparty/chromium/third_party/webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation.py 2025-01-16 02:26:08.616262452 +0800 +@@ -82,7 +82,7 @@ + + @property + def config_names(self): +- return self._noisy_signal_filepaths.keys() ++ return list(self._noisy_signal_filepaths.keys()) + + @property + def noisy_signal_filepaths(self): +--- a/src/3rdparty/chromium/third_party/webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_unittest.py 2025-01-16 02:26:08.616262452 +0800 +@@ -111,9 +111,9 @@ + def GetNoiseReferenceFilePaths(identity_generator): + noisy_signal_filepaths = identity_generator.noisy_signal_filepaths + reference_signal_filepaths = identity_generator.reference_signal_filepaths +- assert noisy_signal_filepaths.keys() == reference_signal_filepaths.keys() +- assert len(noisy_signal_filepaths.keys()) == 1 +- key = noisy_signal_filepaths.keys()[0] ++ assert list(noisy_signal_filepaths.keys()) == list(reference_signal_filepaths.keys()) ++ assert len(list(noisy_signal_filepaths.keys())) == 1 ++ key = list(noisy_signal_filepaths.keys())[0] + return noisy_signal_filepaths[key], reference_signal_filepaths[key] + + # Test the |copy_with_identity| flag. +--- a/src/3rdparty/chromium/third_party/webrtc/modules/video_coding/codecs/test/plot_webrtc_test_logs.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/modules/video_coding/codecs/test/plot_webrtc_test_logs.py 2025-01-16 02:26:08.616262452 +0800 +@@ -243,7 +243,7 @@ + try: + value = float(value) + except ValueError: +- print "Not a float, skipped %s" % value ++ print("Not a float, skipped %s" % value) + return False, -1 + + return True, value +@@ -269,13 +269,13 @@ + for key in sorted(metrics): + data = metrics[key] + if y_metric not in data: +- print "Failed to find metric: %s" % y_metric ++ print("Failed to find metric: %s" % y_metric) + continue + + y = numpy.array(data[y_metric]) + x = numpy.array(data[x_metric]) + if len(y) != len(x): +- print "Length mismatch for %s, %s" % (y, x) ++ print("Length mismatch for %s, %s" % (y, x)) + continue + + label = y_metric + ' - ' + str(key) +@@ -372,7 +372,7 @@ + + + def GetIdx(text_list): +- return int(raw_input(text_list)) - 1 ++ return int(input(text_list)) - 1 + + + def main(): +--- a/src/3rdparty/chromium/third_party/webrtc/rtc_tools/compare_videos.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/rtc_tools/compare_videos.py 2025-01-16 02:26:08.616262452 +0800 +@@ -7,9 +7,9 @@ + # in the file PATENTS. All contributing project authors may + # be found in the AUTHORS file in the root of the source tree. 
+ +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + import json + import optparse + import os +--- a/src/3rdparty/chromium/third_party/webrtc/rtc_tools/metrics_plotter.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/rtc_tools/metrics_plotter.py 2025-01-16 02:26:08.616262452 +0800 +@@ -57,7 +57,7 @@ + line = line.replace(LINE_PREFIX, '') + metrics.append(json.loads(line)) + else: +- print line ++ print(line) + + for metric in metrics: + if len(metrics_to_plot) > 0 and metric[GRAPH_NAME] not in metrics_to_plot: +--- a/src/3rdparty/chromium/third_party/webrtc/rtc_tools/py_event_log_analyzer/misc.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/rtc_tools/py_event_log_analyzer/misc.py 2025-01-16 02:26:08.616262452 +0800 +@@ -9,7 +9,7 @@ + """Utility functions for calculating statistics. + """ + +-from __future__ import division ++ + import collections + import sys + +--- a/src/3rdparty/chromium/third_party/webrtc/rtc_tools/py_event_log_analyzer/misc_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/rtc_tools/py_event_log_analyzer/misc_test.py 2025-01-16 02:26:08.616262452 +0800 +@@ -14,7 +14,7 @@ + python3 misc_test.py + """ + +-from __future__ import division ++ + import random + import unittest + +--- a/src/3rdparty/chromium/third_party/webrtc/rtc_tools/py_event_log_analyzer/pb_parse.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/rtc_tools/py_event_log_analyzer/pb_parse.py 2025-01-16 02:26:08.616262452 +0800 +@@ -8,7 +8,7 @@ + + """Parses protobuf RTC dumps.""" + +-from __future__ import division ++ + import struct + import pyproto.logging.rtc_event_log.rtc_event_log_pb2 as rtc_pb + +--- a/src/3rdparty/chromium/third_party/webrtc/rtc_tools/py_event_log_analyzer/rtp_analyzer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/rtc_tools/py_event_log_analyzer/rtp_analyzer.py 2025-01-16 02:26:08.616262452 +0800 +@@ -8,8 +8,8 @@ + + """Displays statistics and plots graphs from RTC protobuf dump.""" + +-from __future__ import division +-from __future__ import print_function ++ ++ + + import collections + import optparse +@@ -90,7 +90,7 @@ + """Queries user for SSRC.""" + + if len(self.ssrc_frequencies) == 1: +- chosen_ssrc = self.ssrc_frequencies.keys()[0] ++ chosen_ssrc = list(self.ssrc_frequencies.keys())[0] + self.PrintSsrcInfo("", chosen_ssrc) + return chosen_ssrc + +--- a/src/3rdparty/chromium/third_party/webrtc/rtc_tools/py_event_log_analyzer/rtp_analyzer_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/rtc_tools/py_event_log_analyzer/rtp_analyzer_test.py 2025-01-16 02:26:08.616262452 +0800 +@@ -56,6 +56,6 @@ + + if __name__ == "__main__": + if MISSING_NUMPY: +- print "Missing numpy, skipping test." 
++ print("Missing numpy, skipping test.") + else: + unittest.main() +--- a/src/3rdparty/chromium/third_party/webrtc/rtc_tools/testing/utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/rtc_tools/testing/utils.py 2025-01-16 02:26:08.616262452 +0800 +@@ -9,9 +9,9 @@ + + """Utilities for all our deps-management stuff.""" + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function ++ ++ ++ + + import os + import shutil +--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/PRESUBMIT.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/PRESUBMIT.py 2025-01-16 02:26:08.616262452 +0800 +@@ -11,7 +11,7 @@ + """Returns the license header regexp.""" + # Accept any year number from 2003 to the current year + current_year = int(input_api.time.strftime('%Y')) +- allowed_years = (str(s) for s in reversed(xrange(2003, current_year + 1))) ++ allowed_years = (str(s) for s in reversed(range(2003, current_year + 1))) + years_re = '(' + '|'.join(allowed_years) + ')' + license_header = ( + r'.*? Copyright( \(c\))? %(year)s The WebRTC [Pp]roject [Aa]uthors\. ' +--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/clang_tidy.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/clang_tidy.py 2025-01-16 02:26:08.616262452 +0800 +@@ -22,7 +22,7 @@ + import sys + import tempfile + #pylint: disable=relative-import +-from presubmit_checks_lib.build_helpers import GetClangTidyPath, \ ++from .presubmit_checks_lib.build_helpers import GetClangTidyPath, \ + GetCompilationCommand + + +@@ -56,7 +56,7 @@ + command[0:1] = [GetClangTidyPath(), + CHECKER_OPTION, + rel_path] + args + ['--'] # Separator for clang flags. +- print "Running: %s" % ' '.join(command) ++ print("Running: %s" % ' '.join(command)) + # Run from build dir so that relative paths are correct. + p = subprocess.Popen(command, cwd=out_dir, + stdout=sys.stdout, stderr=sys.stderr) +--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/download_tools.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/download_tools.py 2025-01-16 02:26:08.616262452 +0800 +@@ -45,14 +45,14 @@ + '--recursive', + path, + ] +- print 'Downloading precompiled tools...' ++ print('Downloading precompiled tools...') + + # Perform download similar to how gclient hooks execute. + try: + gclient_utils.CheckCallAndFilter( + cmd, cwd=SRC_DIR, always_show_header=True) + except (gclient_utils.Error, subprocess2.CalledProcessError) as e: +- print 'Error: %s' % str(e) ++ print('Error: %s' % str(e)) + return 2 + return 0 + +--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/ensure_webcam_is_running.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/ensure_webcam_is_running.py 2025-01-16 02:26:08.616262452 +0800 +@@ -45,15 +45,15 @@ + elif sys.platform.startswith('linux'): + # TODO(bugs.webrtc.org/9636): Currently a no-op on Linux: sw webcams no + # longer in use. 
+- print 'Virtual webcam: no-op on Linux' ++ print('Virtual webcam: no-op on Linux') + return True + else: + raise Exception('Unsupported platform: %s' % sys.platform) + for p in psutil.process_iter(): + try: + if process_name == p.name: +- print 'Found a running virtual webcam (%s with PID %s)' % (p.name, +- p.pid) ++ print('Found a running virtual webcam (%s with PID %s)' % (p.name, ++ p.pid)) + return True + except psutil.AccessDenied: + pass # This is normal if we query sys processes, etc. +@@ -64,17 +64,17 @@ + try: + if sys.platform == 'win32': + subprocess.check_call(WEBCAM_WIN) +- print 'Successfully launched virtual webcam.' ++ print('Successfully launched virtual webcam.') + elif sys.platform.startswith('darwin'): + subprocess.check_call(WEBCAM_MAC) +- print 'Successfully launched virtual webcam.' ++ print('Successfully launched virtual webcam.') + elif sys.platform.startswith('linux'): + # TODO(bugs.webrtc.org/9636): Currently a no-op on Linux: sw webcams no + # longer in use. +- print 'Not implemented on Linux' ++ print('Not implemented on Linux') + + except Exception as e: +- print 'Failed to launch virtual webcam: %s' % e ++ print('Failed to launch virtual webcam: %s' % e) + return False + + return True +--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/get_landmines.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/get_landmines.py 2025-01-16 02:26:08.616262452 +0800 +@@ -34,29 +34,29 @@ + # dependency problems, fix the dependency problems instead of adding a + # landmine. + # See the Chromium version in src/build/get_landmines.py for usage examples. +- print 'Clobber to remove out/{Debug,Release}/args.gn (webrtc:5070)' ++ print('Clobber to remove out/{Debug,Release}/args.gn (webrtc:5070)') + if host_os() == 'win': +- print 'Clobber to resolve some issues with corrupt .pdb files on bots.' +- print 'Clobber due to corrupt .pdb files (after #14623)' +- print 'Clobber due to Win 64-bit Debug linking error (crbug.com/668961)' ++ print('Clobber to resolve some issues with corrupt .pdb files on bots.') ++ print('Clobber due to corrupt .pdb files (after #14623)') ++ print('Clobber due to Win 64-bit Debug linking error (crbug.com/668961)') + print ('Clobber due to Win Clang Debug linking errors in ' + 'https://codereview.webrtc.org/2786603002') + print ('Clobber due to Win Debug linking errors in ' + 'https://codereview.webrtc.org/2832063003/') +- print 'Clobber win x86 bots (issues with isolated files).' ++ print('Clobber win x86 bots (issues with isolated files).') + if host_os() == 'mac': +- print 'Clobber due to iOS compile errors (crbug.com/694721)' +- print 'Clobber to unblock https://codereview.webrtc.org/2709573003' ++ print('Clobber due to iOS compile errors (crbug.com/694721)') ++ print('Clobber to unblock https://codereview.webrtc.org/2709573003') + print ('Clobber to fix https://codereview.webrtc.org/2709573003 after ' + 'landing') + print ('Clobber to fix https://codereview.webrtc.org/2767383005 before' + 'landing (changing rtc_executable -> rtc_test on iOS)') + print ('Clobber to fix https://codereview.webrtc.org/2767383005 before' + 'landing (changing rtc_executable -> rtc_test on iOS)') +- print 'Another landmine for low_bandwidth_audio_test (webrtc:7430)' +- print 'Clobber to change neteq_rtpplay type to executable' +- print 'Clobber to remove .xctest files.' +- print 'Clobber to remove .xctest files (take 2).' 
++ print('Another landmine for low_bandwidth_audio_test (webrtc:7430)') ++ print('Clobber to change neteq_rtpplay type to executable') ++ print('Clobber to remove .xctest files.') ++ print('Clobber to remove .xctest files (take 2).') + + + def main(): +--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/gn_check_autofix.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/gn_check_autofix.py 2025-01-16 02:26:08.616262452 +0800 +@@ -53,7 +53,7 @@ + + + def Run(cmd): +- print 'Running:', ' '.join(cmd) ++ print('Running:', ' '.join(cmd)) + sub = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + return sub.communicate() + +@@ -151,7 +151,7 @@ + errors = mb_output[0].split('ERROR')[1:] + + if mb_output[1]: +- print mb_output[1] ++ print(mb_output[1]) + return 1 + + for error in errors: +@@ -160,7 +160,7 @@ + if target_msg not in error: + target_msg = 'It is not in any dependency of' + if target_msg not in error: +- print '\n'.join(error) ++ print('\n'.join(error)) + continue + index = error.index(target_msg) + 1 + path, target = error[index].strip().split(':') +@@ -177,10 +177,10 @@ + deleted_file = '"' + os.path.basename(error[index+2].strip()) + '",' + deleted_sources.add(deleted_file) + else: +- print '\n'.join(error) ++ print('\n'.join(error)) + continue + +- for path, missing_deps in errors_by_file.items(): ++ for path, missing_deps in list(errors_by_file.items()): + FixErrors(path, missing_deps, deleted_sources) + + return 0 +--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/gtest-parallel-wrapper.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/gtest-parallel-wrapper.py 2025-01-16 02:26:08.616262452 +0800 +@@ -213,7 +213,7 @@ + if test_artifacts_dir and not os.path.isdir(test_artifacts_dir): + os.makedirs(test_artifacts_dir) + +- print 'gtest-parallel-wrapper: Executing command %s' % ' '.join(command) ++ print('gtest-parallel-wrapper: Executing command %s' % ' '.join(command)) + sys.stdout.flush() + + exit_code = subprocess.call(command, env=test_env, cwd=os.getcwd()) +--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/android/build_aar.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/android/build_aar.py 2025-01-16 02:26:08.616262452 +0800 +@@ -159,7 +159,7 @@ + if arm_version: + gn_args['arm_version'] = arm_version + gn_args_str = '--args=' + ' '.join([ +- k + '=' + _EncodeForGN(v) for k, v in gn_args.items()] + extra_gn_args) ++ k + '=' + _EncodeForGN(v) for k, v in list(gn_args.items())] + extra_gn_args) + + gn_args_list = ['gen', output_directory, gn_args_str] + gn_args_list.extend(extra_gn_switches) +--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/android/release_aar.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/android/release_aar.py 2025-01-16 02:26:08.616262452 +0800 +@@ -109,7 +109,7 @@ + with open(filename) as fh: + file_data = fh.read() + +- for attempt in xrange(UPLOAD_TRIES): ++ for attempt in range(UPLOAD_TRIES): + try: + response = requests.put(url, data=file_data, auth=(user, password), + timeout=API_TIMEOUT_SECONDS) +--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/autoroller/roll_deps.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/autoroller/roll_deps.py 2025-01-16 02:26:08.616262452 +0800 +@@ -17,7 +17,7 @@ + import re + import subprocess + 
import sys +-import urllib2 ++import urllib.request, urllib.error, urllib.parse + + def FindSrcDirPath(): + """Returns the abs path to the src/ dir of the project.""" +@@ -145,7 +145,7 @@ + logging.debug('CMD: %s CWD: %s', ' '.join(command), working_dir) + env = os.environ.copy() + if extra_env: +- assert all(isinstance(value, str) for value in extra_env.values()) ++ assert all(isinstance(value, str) for value in list(extra_env.values())) + logging.debug('extra env: %s', extra_env) + env.update(extra_env) + p = subprocess.Popen(command, +@@ -205,7 +205,7 @@ + + def ReadUrlContent(url): + """Connect to a remote host and read the contents. Returns a list of lines.""" +- conn = urllib2.urlopen(url) ++ conn = urllib.request.urlopen(url) + try: + return conn.readlines() + except IOError as e: +@@ -228,7 +228,7 @@ + A list of DepsEntry objects. + """ + result = [] +- for path, depsentry in depsentry_dict.iteritems(): ++ for path, depsentry in depsentry_dict.items(): + if path == dir_path: + result.append(depsentry) + else: +@@ -244,7 +244,7 @@ + result = {} + + def AddDepsEntries(deps_subdict): +- for path, dep in deps_subdict.iteritems(): ++ for path, dep in deps_subdict.items(): + if path in result: + continue + if not isinstance(dep, dict): +@@ -370,7 +370,7 @@ + result = [] + webrtc_entries = BuildDepsentryDict(webrtc_deps) + new_cr_entries = BuildDepsentryDict(new_cr_deps) +- for path, webrtc_deps_entry in webrtc_entries.iteritems(): ++ for path, webrtc_deps_entry in webrtc_entries.items(): + if path in DONT_AUTOROLL_THESE: + continue + cr_deps_entry = new_cr_entries.get(path) +--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/autoroller/unittests/roll_deps_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/autoroller/unittests/roll_deps_test.py 2025-01-16 02:26:08.616262452 +0800 +@@ -100,7 +100,7 @@ + def testVarLookup(self): + local_scope = {'foo': 'wrong', 'vars': {'foo': 'bar'}} + lookup = roll_deps.VarLookup(local_scope) +- self.assertEquals(lookup('foo'), 'bar') ++ self.assertEqual(lookup('foo'), 'bar') + + def testUpdateDepsFile(self): + new_rev = 'aaaaabbbbbcccccdddddeeeeefffff0000011111' +@@ -167,24 +167,24 @@ + vars_dict = local_scope['vars'] + + def AssertVar(variable_name): +- self.assertEquals(vars_dict[variable_name], TEST_DATA_VARS[variable_name]) ++ self.assertEqual(vars_dict[variable_name], TEST_DATA_VARS[variable_name]) + AssertVar('chromium_git') + AssertVar('chromium_revision') +- self.assertEquals(len(local_scope['deps']), 3) +- self.assertEquals(len(local_scope['deps_os']), 1) ++ self.assertEqual(len(local_scope['deps']), 3) ++ self.assertEqual(len(local_scope['deps_os']), 1) + + def testGetMatchingDepsEntriesReturnsPathInSimpleCase(self): + entries = GetMatchingDepsEntries(DEPS_ENTRIES, 'src/testing/gtest') +- self.assertEquals(len(entries), 1) +- self.assertEquals(entries[0], DEPS_ENTRIES['src/testing/gtest']) ++ self.assertEqual(len(entries), 1) ++ self.assertEqual(entries[0], DEPS_ENTRIES['src/testing/gtest']) + + def testGetMatchingDepsEntriesHandlesSimilarStartingPaths(self): + entries = GetMatchingDepsEntries(DEPS_ENTRIES, 'src/testing') +- self.assertEquals(len(entries), 2) ++ self.assertEqual(len(entries), 2) + + def testGetMatchingDepsEntriesHandlesTwoPathsWithIdenticalFirstParts(self): + entries = GetMatchingDepsEntries(DEPS_ENTRIES, 'src/build') +- self.assertEquals(len(entries), 1) ++ self.assertEqual(len(entries), 1) + + + def testCalculateChangedDeps(self): +@@ -196,52 +196,52 @@ + 
BUILD_NEW_REV) + changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps) + +- self.assertEquals(len(changed_deps), 3) +- self.assertEquals(changed_deps[0].path, 'src/build') +- self.assertEquals(changed_deps[0].current_rev, BUILD_OLD_REV) +- self.assertEquals(changed_deps[0].new_rev, BUILD_NEW_REV) +- +- self.assertEquals(changed_deps[1].path, 'src/third_party/depot_tools') +- self.assertEquals(changed_deps[1].current_rev, DEPOTTOOLS_OLD_REV) +- self.assertEquals(changed_deps[1].new_rev, DEPOTTOOLS_NEW_REV) +- +- self.assertEquals(changed_deps[2].path, 'src/third_party/xstream') +- self.assertEquals(changed_deps[2].package, 'chromium/third_party/xstream') +- self.assertEquals(changed_deps[2].current_version, 'version:1.4.8-cr0') +- self.assertEquals(changed_deps[2].new_version, 'version:1.10.0-cr0') ++ self.assertEqual(len(changed_deps), 3) ++ self.assertEqual(changed_deps[0].path, 'src/build') ++ self.assertEqual(changed_deps[0].current_rev, BUILD_OLD_REV) ++ self.assertEqual(changed_deps[0].new_rev, BUILD_NEW_REV) ++ ++ self.assertEqual(changed_deps[1].path, 'src/third_party/depot_tools') ++ self.assertEqual(changed_deps[1].current_rev, DEPOTTOOLS_OLD_REV) ++ self.assertEqual(changed_deps[1].new_rev, DEPOTTOOLS_NEW_REV) ++ ++ self.assertEqual(changed_deps[2].path, 'src/third_party/xstream') ++ self.assertEqual(changed_deps[2].package, 'chromium/third_party/xstream') ++ self.assertEqual(changed_deps[2].current_version, 'version:1.4.8-cr0') ++ self.assertEqual(changed_deps[2].new_version, 'version:1.10.0-cr0') + + def testWithDistinctDeps(self): + """Check CalculateChangedDeps still works when deps are added/removed. """ + webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile_android) + new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android) + changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps) +- self.assertEquals(len(changed_deps), 1) +- self.assertEquals( ++ self.assertEqual(len(changed_deps), 1) ++ self.assertEqual( + changed_deps[0].path, + 'src/third_party/android_deps/libs/android_arch_core_common') +- self.assertEquals( ++ self.assertEqual( + changed_deps[0].package, + 'chromium/third_party/android_deps/libs/android_arch_core_common') +- self.assertEquals(changed_deps[0].current_version, 'version:0.9.0') +- self.assertEquals(changed_deps[0].new_version, 'version:1.0.0-cr0') ++ self.assertEqual(changed_deps[0].current_version, 'version:0.9.0') ++ self.assertEqual(changed_deps[0].new_version, 'version:1.0.0-cr0') + + def testFindAddedDeps(self): + webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile_android) + new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android) + added_android_paths, other_paths = FindAddedDeps(webrtc_deps, new_cr_deps) +- self.assertEquals( ++ self.assertEqual( + added_android_paths, + ['src/third_party/android_deps/libs/android_arch_lifecycle_common']) +- self.assertEquals(other_paths, []) ++ self.assertEqual(other_paths, []) + + def testFindRemovedDeps(self): + webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile_android) + new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android) + removed_android_paths, other_paths = FindRemovedDeps(webrtc_deps, + new_cr_deps) +- self.assertEquals(removed_android_paths, ++ self.assertEqual(removed_android_paths, + ['src/third_party/android_deps/libs/android_arch_lifecycle_runtime']) +- self.assertEquals(other_paths, []) ++ self.assertEqual(other_paths, []) + + def testMissingDepsIsDetected(self): + """Check an error is reported when deps cannot be automatically removed.""" +@@ 
-251,7 +251,7 @@ + webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile) + new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android) + _, other_paths = FindRemovedDeps(webrtc_deps, new_cr_deps) +- self.assertEquals(other_paths, ['src/third_party/xstream', ++ self.assertEqual(other_paths, ['src/third_party/xstream', + 'src/third_party/depot_tools']) + + def testExpectedDepsIsNotReportedMissing(self): +@@ -319,13 +319,13 @@ + + class TestChooseCQMode(unittest.TestCase): + def testSkip(self): +- self.assertEquals(ChooseCQMode(True, 99, 500000, 500100), 0) ++ self.assertEqual(ChooseCQMode(True, 99, 500000, 500100), 0) + + def testDryRun(self): +- self.assertEquals(ChooseCQMode(False, 101, 500000, 500100), 1) ++ self.assertEqual(ChooseCQMode(False, 101, 500000, 500100), 1) + + def testSubmit(self): +- self.assertEquals(ChooseCQMode(False, 100, 500000, 500100), 2) ++ self.assertEqual(ChooseCQMode(False, 100, 500000, 500100), 2) + + + def _SetupGitLsRemoteCall(cmd_fake, url, revision): +--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/coverage/generate_coverage_command.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/coverage/generate_coverage_command.py 2025-01-16 02:26:08.616262452 +0800 +@@ -48,7 +48,7 @@ + modules_unittests = 'out/coverage/modules_unittests' + cmd[cmd.index('-c \'%s\'' % modules_unittests)] = WithXvfb(modules_unittests) + +- print ' '.join(cmd) ++ print(' '.join(cmd)) + return 0 + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/coverage/generate_ios_coverage_command.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/coverage/generate_ios_coverage_command.py 2025-01-16 02:26:08.616262452 +0800 +@@ -109,11 +109,11 @@ + [FormatIossimTest(t, is_xctest=False) for t in TESTS] + ) + +- print 'To get code coverage using iOS simulator just run following commands:' +- print '' +- print ' '.join(gn_cmd) +- print '' +- print ' '.join(coverage_cmd) ++ print('To get code coverage using iOS simulator just run following commands:') ++ print('') ++ print(' '.join(gn_cmd)) ++ print('') ++ print(' '.join(coverage_cmd)) + return 0 + + +@@ -129,44 +129,44 @@ + ['-i=\'.*/out/.*|.*/third_party/.*|.*test.*\''] + ) + +- print 'Computing code coverage for real iOS device is a little bit tedious.' +- print '' +- print 'You will need:' +- print '' +- print '1. Generate xcode project and open it with Xcode 10+:' +- print ' gn gen %s --ide=xcode --args=\'%s\'' % (DIRECTORY, gn_args_string) +- print ' open %s/all.xcworkspace' % DIRECTORY +- print '' +- print '2. Execute these Run targets manually with Xcode Run button and ' +- print 'manually save generated coverage.profraw file to %s:' % DIRECTORY +- print '\n'.join('- %s' % t for t in TESTS) +- print '' +- print '3. Execute these Test targets manually with Xcode Test button and ' +- print 'manually save generated coverage.profraw file to %s:' % DIRECTORY +- print '\n'.join('- %s' % t for t in XC_TESTS) +- print '' +- print '4. Merge *.profraw files to *.profdata using llvm-profdata tool:' +- print (' build/mac_files/Xcode.app/Contents/Developer/Toolchains/' + ++ print('Computing code coverage for real iOS device is a little bit tedious.') ++ print('') ++ print('You will need:') ++ print('') ++ print('1. 
Generate xcode project and open it with Xcode 10+:') ++ print(' gn gen %s --ide=xcode --args=\'%s\'' % (DIRECTORY, gn_args_string)) ++ print(' open %s/all.xcworkspace' % DIRECTORY) ++ print('') ++ print('2. Execute these Run targets manually with Xcode Run button and ') ++ print('manually save generated coverage.profraw file to %s:' % DIRECTORY) ++ print('\n'.join('- %s' % t for t in TESTS)) ++ print('') ++ print('3. Execute these Test targets manually with Xcode Test button and ') ++ print('manually save generated coverage.profraw file to %s:' % DIRECTORY) ++ print('\n'.join('- %s' % t for t in XC_TESTS)) ++ print('') ++ print('4. Merge *.profraw files to *.profdata using llvm-profdata tool:') ++ print((' build/mac_files/Xcode.app/Contents/Developer/Toolchains/' + + 'XcodeDefault.xctoolchain/usr/bin/llvm-profdata merge ' + + '-o %s/merged.profdata ' % DIRECTORY + +- '-sparse=true %s/*.profraw' % DIRECTORY) +- print '' +- print '5. Generate coverage report:' +- print ' ' + ' '.join(coverage_report_cmd) ++ '-sparse=true %s/*.profraw' % DIRECTORY)) ++ print('') ++ print('5. Generate coverage report:') ++ print(' ' + ' '.join(coverage_report_cmd)) + return 0 + + + def Main(): + if len(sys.argv) < 2: +- print 'Please specify type of coverage:' +- print ' %s simulator' % sys.argv[0] +- print ' %s device' % sys.argv[0] ++ print('Please specify type of coverage:') ++ print(' %s simulator' % sys.argv[0]) ++ print(' %s device' % sys.argv[0]) + elif sys.argv[1] == 'simulator': + GenerateIOSSimulatorCommand() + elif sys.argv[1] == 'device': + GenerateIOSDeviceCommand() + else: +- print 'Unsupported type of coverage' ++ print('Unsupported type of coverage') + + return 0 + +--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/cpu/cpu_mon.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/cpu/cpu_mon.py 2025-01-16 02:26:08.616262452 +0800 +@@ -22,8 +22,8 @@ + self.samples = [] + + def Capture(self, sample_count): +- print ('Capturing %d CPU samples for %s...' % +- ((sample_count - len(self.samples)), self.label)) ++ print(('Capturing %d CPU samples for %s...' % ++ ((sample_count - len(self.samples)), self.label))) + while len(self.samples) < sample_count: + self.samples.append(psutil.cpu_percent(1.0, False)) + +@@ -38,8 +38,8 @@ + + + def GrabCpuSamples(sample_count): +- print 'Label for snapshot (enter to quit): ' +- label = raw_input().strip() ++ print('Label for snapshot (enter to quit): ') ++ label = input().strip() + if len(label) == 0: + return None + +@@ -50,12 +50,12 @@ + + + def main(): +- print 'How many seconds to capture per snapshot (enter for 60)?' +- sample_count = raw_input().strip() ++ print('How many seconds to capture per snapshot (enter for 60)?') ++ sample_count = input().strip() + if len(sample_count) > 0 and int(sample_count) > 0: + sample_count = int(sample_count) + else: +- print 'Defaulting to 60 samples.' 
++ print('Defaulting to 60 samples.') + sample_count = 60 + + snapshots = [] +@@ -66,7 +66,7 @@ + snapshots.append(snapshot) + + if len(snapshots) == 0: +- print 'no samples captured' ++ print('no samples captured') + return -1 + + pyplot.title('CPU usage') +--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/ios/merge_ios_libs.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/ios/merge_ios_libs.py 2025-01-16 02:26:08.616262452 +0800 +@@ -51,12 +51,12 @@ + libs[filename] = entry + orphaned_libs = {} + valid_libs = {} +- for library, paths in libs.items(): ++ for library, paths in list(libs.items()): + if len(paths) < len(archs): + orphaned_libs[library] = paths + else: + valid_libs[library] = paths +- for library, paths in orphaned_libs.items(): ++ for library, paths in list(orphaned_libs.items()): + components = library[:-2].split('_')[:-1] + found = False + # Find directly matching parent libs by stripping suffix. +@@ -70,7 +70,7 @@ + # Find next best match by finding parent libs with the same prefix. + if not found: + base_prefix = library[:-2].split('_')[0] +- for valid_lib, valid_paths in valid_libs.items(): ++ for valid_lib, valid_paths in list(valid_libs.items()): + if valid_lib[:len(base_prefix)] == base_prefix: + valid_paths.extend(paths) + found = True +@@ -91,14 +91,14 @@ + + # Merge libraries using libtool. + libtool_returncode = 0 +- for library, paths in valid_libs.items(): ++ for library, paths in list(valid_libs.items()): + cmd_list = ['libtool', '-static', '-v', '-o', + os.path.join(output_dir_path, library)] + paths + libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env) + _, err = libtoolout.communicate() + for line in err.splitlines(): + if not libtool_re.match(line): +- print >>sys.stderr, line ++ print(line, file=sys.stderr) + # Unconditionally touch the output .a file on the command line if present + # and the command succeeded. A bit hacky. 
+ libtool_returncode = libtoolout.returncode +--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/libs/generate_licenses.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/libs/generate_licenses.py 2025-01-16 02:26:08.616262452 +0800 +@@ -183,7 +183,7 @@ + def _GetThirdPartyLibraries(self, buildfile_dir, target): + output = json.loads(LicenseBuilder._RunGN(buildfile_dir, target)) + libraries = set() +- for described_target in output.values(): ++ for described_target in list(output.values()): + third_party_libs = ( + self._ParseLibrary(dep) for dep in described_target['deps']) + libraries |= set(lib for lib in third_party_libs if lib) +--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/libs/generate_licenses_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/libs/generate_licenses_test.py 2025-01-16 02:26:08.616262452 +0800 +@@ -12,7 +12,7 @@ + import unittest + import mock + +-from generate_licenses import LicenseBuilder ++from .generate_licenses import LicenseBuilder + + + class TestLicenseBuilder(unittest.TestCase): +@@ -33,21 +33,21 @@ + """ + + def testParseLibraryName(self): +- self.assertEquals( ++ self.assertEqual( + LicenseBuilder._ParseLibraryName('//a/b/third_party/libname1:c'), + 'libname1') +- self.assertEquals( ++ self.assertEqual( + LicenseBuilder._ParseLibraryName('//a/b/third_party/libname2:c(d)'), + 'libname2') +- self.assertEquals( ++ self.assertEqual( + LicenseBuilder._ParseLibraryName('//a/b/third_party/libname3/c:d(e)'), + 'libname3') +- self.assertEquals( ++ self.assertEqual( + LicenseBuilder._ParseLibraryName('//a/b/not_third_party/c'), None) + + def testParseLibrarySimpleMatch(self): + builder = LicenseBuilder([], [], {}, {}) +- self.assertEquals( ++ self.assertEqual( + builder._ParseLibrary('//a/b/third_party/libname:c'), 'libname') + + def testParseLibraryRegExNoMatchFallbacksToDefaultLibname(self): +@@ -55,7 +55,7 @@ + 'libname:foo.*': ['path/to/LICENSE'], + } + builder = LicenseBuilder([], [], lib_dict, {}) +- self.assertEquals( ++ self.assertEqual( + builder._ParseLibrary('//a/b/third_party/libname:bar_java'), 'libname') + + def testParseLibraryRegExMatch(self): +@@ -63,7 +63,7 @@ + 'libname:foo.*': ['path/to/LICENSE'], + } + builder = LicenseBuilder([], [], {}, lib_regex_dict) +- self.assertEquals( ++ self.assertEqual( + builder._ParseLibrary('//a/b/third_party/libname:foo_bar_java'), + 'libname:foo.*') + +@@ -72,7 +72,7 @@ + 'libname/foo:bar.*': ['path/to/LICENSE'], + } + builder = LicenseBuilder([], [], {}, lib_regex_dict) +- self.assertEquals( ++ self.assertEqual( + builder._ParseLibrary('//a/b/third_party/libname/foo:bar_java'), + 'libname/foo:bar.*') + +@@ -81,14 +81,14 @@ + 'libname/foo.*bar.*': ['path/to/LICENSE'], + } + builder = LicenseBuilder([], [], {}, lib_regex_dict) +- self.assertEquals( ++ self.assertEqual( + builder._ParseLibrary('//a/b/third_party/libname/fooHAHA:bar_java'), + 'libname/foo.*bar.*') + + @mock.patch('generate_licenses.LicenseBuilder._RunGN', _FakeRunGN) + def testGetThirdPartyLibrariesWithoutRegex(self): + builder = LicenseBuilder([], [], {}, {}) +- self.assertEquals( ++ self.assertEqual( + builder._GetThirdPartyLibraries('out/arm', 'target1'), + set(['libname1', 'libname2', 'libname3'])) + +@@ -98,7 +98,7 @@ + 'libname2:c.*': ['path/to/LICENSE'], + } + builder = LicenseBuilder([], [], {}, lib_regex_dict) +- self.assertEquals( ++ self.assertEqual( + builder._GetThirdPartyLibraries('out/arm', 'target1'), + 
set(['libname1', 'libname2:c.*', 'libname3']))
+
+@@ -112,7 +112,7 @@
+ with self.assertRaises(Exception) as context:
+ builder.GenerateLicenseText('dummy/dir')
+
+- self.assertEquals(
++ self.assertEqual(
+ context.exception.message,
+ 'Missing licenses for following third_party targets: '
+ 'libname1, libname2, libname3')
+--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/mb/mb.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/mb/mb.py 2025-01-16 02:26:08.616262452 +0800
+@@ -13,7 +13,7 @@
+ for sets of canned configurations and analyze them.
+ """
+
+-from __future__ import print_function
++
+
+ import argparse
+ import ast
+@@ -28,7 +28,7 @@
+ import subprocess
+ import tempfile
+ import traceback
+-import urllib2
++import urllib.request, urllib.error, urllib.parse
+
+ from collections import OrderedDict
+
+@@ -252,7 +252,7 @@
+ def CmdExport(self):
+ self.ReadConfigFile()
+ obj = {}
+- for master, builders in self.masters.items():
++ for master, builders in list(self.masters.items()):
+ obj[master] = {}
+ for builder in builders:
+ config = self.masters[master][builder]
+@@ -261,7 +261,7 @@
+
+ if isinstance(config, dict):
+ args = {k: self.FlattenConfig(v)['gn_args']
+- for k, v in config.items()}
++ for k, v in list(config.items())}
+ elif config.startswith('//'):
+ args = config
+ else:
+@@ -403,15 +403,15 @@
+ # Build a list of all of the configs referenced by builders.
+ all_configs = {}
+ for master in self.masters:
+- for config in self.masters[master].values():
++ for config in list(self.masters[master].values()):
+ if isinstance(config, dict):
+- for c in config.values():
++ for c in list(config.values()):
+ all_configs[c] = master
+ else:
+ all_configs[config] = master
+
+ # Check that every referenced args file or config actually exists.
+- for config, loc in all_configs.items():
++ for config, loc in list(all_configs.items()):
+ if config.startswith('//'):
+ if not self.Exists(self.ToAbsPath(config)):
+ errs.append('Unknown args file "%s" referenced from "%s".' %
+@@ -428,7 +428,7 @@
+ # Figure out the whole list of mixins, and check that every mixin
+ # listed by a config or another mixin actually exists.
+ referenced_mixins = set()
+- for config, mixins in self.configs.items():
++ for config, mixins in list(self.configs.items()):
+ for mixin in mixins:
+ if not mixin in self.mixins:
+ errs.append('Unknown mixin "%s" referenced by config "%s".' %
+@@ -1071,14 +1071,14 @@
+
+ def CheckCompile(self, master, builder):
+ url_template = self.args.url_template + '/{builder}/builds/_all?as_text=1'
+- url = urllib2.quote(url_template.format(master=master, builder=builder),
++ url = urllib.parse.quote(url_template.format(master=master, builder=builder),
+ safe=':/()?=')
+ try:
+ builds = json.loads(self.Fetch(url))
+ except Exception as e:
+ return str(e)
+ successes = sorted(
+- [int(x) for x in builds.keys() if "text" in builds[x] and
+- cmp(builds[x]["text"][:2], ["build", "successful"]) == 0],
++ [int(x) for x in list(builds.keys()) if "text" in builds[x] and
++ builds[x]["text"][:2] == ["build", "successful"]],
+ reverse=True)
+ if not successes:
+@@ -1162,7 +1162,7 @@
+
+ def Fetch(self, url):
+ # This function largely exists so it can be overridden for testing. 
+- f = urllib2.urlopen(url) ++ f = urllib.request.urlopen(url) + contents = f.read() + f.close() + return contents +@@ -1170,7 +1170,7 @@ + def MaybeMakeDirectory(self, path): + try: + os.makedirs(path) +- except OSError, e: ++ except OSError as e: + if e.errno != errno.EEXIST: + raise + +--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/mb/mb_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/mb/mb_unittest.py 2025-01-16 02:26:08.616262452 +0800 +@@ -11,7 +11,7 @@ + + import ast + import json +-import StringIO ++import io + import os + import sys + import unittest +@@ -172,7 +172,7 @@ + mbw.ToAbsPath('//build/args/bots/fake_master/fake_args_bot.gn'), + 'is_debug = false\n') + if files: +- for path, contents in files.items(): ++ for path, contents in list(files.items()): + mbw.files[path] = contents + return mbw + +@@ -792,7 +792,7 @@ + def test_help(self): + orig_stdout = sys.stdout + try: +- sys.stdout = StringIO.StringIO() ++ sys.stdout = io.StringIO() + self.assertRaises(SystemExit, self.check, ['-h']) + self.assertRaises(SystemExit, self.check, ['help']) + self.assertRaises(SystemExit, self.check, ['help', 'gen']) +--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/network_emulator/emulate.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/network_emulator/emulate.py 2025-01-16 02:26:08.616262452 +0800 +@@ -102,7 +102,7 @@ + options = parser.parse_args()[0] + + # Find preset by ID, if specified. +- if options.preset and not _PRESETS_DICT.has_key(options.preset): ++ if options.preset and options.preset not in _PRESETS_DICT: + parser.error('Invalid preset: %s' % options.preset) + + # Simple validation of the IP address, if supplied. +@@ -182,7 +182,7 @@ + connection_config.queue_slots) + logging.info('Affected traffic: IP traffic on ports %s-%s', + options.port_range[0], options.port_range[1]) +- raw_input('Press Enter to abort Network Emulation...') ++ input('Press Enter to abort Network Emulation...') + logging.info('Flushing all Dummynet rules...') + network_emulator.Cleanup() + logging.info('Completed Network Emulation.') +--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/perf/catapult_uploader.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/perf/catapult_uploader.py 2025-01-16 02:26:08.616262452 +0800 +@@ -50,7 +50,7 @@ + else: + data = zlib.compress(serialized) + +- print 'Sending %d bytes to %s.' % (len(data), url + '/add_histograms') ++ print('Sending %d bytes to %s.' % (len(data), url + '/add_histograms')) + + http = httplib2.Http() + response, content = http.request(url + '/add_histograms', method='POST', +@@ -92,7 +92,7 @@ + reserved_infos.BUILD_URLS: options.build_page_url, + } + +- for k, v in common_diagnostics.items(): ++ for k, v in list(common_diagnostics.items()): + histograms.AddSharedDiagnosticToAllHistograms( + k.name, generic_set.GenericSet([v])) + +@@ -114,9 +114,9 @@ + options.dashboard_url, histograms, oauth_token) + + if response.status == 200: +- print 'Received 200 from dashboard.' 
++ print('Received 200 from dashboard.')
+ return 0
+ else:
+- print('Upload failed with %d: %s\n\n%s' % (response.status, response.reason,
+- content))
++ print(('Upload failed with %d: %s\n\n%s' % (response.status, response.reason,
++ content)))
+ return 1
+--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/presubmit_checks_lib/build_helpers.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/presubmit_checks_lib/build_helpers.py 2025-01-16 02:26:08.616262452 +0800
+@@ -106,9 +106,9 @@
+ """
+ gn_errors = RunGnCommand(['gen'] + gn_args + [work_dir])
+ if gn_errors:
+- raise(RuntimeError(
+- 'FYI, cannot complete check due to gn error:\n%s\n'
+- 'Please open a bug.' % gn_errors))
++ raise RuntimeError(
++ 'FYI, cannot complete check due to gn error:\n%s\n'
++ 'Please open a bug.' % gn_errors)
+
+ # Needed for single file compilation.
+ commands = GetCompilationDb(work_dir)
+--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/presubmit_checks_lib/build_helpers_test.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/presubmit_checks_lib/build_helpers_test.py 2025-01-16 02:26:08.616262452 +0800
+@@ -12,7 +12,7 @@
+ import unittest
+
+ #pylint: disable=relative-import
+-import build_helpers
++from . import build_helpers
+
+
+ TESTDATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
+--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/presubmit_checks_lib/check_orphan_headers_test.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/presubmit_checks_lib/check_orphan_headers_test.py 2025-01-16 02:26:08.616262452 +0800
+@@ -12,7 +12,7 @@
+ import unittest
+
+ #pylint: disable=relative-import
+-import check_orphan_headers
++from . import check_orphan_headers
+
+
+ def _GetRootBasedOnPlatform():
+--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/presubmit_checks_lib/check_package_boundaries.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/presubmit_checks_lib/check_package_boundaries.py 2025-01-16 02:26:08.616262452 +0800
+@@ -116,8 +116,8 @@
+
+ for i, message in enumerate(messages):
+ if i > 0:
+- print
+- print message
++ print()
++ print(message)
+
+ return bool(messages)
+
+--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/presubmit_checks_lib/check_package_boundaries_test.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/presubmit_checks_lib/check_package_boundaries_test.py 2025-01-16 02:26:08.616262452 +0800
+@@ -13,7 +13,7 @@
+ import unittest
+
+ #pylint: disable=relative-import
+-from check_package_boundaries import CheckPackageBoundaries
++from .check_package_boundaries import CheckPackageBoundaries
+
+
+ MSG_FORMAT = 'ERROR:check_package_boundaries.py: Unexpected %s.'
+--- a/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/sslroots/generate_sslroots.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/webrtc/tools_webrtc/sslroots/generate_sslroots.py 2025-01-16 02:26:08.616262452 +0800
+@@ -19,7 +19,7 @@
+ generated file size. 
+ """ + +-import commands ++import subprocess + from optparse import OptionParser + import os + import re +@@ -132,7 +132,7 @@ + def _CreateCertSection(root_dir, source_file, label, options): + command = 'openssl x509 -in %s%s -noout -C' %(root_dir, source_file) + _PrintOutput(command, options) +- output = commands.getstatusoutput(command)[1] ++ output = subprocess.getstatusoutput(command)[1] + renamed_output = output.replace('unsigned char XXX_', + 'const unsigned char ' + label + '_') + filtered_output = '' +@@ -206,7 +206,7 @@ + + def _PrintOutput(output, options): + if options.verbose: +- print output ++ print(output) + + if __name__ == '__main__': + main() +--- a/src/3rdparty/chromium/third_party/webrtc/video/full_stack_tests_plot.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webrtc/video/full_stack_tests_plot.py 2025-01-16 02:26:08.616262452 +0800 +@@ -113,14 +113,14 @@ + it = iter(f) + + self.title = it.next().strip() +- self.length = int(it.next()) ++ self.length = int(next(it)) + field_names = [name.strip() for name in it.next().split()] + field_ids = [NAME_TO_ID[name] for name in field_names] + + for field_id in field_ids: + self.samples[field_id] = [0.0] * self.length + +- for sample_id in xrange(self.length): ++ for sample_id in range(self.length): + for col, value in enumerate(it.next().split()): + self.samples[field_ids[col]][sample_id] = float(value) + +@@ -263,17 +263,17 @@ + + for line in lines: + if not line: +- color_iter.next() ++ next(color_iter) + continue + + if self.cycle_length: +- x = numpy.array(range(self.cycle_length)) ++ x = numpy.array(list(range(self.cycle_length))) + else: +- x = numpy.array(range(self.offset, self.offset + len(line.values))) ++ x = numpy.array(list(range(self.offset, self.offset + len(line.values)))) + y = numpy.array(line.values) + ax = ax2 if line.flags & RIGHT_Y_AXIS else ax1 + ax.Plot(x, y, "o-", label=line.label, markersize=3.0, linewidth=1.0, +- color=color_iter.next()) ++ color=next(color_iter)) + + ax1.grid(True) + if ax2: +@@ -404,7 +404,7 @@ + plt.title(config.title) + config.Plot(ax) + if config.output_filename: +- print "Saving to", config.output_filename ++ print("Saving to", config.output_filename) + fig.savefig(config.output_filename) + plt.close(fig) + +--- a/src/3rdparty/chromium/third_party/webxr_test_pages/update_bucket.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/webxr_test_pages/update_bucket.py 2025-01-16 02:26:08.616262452 +0800 +@@ -3,7 +3,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import argparse + import logging +@@ -149,7 +149,7 @@ + + def check_and_fix_content_types(destination): + mimetypes.init() +- for suffix, content_type in SUFFIX_TYPES.iteritems(): ++ for suffix, content_type in SUFFIX_TYPES.items(): + configured_type = mimetypes.types_map.get('.' + suffix) + if configured_type != content_type: + logging.info('Fixing content type mismatch for .%s: found %s, ' +--- a/src/3rdparty/chromium/third_party/weston/generate_configs.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/weston/generate_configs.py 2025-01-16 02:26:08.616262452 +0800 +@@ -5,7 +5,7 @@ + # found in the LICENSE file. 
+ """Creates config files for building Weston.""" + +-from __future__ import print_function ++ + + import os + import re +--- a/src/3rdparty/chromium/third_party/xcbproto/src/xcbgen/align.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/xcbproto/src/xcbgen/align.py 2025-01-16 02:26:08.616262452 +0800 +@@ -157,7 +157,7 @@ + # check whether the ok_callstack is a subset or equal to a fail_callstack + for (align_before, field_name, type_obj, fail_callstack, reason) in self.fail_list: + if len(ok_callstack) <= len(fail_callstack): +- zipped = zip(ok_callstack, fail_callstack[:len(ok_callstack)]) ++ zipped = list(zip(ok_callstack, fail_callstack[:len(ok_callstack)])) + is_subset = all([i == j for i, j in zipped]) + if is_subset: + return True +--- a/src/3rdparty/chromium/third_party/xcbproto/src/xcbgen/expr.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/xcbproto/src/xcbgen/expr.py 2025-01-16 02:26:08.616262452 +0800 +@@ -162,7 +162,7 @@ + # need to find the field with lenfield_name + for p in reversed(parents): + fields = dict([(f.field_name, f) for f in p.fields]) +- if self.lenfield_name in fields.keys(): ++ if self.lenfield_name in list(fields.keys()): + if p.is_case_or_bitcase: + # switch is the anchestor + self.lenfield_parent = p.parents[-1] +--- a/src/3rdparty/chromium/third_party/xcbproto/src/xcbgen/state.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/xcbproto/src/xcbgen/state.py 2025-01-16 02:26:08.616262452 +0800 +@@ -58,7 +58,7 @@ + self.events[id] = (name, item) + + def get_event_by_opcode(self, opcode, is_ge_event): +- for id, (name, event) in self.events.items(): ++ for id, (name, event) in list(self.events.items()): + if event.is_ge_event == is_ge_event: + opcode_specific_name = event.get_name_for_opcode( opcode ) + if opcode_specific_name is not None: +@@ -163,7 +163,7 @@ + if key in self.types: + return self.types[key][idx] + +- for key in self.types.keys(): ++ for key in list(self.types.keys()): + if key.rpartition(':')[2] == id: + return self.types[key][idx] + +@@ -192,7 +192,7 @@ + + def add_events_to_namespaces(self): + # add to its namespace object +- for id, (name,item) in self.events.items(): ++ for id, (name,item) in list(self.events.items()): + if name[:-1] == ('xcb',): + # core event + namespace_name = '' +--- a/src/3rdparty/chromium/third_party/xcbproto/src/xcbgen/xtypes.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/xcbproto/src/xcbgen/xtypes.py 2025-01-16 02:26:08.616262452 +0800 +@@ -4,6 +4,7 @@ + from xcbgen.expr import Field, Expression + from xcbgen.align import Alignment, AlignmentLog + import __main__ ++from functools import reduce + + verbose_align_log = False + true_values = ['true', '1', 'yes'] +@@ -523,7 +524,7 @@ + int(required_start_align_element.get('align', "4"), 0), + int(required_start_align_element.get('offset', "0"), 0)) + if verbose_align_log: +- print ("Explicit start-align for %s: %s\n" % (self, self.required_start_align)) ++ print(("Explicit start-align for %s: %s\n" % (self, self.required_start_align))) + + def resolve(self, module): + if self.resolved: +@@ -613,28 +614,28 @@ + callstack = [] + self.required_start_align = self.calc_minimally_required_start_align(callstack, log) + if self.required_start_align is None: +- print ("ERROR: could not calc required_start_align of %s\nDetails:\n%s" +- % (str(self), str(log))) ++ print(("ERROR: could not calc required_start_align of %s\nDetails:\n%s" ++ % (str(self), 
str(log))))
+ else:
+ if verbose_align_log:
+- print ("calc_required_start_align: %s has start-align %s"
+- % (str(self), str(self.required_start_align)))
+- print ("Details:\n" + str(log))
++ print(("calc_required_start_align: %s has start-align %s"
++ % (str(self), str(self.required_start_align))))
++ print(("Details:\n" + str(log)))
+ if self.required_start_align.offset != 0:
+- print (("WARNING: %s\n\thas start-align with non-zero offset: %s"
++ print((("WARNING: %s\n\thas start-align with non-zero offset: %s"
+ + "\n\tsuggest to add explicit definition with:"
+ + "\n\t\t<required_start_align align=\"%d\" offset=\"%d\" />"
+ + "\n\tor to fix the xml so that zero offset is ok\n")
+ % (str(self), self.required_start_align,
+ self.required_start_align.align,
+- self.required_start_align.offset))
++ self.required_start_align.offset)))
+ else:
+ # required-start-align configured -> check it
+ log = AlignmentLog()
+ callstack = []
+ if not self.is_possible_start_align(self.required_start_align, callstack, log):
+- print ("ERROR: required_start_align %s of %s causes problems\nDetails:\n%s"
+- % (str(self.required_start_align), str(self), str(log)))
++ print(("ERROR: required_start_align %s of %s causes problems\nDetails:\n%s"
++ % (str(self.required_start_align), str(self), str(log))))
+
+
+ def calc_minimally_required_start_align(self, callstack, log):
+@@ -646,12 +647,12 @@
+ for offset in range(0,align):
+ align_candidate = Alignment(align, offset)
+ if verbose_align_log:
+- print ("trying %s for %s" % (str(align_candidate), str(self)))
++ print(("trying %s for %s" % (str(align_candidate), str(self))))
+ my_log = AlignmentLog()
+ if self.is_possible_start_align(align_candidate, callstack, my_log):
+ log.append(my_log)
+ if verbose_align_log:
+- print ("found start-align %s for %s" % (str(align_candidate), str(self)))
++ print(("found start-align %s for %s" % (str(align_candidate), str(self))))
+ return align_candidate
+ else:
+ my_ok_count = my_log.ok_count()
+@@ -668,7 +669,7 @@
+ # none of the candidates applies
+ # this type has illegal internal aligns for all possible start_aligns
+ if verbose_align_log:
+- print ("didn't find start-align for %s" % str(self))
++ print(("didn't find start-align for %s" % str(self)))
+ log.append(best_log)
+ return None
+
+@@ -927,7 +928,7 @@
+ # aux function for unchecked_get_alignment_after
+ def get_align_for_selected_case_field(self, case_field, start_align, callstack, log):
+ if verbose_align_log:
+- print ("get_align_for_selected_case_field: %s, case_field = %s" % (str(self), str(case_field)))
++ print(("get_align_for_selected_case_field: %s, case_field = %s" % (str(self), str(case_field))))
+ total_align = start_align
+ for field in self.bitcases:
+ my_callstack = callstack[:]
+@@ -1299,7 +1300,7 @@
+ self.name = name
+
+ def get_name_for_opcode(self, opcode):
+- for name, my_opcode in self.opcodes.items():
++ for name, my_opcode in list(self.opcodes.items()):
+ if int(my_opcode) == opcode:
+ return name
+ else:
+--- a/src/3rdparty/chromium/third_party/zxcvbn-cpp/data-scripts/build_frequency_lists.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/third_party/zxcvbn-cpp/data-scripts/build_frequency_lists.py 2025-01-16 02:26:08.616262452 +0800
+@@ -46,7 +46,7 @@
+ freq_list_name, ext = os.path.splitext(filename)
+ if freq_list_name not in DICTIONARIES:
+ msg = 'Warning: %s appears in %s directory but not in DICTIONARY settings. Excluding.' 
+- print(msg % (freq_list_name, data_dir)) ++ print((msg % (freq_list_name, data_dir))) + continue + token_to_rank = {} + with codecs.open(os.path.join(data_dir, filename), 'r', 'utf8') as f: +@@ -58,7 +58,7 @@ + for freq_list_name in DICTIONARIES: + if freq_list_name not in freq_lists: + msg = 'Warning: %s appears in DICTIONARY settings but not in %s directory. Excluding.' +- print(msg % (freq_list, data_dir)) ++ print((msg % (freq_list, data_dir))) + return freq_lists + + def is_rare_and_short(token, rank): +@@ -89,7 +89,7 @@ + minimum_rank = {} # maps token -> lowest token rank across all freq lists + minimum_name = {} # maps token -> freq list name with lowest token rank + for name, token_to_rank in sorted(freq_lists.items()): +- for token, rank in token_to_rank.items(): ++ for token, rank in list(token_to_rank.items()): + if token not in minimum_rank: + assert token not in minimum_name + minimum_rank[token] = rank +@@ -102,7 +102,7 @@ + minimum_rank[token] = rank + minimum_name[token] = name + for name, token_to_rank in sorted(freq_lists.items()): +- for token, rank in token_to_rank.items(): ++ for token, rank in list(token_to_rank.items()): + if minimum_name[token] != name: + continue + if is_rare_and_short(token, rank) or has_comma_or_double_quote(token, rank, name): +@@ -110,7 +110,7 @@ + filtered_token_and_rank[name].append((token, rank)) + token_count[name] += 1 + result = {} +- for name, token_rank_pairs in filtered_token_and_rank.items(): ++ for name, token_rank_pairs in list(filtered_token_and_rank.items()): + token_rank_pairs.sort(key=itemgetter(1)) + cutoff_limit = DICTIONARIES[name] + if cutoff_limit and len(token_rank_pairs) > cutoff_limit: +@@ -127,7 +127,7 @@ + f.write('# generated by %s\n' % script_name) + f.write('frequency_lists = \n ') + lines = [] +- for name, lst in freq_lists.items(): ++ for name, lst in list(freq_lists.items()): + lines.append(to_kv(lst, name)) + f.write('\n '.join(lines)) + f.write('\n') +@@ -280,14 +280,14 @@ + f.write('/* generated by %s */\n' % (script_name,)) + f.write('{\n') + lines = [] +- for name, lst in freq_lists.items(): ++ for name, lst in list(freq_lists.items()): + lines.append(to_kv(lst, '"' + name + '"')) + f.write(',\n '.join(lines)) + f.write('}') + + # Outputs the processed text files as text files again + def output_txt(output_txt, script_name, freq_lists): +- for name, lst in freq_lists.items(): ++ for name, lst in list(freq_lists.items()): + with codecs.open(output_txt.replace('.txt', name + '.txt'), 'w', + 'utf8') as f: + for word in lst: +@@ -295,7 +295,7 @@ + + def main(): + if len(sys.argv) != 3: +- print(usage()) ++ print((usage())) + sys.exit(0) + data_dir, output_file = sys.argv[1:] + unfiltered_freq_lists = parse_frequency_lists(data_dir) +--- a/src/3rdparty/chromium/third_party/zxcvbn-cpp/data-scripts/build_keyboard_adjacency_graphs.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/zxcvbn-cpp/data-scripts/build_keyboard_adjacency_graphs.py 2025-01-16 02:26:08.616262452 +0800 +@@ -79,7 +79,7 @@ + position_table[(x,y)] = token + + adjacency_graph = {} +- for (x,y), chars in position_table.items(): ++ for (x,y), chars in list(position_table.items()): + for char in chars: + adjacency_graph[char] = [] + for coord in adjacency_func(x, y): +@@ -94,7 +94,7 @@ + # this calculates the average over all keys. 
+ def calc_average_deg_and_len(layout_str, slanted): + graph = build_graph(layout_str, slanted) +- count = sum(a is not None for adj in graph.values() for a in adj) ++ count = sum(a is not None for adj in list(graph.values()) for a in adj) + length = len(graph) + return float(count) / length, length + +@@ -223,7 +223,7 @@ + + if __name__ == '__main__': + if len(sys.argv) != 2: +- print(usage()) ++ print((usage())) + sys.exit(0) + + output_file = sys.argv[1] +--- a/src/3rdparty/chromium/third_party/zxcvbn-cpp/data-scripts/count_us_census.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/zxcvbn-cpp/data-scripts/count_us_census.py 2025-01-16 02:26:08.616262452 +0800 +@@ -29,7 +29,7 @@ + + if __name__ == '__main__': + if len(sys.argv) != 3: +- print usage() ++ print(usage()) + else: + main(*sys.argv[1:]) + sys.exit(0) +--- a/src/3rdparty/chromium/third_party/zxcvbn-cpp/data-scripts/count_wikipedia.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/zxcvbn-cpp/data-scripts/count_wikipedia.py 2025-01-16 02:26:08.616262452 +0800 +@@ -12,7 +12,7 @@ + from unidecode import unidecode + + def usage(): +- print ''' ++ print(''' + tokenize a directory of text and count unigrams. + + usage: +@@ -48,7 +48,7 @@ + Then run: + ./WikiExtractor.py -o en_sents --no-templates enwiki-20151002-pages-articles.xml.bz2 + +-''' % sys.argv[0] ++''' % sys.argv[0]) + + SENTENCES_PER_BATCH = 500000 # after each batch, delete all counts with count == 1 (hapax legomena) + PRE_SORT_CUTOFF = 300 # before sorting, discard all words with less than this count +@@ -110,7 +110,7 @@ + + def pre_sort_prune(self): + under_cutoff = set() +- for token, count in self.count.iteritems(): ++ for token, count in self.count.items(): + if count < PRE_SORT_CUTOFF: + under_cutoff.add(token) + for token in under_cutoff: +@@ -118,7 +118,7 @@ + self.legomena = set() + + def get_sorted_pairs(self): +- return sorted(self.count.items(), key=operator.itemgetter(1), reverse=True) ++ return sorted(list(self.count.items()), key=operator.itemgetter(1), reverse=True) + + def get_ts(self): + return datetime.datetime.now().strftime("%b %d %Y %H:%M:%S") +@@ -129,7 +129,7 @@ + + def main(input_dir_str, output_filename): + counter = TopTokenCounter() +- print counter.get_ts(), 'starting...' ++ print(counter.get_ts(), 'starting...') + lines = 0 + for root, dirs, files in os.walk(input_dir_str, topdown=True): + if not files: +@@ -148,17 +148,17 @@ + lines += 1 + if lines % SENTENCES_PER_BATCH == 0: + counter.batch_prune() +- print counter.get_stats() +- print 'processing: %s' % path +- print counter.get_stats() +- print 'deleting tokens under cutoff of', PRE_SORT_CUTOFF ++ print(counter.get_stats()) ++ print('processing: %s' % path) ++ print(counter.get_stats()) ++ print('deleting tokens under cutoff of', PRE_SORT_CUTOFF) + counter.pre_sort_prune() +- print 'done' +- print counter.get_stats() +- print counter.get_ts(), 'sorting...' ++ print('done') ++ print(counter.get_stats()) ++ print(counter.get_ts(), 'sorting...') + sorted_pairs = counter.get_sorted_pairs() +- print counter.get_ts(), 'done' +- print 'writing...' 
++ print(counter.get_ts(), 'done') ++ print('writing...') + with codecs.open(output_filename, 'w', 'utf8') as f: + for token, count in sorted_pairs: + f.write('%-18s %d\n' % (token, count)) +--- a/src/3rdparty/chromium/third_party/zxcvbn-cpp/data-scripts/count_wiktionary.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/third_party/zxcvbn-cpp/data-scripts/count_wiktionary.py 2025-01-16 02:26:08.616262452 +0800 +@@ -74,7 +74,7 @@ + + if __name__ == '__main__': + if len(sys.argv) != 3: +- print usage() ++ print(usage()) + else: + main(*sys.argv[1:]) + sys.exit(0) +--- a/src/3rdparty/chromium/tools/bisect-builds.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/bisect-builds.py 2025-01-16 02:26:08.616262452 +0800 +@@ -12,7 +12,7 @@ + it will ask you whether it is good or bad before continuing the search. + """ + +-from __future__ import print_function ++ + + # The base URL for stored build archives. + CHROMIUM_BASE_URL = ('http://commondatastorage.googleapis.com' +@@ -75,7 +75,7 @@ + ############################################################################### + + import glob +-import httplib ++import http.client + import json + import optparse + import os +@@ -86,7 +86,7 @@ + import sys + import tempfile + import threading +-import urllib ++import urllib.request, urllib.parse, urllib.error + from distutils.version import LooseVersion + from xml.etree import ElementTree + import zipfile +@@ -257,7 +257,7 @@ + next-marker is not None, then the listing is a partial listing and another + fetch should be performed with next-marker being the marker= GET + parameter.""" +- handle = urllib.urlopen(url) ++ handle = urllib.request.urlopen(url) + document = ElementTree.parse(handle) + + # All nodes in the tree are namespaced. Get the root's tag name to extract +@@ -342,7 +342,7 @@ + + def _GetSVNRevisionFromGitHashWithoutGitCheckout(self, git_sha1, depot): + json_url = GITHASH_TO_SVN_URL[depot] % git_sha1 +- response = urllib.urlopen(json_url) ++ response = urllib.request.urlopen(json_url) + if response.getcode() == 200: + try: + data = json.loads(response.read()[4:]) +@@ -418,7 +418,7 @@ + if self.use_local_cache: + try: + with open(cache_filename) as cache_file: +- for (key, value) in json.load(cache_file).items(): ++ for (key, value) in list(json.load(cache_file).items()): + cache[key] = value + revisions = cache.get(cache_dict_key, []) + githash_svn_dict = cache.get('githash_svn_dict', {}) +@@ -451,7 +451,7 @@ + (revlist_all, self.githash_svn_dict) = _LoadBucketFromCache() + last_known_rev = revlist_all[-1] if revlist_all else 0 + if last_known_rev < maxrev: +- revlist_all.extend(map(int, self.ParseDirectoryIndex(last_known_rev))) ++ revlist_all.extend(list(map(int, self.ParseDirectoryIndex(last_known_rev)))) + revlist_all = list(set(revlist_all)) + revlist_all.sort() + _SaveBucketToCache() +@@ -520,7 +520,7 @@ + out.write(zf.read(name)) + out.close() + # Set permissions. Permission info in external_attr is shifted 16 bits. +- os.chmod(name, info.external_attr >> 16L) ++ os.chmod(name, info.external_attr >> 16) + os.chdir(cwd) + + +@@ -551,7 +551,7 @@ + sys.stdout.flush() + download_url = context.GetDownloadURL(rev) + try: +- urllib.urlretrieve(download_url, filename, ReportHook) ++ urllib.request.urlretrieve(download_url, filename, ReportHook) + if progress_event and progress_event.isSet(): + print() + +@@ -647,7 +647,7 @@ + print('Chrome exit_status: %d. Use s to see output' % exit_status) + # Loop until we get a response that we can parse. 
+ while True: +- response = raw_input('Revision %s is ' ++ response = input('Revision %s is ' + '[(g)ood/(b)ad/(r)etry/(u)nknown/(s)tdout/(q)uit]: ' % + str(rev)) + if response in ('g', 'b', 'r', 'u'): +@@ -744,7 +744,7 @@ + (exit_status, stdout, stderr) = RunRevision( + context, rev, fetch.zip_file, profile, num_runs, command, try_args) + answer = evaluate(rev, exit_status, stdout, stderr); +- except Exception, e: ++ except Exception as e: + print(e, file=sys.stderr) + raise SystemExit + if (answer != expected_answer): +@@ -881,7 +881,7 @@ + try: + (exit_status, stdout, stderr) = RunRevision( + context, rev, fetch.zip_file, profile, num_runs, command, try_args) +- except Exception, e: ++ except Exception as e: + print(e, file=sys.stderr) + + # Call the evaluate function to see if the current revision is good or bad. +@@ -971,7 +971,7 @@ + if m: + return m.group(1) + +- url = urllib.urlopen(DEPS_FILE % GetGitHashFromSVNRevision(rev)) ++ url = urllib.request.urlopen(DEPS_FILE % GetGitHashFromSVNRevision(rev)) + if url.getcode() == 200: + blink_re = re.compile(r'webkit_revision\D*\d+;\D*\d+;(\w+)') + blink_git_sha = _GetBlinkRev(url, blink_re) +@@ -991,7 +991,7 @@ + rev = context.githash_svn_dict[str(rev)] + file_url = '%s/%s%s/REVISIONS' % (context.base_url, + context._listing_platform_dir, rev) +- url = urllib.urlopen(file_url) ++ url = urllib.request.urlopen(file_url) + if url.getcode() == 200: + try: + data = json.loads(url.read()) +@@ -1033,7 +1033,7 @@ + """Returns the chromium revision read from given URL.""" + try: + # Location of the latest build revision number +- latest_revision = urllib.urlopen(url).read() ++ latest_revision = urllib.request.urlopen(url).read() + if latest_revision.isdigit(): + return int(latest_revision) + return context.GetSVNRevisionFromGitHash(latest_revision) +@@ -1043,7 +1043,7 @@ + + def GetGitHashFromSVNRevision(svn_revision): + crrev_url = CRREV_URL + str(svn_revision) +- url = urllib.urlopen(crrev_url) ++ url = urllib.request.urlopen(crrev_url) + if url.getcode() == 200: + data = json.loads(url.read()) + if 'git_sha' in data: +--- a/src/3rdparty/chromium/tools/bisect_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/bisect_test.py 2025-01-16 02:26:08.616262452 +0800 +@@ -45,7 +45,7 @@ + subprocess, 'Popen', + lambda *args, **kwargs: FakeProcess(self.fake_process_return_code)) + self.monkey_patch(bisect_builds.PathContext, 'ParseDirectoryIndex', +- lambda *args: range(self.max_rev)) ++ lambda *args: list(range(self.max_rev))) + + def tearDown(self): + self.clear_patching() +--- a/src/3rdparty/chromium/tools/boilerplate.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/boilerplate.py 2025-01-16 02:26:08.616262452 +0800 +@@ -8,7 +8,7 @@ + Usage: tools/boilerplate.py path/to/file.{h,cc} + """ + +-from __future__ import print_function ++ + + from datetime import date + import os +--- a/src/3rdparty/chromium/tools/check_git_config.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/check_git_config.py 2025-01-16 02:26:08.616262452 +0800 +@@ -15,7 +15,7 @@ + collect information about misconfigured Git accounts. + """ + +-from __future__ import print_function ++ + + import contextlib + import datetime +@@ -34,8 +34,8 @@ + import sys + import tempfile + import time +-import urllib2 +-import urlparse ++import urllib.request, urllib.error, urllib.parse ++import urllib.parse + + + # Absolute path to src/ directory. 
+@@ -155,7 +155,7 @@ + """ + try: + env = {} +- execfile(GCLIENT_CONFIG, env, env) ++ exec(compile(open(GCLIENT_CONFIG, "rb").read(), GCLIENT_CONFIG, 'exec'), env, env) + for sol in (env.get('solutions') or []): + if sol.get('name') == 'src': + return sol.get('url'), sol.get('deps_file'), sol.get('managed') +@@ -451,7 +451,7 @@ + 'if you need help to set up you committer git account.') + return True + +- req = urllib2.Request( ++ req = urllib.request.Request( + url=report_url, + data=as_bytes, + headers={'Content-Type': 'application/json; charset=utf-8'}) +@@ -463,8 +463,8 @@ + try: + logging.warning( + 'Attempting to upload the report to %s...', +- urlparse.urlparse(report_url).netloc) +- resp = urllib2.urlopen(req, timeout=5) ++ urllib.parse.urlparse(report_url).netloc) ++ resp = urllib.request.urlopen(req, timeout=5) + report_id = None + try: + report_id = json.load(resp)['report_id'] +@@ -472,7 +472,7 @@ + pass + logging.warning('Report uploaded: %s', report_id) + success = True +- except (urllib2.URLError, socket.error, ssl.SSLError) as exc: ++ except (urllib.error.URLError, socket.error, ssl.SSLError) as exc: + logging.warning('Failed to upload the report: %s', exc) + return success + +--- a/src/3rdparty/chromium/tools/check_grd_for_unused_strings.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/check_grd_for_unused_strings.py 2025-01-16 02:26:08.616262452 +0800 +@@ -10,7 +10,7 @@ + check instead. + """ + +-from __future__ import print_function ++ + + import os + import re +@@ -103,7 +103,7 @@ + # Anything left? + if len(ids_left) > 0: + print('The following ids are in GRD files, but *appear* to be unused:') +- for file_path, file_ids in file_id_map.iteritems(): ++ for file_path, file_ids in file_id_map.items(): + missing = ids_left.intersection(file_ids) + if len(missing) > 0: + print(' %s:' % file_path) +--- a/src/3rdparty/chromium/tools/diagnose-me.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/diagnose-me.py 2025-01-16 02:26:08.616262452 +0800 +@@ -6,7 +6,7 @@ + """Diagnose some common system configuration problems on Linux, and + suggest fixes.""" + +-from __future__ import print_function ++ + + import os + import subprocess +@@ -53,7 +53,7 @@ + path = '/usr/bin/' + path + try: + target = os.readlink(path) +- except OSError, e: ++ except OSError as e: + if e.errno == 2: + continue # No such file + if e.errno == 22: +--- a/src/3rdparty/chromium/tools/download_optimization_profile.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/download_optimization_profile.py 2025-01-16 02:26:08.616262452 +0800 +@@ -13,14 +13,14 @@ + No authentication is necessary if you pull these profiles directly over https. + """ + +-from __future__ import print_function ++ + + import argparse + import contextlib + import os + import subprocess + import sys +-import urllib2 ++import urllib.request, urllib.error, urllib.parse + + GS_HTTP_URL = 'https://storage.googleapis.com' + +@@ -80,7 +80,7 @@ + else: + gs_url = '/'.join([GS_HTTP_URL, desired_profile_name[len(gs_prefix):]]) + +- with contextlib.closing(urllib2.urlopen(gs_url)) as u: ++ with contextlib.closing(urllib.request.urlopen(gs_url)) as u: + with open(out_path, 'wb') as f: + while True: + buf = u.read(4096) +--- a/src/3rdparty/chromium/tools/gypv8sh.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/gypv8sh.py 2025-01-16 02:26:08.616262452 +0800 +@@ -7,7 +7,7 @@ + argument lists and to generate inlinable tests. 
+ """ + +-from __future__ import print_function ++ + + import json + import optparse +@@ -65,7 +65,7 @@ + with open(cxxoutfile, 'wb') as f: + f.write(out) + shutil.copyfile(inputfile, jsoutfile) +- except Exception, ex: ++ except Exception as ex: + if os.path.exists(cxxoutfile): + os.remove(cxxoutfile) + if os.path.exists(jsoutfile): +--- a/src/3rdparty/chromium/tools/include_tracer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/include_tracer.py 2025-01-16 02:26:08.616262452 +0800 +@@ -12,7 +12,7 @@ + tools/include_tracer.py -Iout/Default/gen chrome/browser/ui/browser.h + """ + +-from __future__ import print_function ++ + + import argparse + import os +--- a/src/3rdparty/chromium/tools/ipc_messages_log.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/ipc_messages_log.py 2025-01-16 02:26:08.616262452 +0800 +@@ -19,7 +19,7 @@ + Chromium is checked out using git. + """ + +-from __future__ import print_function ++ + + import optparse + import os +--- a/src/3rdparty/chromium/tools/licenses.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/licenses.py 2025-01-16 02:26:08.616262452 +0800 +@@ -14,7 +14,7 @@ + + (You can also import this as a module.) + """ +-from __future__ import print_function ++ + + import argparse + import codecs +@@ -423,7 +423,7 @@ + + # Check that all expected metadata is present. + errors = [] +- for key, value in metadata.items(): ++ for key, value in list(metadata.items()): + if not value: + errors.append("couldn't find '" + key + "' line " + "in README.chromium or licences.py " +@@ -628,7 +628,7 @@ + def EvaluateTemplate(template, env, escape=True): + """Expand a template with variables like {{foo}} using a + dictionary of expansions.""" +- for key, val in env.items(): ++ for key, val in list(env.items()): + if escape: + val = html.escape(val) + val = val.replace("*/", "* /") +--- a/src/3rdparty/chromium/tools/make-gtest-filter.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/make-gtest-filter.py 2025-01-16 02:26:08.620595712 +0800 +@@ -20,7 +20,7 @@ + > make-gtest-filter.py --line=123 ./myfile_unittest.cc + """ + +-from __future__ import print_function ++ + + import argparse + import fileinput +--- a/src/3rdparty/chromium/tools/multi_process_rss.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/multi_process_rss.py 2025-01-16 02:26:08.620595712 +0800 +@@ -17,7 +17,7 @@ + # The command line above counts the RSS of 1) process 12345, 2) process 23456 + # and 3) all descendant processes of process 23456. + +-from __future__ import print_function ++ + + import collections + import logging +@@ -91,9 +91,9 @@ + continue + pagemap_dct[pid] = pagemap + +- for pid, pagemap in pagemap_dct.iteritems(): +- for vma in pagemap.vma_internals.itervalues(): +- for pageframe, number in vma.pageframes.iteritems(): ++ for pid, pagemap in pagemap_dct.items(): ++ for vma in pagemap.vma_internals.values(): ++ for pageframe, number in vma.pageframes.items(): + pageframes[pageframe] += number + + return pageframes +--- a/src/3rdparty/chromium/tools/nocompile_driver.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/nocompile_driver.py 2025-01-16 02:26:08.620595712 +0800 +@@ -13,9 +13,9 @@ + http://dev.chromium.org/developers/testing/no-compile-tests + """ + +-from __future__ import print_function + +-import StringIO ++ ++import io + import ast + import os + import re +@@ -390,13 +390,13 @@ + # do. 
We ignore the return value from select and just poll all + # processes. + read_set = [] +- for test in executing_tests.values(): ++ for test in list(executing_tests.values()): + read_set.extend([test['stdout'], test['stderr']]) + select.select(read_set, [], read_set, NCTEST_TERMINATE_TIMEOUT_SEC) + + # Now attempt to process results. + now = time.time() +- for test in executing_tests.values(): ++ for test in list(executing_tests.values()): + proc = test['proc'] + if proc.poll() is not None: + test['finished_at'] = now +@@ -448,8 +448,8 @@ + test_configs = ExtractTestConfigs(sourcefile_path, suite_name) + timings['extract_done'] = time.time() + +- resultfile = StringIO.StringIO() +- resultlog = StringIO.StringIO() ++ resultfile = io.StringIO() ++ resultlog = io.StringIO() + resultfile.write(RESULT_FILE_HEADER % sourcefile_path) + + # Run the no-compile tests, but ensure we do not run more than |parallelism| +--- a/src/3rdparty/chromium/tools/omahaproxy.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/omahaproxy.py 2025-01-16 02:26:08.620595712 +0800 +@@ -9,21 +9,21 @@ + information. + """ + +-from __future__ import print_function ++ + + import json + import optparse + import os + import string + import sys +-import urllib ++import urllib.request, urllib.parse, urllib.error + + URL = 'https://omahaproxy.appspot.com/json' + + + def main(): + try: +- data = json.load(urllib.urlopen(URL)) ++ data = json.load(urllib.request.urlopen(URL)) + except Exception as e: + print('Error: could not load %s\n\n%s' % (URL, str(e))) + return 1 +--- a/src/3rdparty/chromium/tools/perry.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/perry.py 2025-01-16 02:26:08.620595712 +0800 +@@ -16,7 +16,7 @@ + You might want to run it in `screen` as it'll take a while. + """ + +-from __future__ import print_function ++ + + import argparse + import os +--- a/src/3rdparty/chromium/tools/remove_duplicate_includes.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/remove_duplicate_includes.py 2025-01-16 02:26:08.620595712 +0800 +@@ -11,7 +11,7 @@ + Usage: remove_duplicate_includes.py --dry-run components/foo components/bar + """ + +-from __future__ import print_function ++ + + import argparse + import collections +--- a/src/3rdparty/chromium/tools/roll_webgl_conformance.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/roll_webgl_conformance.py 2025-01-16 02:26:08.620595712 +0800 +@@ -3,7 +3,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import argparse + import collections +--- a/src/3rdparty/chromium/tools/run-swarmed.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/run-swarmed.py 2025-01-16 02:26:08.620595712 +0800 +@@ -20,7 +20,7 @@ + See //docs/workflow/debugging-with-swarming.md for more details. + """ + +-from __future__ import print_function ++ + + import argparse + import hashlib +@@ -196,7 +196,7 @@ + for l in f: + l = l.split('#')[0].strip() + if not l: continue +- k, v = map(str.strip, l.split('=', 1)) ++ k, v = list(map(str.strip, l.split('=', 1))) + gn_args[k] = v + if 'target_os' in gn_args: + args.target_os = gn_args['target_os'].strip('"') +@@ -258,7 +258,7 @@ + # Use dummy since threadpools give better exception messages + # than process pools do, and threads work fine for what we're doing. 
pool = multiprocessing.dummy.Pool()
+- spawn_args = map(lambda i: (i, args, isolated_hash), range(args.copies))
++ spawn_args = [(i, args, isolated_hash) for i in range(args.copies)]
+ spawn_results = pool.imap_unordered(_Spawn, spawn_args)
+
+ exit_codes = []
+--- a/src/3rdparty/chromium/tools/sort-headers.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/sort-headers.py 2025-01-16 02:26:08.620595712 +0800
+@@ -9,7 +9,7 @@
+ Works great with tools/git/for-all-touched-files.py.
+ """
+
+-from __future__ import print_function
++
+
+ import optparse
+ import os
+@@ -80,7 +80,7 @@
+ headerblock.append(line)
+ # Ensure we don't die due to trying to read beyond the end of the file.
+ try:
+- line = infile.next()
++ line = next(infile)
+ except StopIteration:
+ infile_ended_on_include_line = True
+ break
+--- a/src/3rdparty/chromium/tools/sort_sources.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/sort_sources.py 2025-01-16 02:26:08.620595712 +0800
+@@ -86,7 +86,7 @@
+
+ """
+
+-from __future__ import print_function
++
+
+ import difflib
+ import optparse
+--- a/src/3rdparty/chromium/tools/uberblame.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/uberblame.py 2025-01-16 02:26:08.620595712 +0800
+@@ -249,7 +249,7 @@
+ it = iter(diff)
+ for line in it:
+ while not line.startswith('@@'):
+- line = it.next()
++ line = next(it)
+ parts = line.split(' ')
+ previous_start, previous_end = parse_chunk_header_file_range(
+ parts[1].lstrip('-'))
+@@ -261,7 +261,7 @@
+ added_lines_end = None
+ removed_lines = []
+ while previous_start < previous_end or current_start < current_end:
+- line = it.next()
++ line = next(it)
+ firstchar = line[0]
+ line = line[1:]
+ if not in_delta and (firstchar == '-' or firstchar == '+'):
+@@ -357,10 +357,10 @@
+ """
+ substring_generator = generate_substrings(git_log_stdout)
+ while True:
+- hash = substring_generator.next()
+- author_name = substring_generator.next()
+- author_email = substring_generator.next()
+- author_date = substring_generator.next()
+- message = substring_generator.next().rstrip('\n')
+- diff = substring_generator.next().split('\n')[1:-1]
++ hash = next(substring_generator)
++ author_name = next(substring_generator)
++ author_email = next(substring_generator)
++ author_date = next(substring_generator)
++ message = next(substring_generator).rstrip('\n')
++ diff = next(substring_generator).split('\n')[1:-1]
+ yield Commit(hash, author_name, author_email, author_date, message, diff)
+@@ -656,7 +656,7 @@
+ commit_data.append('"%s": "%s",\n' % (hash, commit_display))
+ commit_data.append('}')
+ commit_data = ''.join(commit_data)
+- line_nums = range(1, row if lastline.strip() == '' else row + 1)
++ line_nums = list(range(1, row if lastline.strip() == '' else row + 1))
+ line_nums = '\n'.join([str(num) for num in line_nums])
+ lines = ''.join(lines)
+ return html % (commit_data, line_nums, lines)
+--- a/src/3rdparty/chromium/tools/unused-symbols-report.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/unused-symbols-report.py 2025-01-16 02:26:08.620595712 +0800
+@@ -17,7 +17,7 @@
+ ./tools/unused-symbols-report.py buildlog > report.html
+ """
+
+-from __future__ import print_function
++
+
+ import cgi
+ import optparse
+--- a/src/3rdparty/chromium/tools/update_pgo_profiles.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/update_pgo_profiles.py 2025-01-16 02:26:08.620595712 +0800
+@@ -11,7 +11,7 @@
+ large (~1GB) and updated frequently (~4 times a day). 
+ """ + +-from __future__ import print_function ++ + + import argparse + import os +--- a/src/3rdparty/chromium/tools/yes_no.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/yes_no.py 2025-01-16 02:26:08.620595712 +0800 +@@ -2,7 +2,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import sys + +--- a/src/3rdparty/chromium/tools/accessibility/dump_accessibility_tree_auralinux.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/accessibility/dump_accessibility_tree_auralinux.py 2025-01-16 02:26:08.620595712 +0800 +@@ -9,7 +9,7 @@ + exposing its interface to ATK from the command line. + """ + +-from __future__ import print_function ++ + + import pyatspi + +--- a/src/3rdparty/chromium/tools/accessibility/rebase_dump_accessibility_tree_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/accessibility/rebase_dump_accessibility_tree_test.py 2025-01-16 02:26:08.620595712 +0800 +@@ -18,7 +18,7 @@ + Optional argument: patchset number, otherwise will default to latest patchset + """ + +-from __future__ import print_function ++ + + import json + import os +@@ -26,8 +26,8 @@ + import sys + import tempfile + import time +-import urllib +-import urlparse ++import urllib.request, urllib.parse, urllib.error ++import urllib.parse + + # The location of the DumpAccessibilityTree html test files and expectations. + TEST_DATA_PATH = os.path.join(os.getcwd(), 'content/test/data/accessibility') +--- a/src/3rdparty/chromium/tools/accessibility/nvda/nvda_chrome_tests.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/accessibility/nvda/nvda_chrome_tests.py 2025-01-16 02:26:08.620595712 +0800 +@@ -21,7 +21,7 @@ + is set up correctly, the actual tests should run automatically and unattended. + """ + +-from __future__ import print_function ++ + + import os + import pywinauto +--- a/src/3rdparty/chromium/tools/binary_size/diagnose_bloat.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/binary_size/diagnose_bloat.py 2025-01-16 02:26:08.620595712 +0800 +@@ -135,7 +135,7 @@ + + @property + def summary_stat(self): +- for section_name, results in self._diff.items(): ++ for section_name, results in list(self._diff.items()): + for subsection_name, value, units in results: + if 'normalized' in subsection_name: + full_name = '{} {}'.format(section_name, subsection_name) +@@ -158,8 +158,8 @@ + before = self._LoadResults(before_dir) + after = self._LoadResults(after_dir) + self._diff = collections.defaultdict(list) +- for section, section_dict in after.items(): +- for subsection, v in section_dict.items(): ++ for section, section_dict in list(after.items()): ++ for subsection, v in list(section_dict.items()): + # Ignore entries when resource_sizes.py chartjson format has changed. + if (section not in before or + subsection not in before[section] or +@@ -176,7 +176,7 @@ + def _ResultLines(self, include_sections=None): + """Generates diff lines for the specified sections (defaults to all).""" + section_lines = collections.defaultdict(list) +- for section_name, section_results in self._diff.items(): ++ for section_name, section_results in list(self._diff.items()): + if not include_sections or section_name in include_sections: + subsection_lines = [] + section_sum = 0 +@@ -209,7 +209,7 @@ + charts = chartjson['charts'] + # Older versions of resource_sizes.py prefixed the apk onto section names. 
+ ret = {} +- for section, section_dict in charts.items(): ++ for section, section_dict in list(charts.items()): + section_no_target = re.sub(r'^.*_', '', section) + ret[section_no_target] = section_dict + return ret +@@ -717,7 +717,7 @@ + + + def _VerifyUserAccepts(message): +- print(message + ' Do you want to proceed? [y/n]') ++ print((message + ' Do you want to proceed? [y/n]')) + if input('> ').lower() != 'y': + sys.exit() + +--- a/src/3rdparty/chromium/tools/binary_size/find_large_commits.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/binary_size/find_large_commits.py 2025-01-16 02:26:08.620595712 +0800 +@@ -76,19 +76,19 @@ + rev_and_delta = _FindBigDeltas(revs_and_sizes, options.increase_threshold, + options.decrease_threshold) + +- print('Printing info for up to {} commits in the range {}-{}'.format( +- len(rev_and_delta), revs_and_sizes[0][0], revs_and_sizes[-1][0])) ++ print(('Printing info for up to {} commits in the range {}-{}'.format( ++ len(rev_and_delta), revs_and_sizes[0][0], revs_and_sizes[-1][0]))) + print('Revision,Hash,Title,Author,Delta,Date,Milestone') + afdo_count = 0 + for rev, delta in rev_and_delta: + sha1, author, date, title, milestone = _LookupCommitInfo(rev) + if milestone is not None: +- print('\t'.join( ++ print(('\t'.join( + [str(rev), sha1, title, author, +- str(delta), date, milestone])) ++ str(delta), date, milestone]))) + else: + afdo_count += 1 +- print('Skipped %d AFDO rolls' % afdo_count) ++ print(('Skipped %d AFDO rolls' % afdo_count)) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/tools/binary_size/sizes.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/binary_size/sizes.py 2025-01-16 02:26:08.620595712 +0800 +@@ -10,7 +10,7 @@ + This script uses Python 2 due to dependence on tracing.value. + """ + +-from __future__ import print_function ++ + + import argparse + import errno +@@ -273,7 +273,7 @@ + # TODO(mcgrathr): This should all be refactored so the mac and win flavors + # also deliver data structures rather than printing, and the logic for + # the printing and the summing totals is shared across all three flavors. +- for (identifier, units), value in sorted(totals.iteritems()): ++ for (identifier, units), value in sorted(totals.items()): + results_collector.add_result('totals-%s' % identifier, identifier, value, + units) + +@@ -373,7 +373,7 @@ + # 1. Add a top-level "benchmark_name" key. + # 2. Pull out the "identifier" value to be the story name. 
+ formatted_data = {} +- for metric, metric_data in data.iteritems(): ++ for metric, metric_data in data.items(): + story = metric_data['identifier'] + formatted_data[metric] = {story: metric_data.copy()} + del formatted_data[metric][story]['identifier'] +--- a/src/3rdparty/chromium/tools/binary_size/libsupersize/ar.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/binary_size/libsupersize/ar.py 2025-01-16 02:26:08.620595712 +0800 +@@ -125,10 +125,10 @@ + + if args.expand_thin: + expanded = ExpandThinArchives([args.ar_path], args.output_directory)[0] +- print('\n'.join(expanded)) ++ print(('\n'.join(expanded))) + else: + for name, payload in IterArchiveChunks(args.ar_path): +- print('{}: size={}'.format(name, len(payload) if payload else '')) ++ print(('{}: size={}'.format(name, len(payload) if payload else ''))) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/tools/binary_size/libsupersize/archive.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/binary_size/libsupersize/archive.py 2025-01-16 02:26:08.620595712 +0800 +@@ -473,12 +473,12 @@ + ret = [] + STRING_LITERAL_NAME = models.STRING_LITERAL_NAME + assert len(merge_string_syms) == len(list_of_positions_by_object_path) +- tups = zip(merge_string_syms, list_of_positions_by_object_path) ++ tups = list(zip(merge_string_syms, list_of_positions_by_object_path)) + for merge_sym, positions_by_object_path in tups: + merge_sym_address = merge_sym.address + new_symbols = [] + ret.append(new_symbols) +- for object_path, positions in positions_by_object_path.items(): ++ for object_path, positions in list(positions_by_object_path.items()): + for offset, size in positions: + address = merge_sym_address + offset + symbol = models.Symbol( +@@ -687,7 +687,7 @@ + num_new_symbols += len(name_list) - 1 + + if missing_names and logging.getLogger().isEnabledFor(logging.INFO): +- for address, names in names_by_address.items(): ++ for address, names in list(names_by_address.items()): + for name in names: + if name in missing_names: + logging.info('Missing name %s is at address %x instead of [%s]' % +@@ -833,7 +833,7 @@ + sizes_by_module = _CollectModuleSizes(args.minimal_apks_file) + metadata[models.METADATA_APK_FILENAME] = shorten_path( + args.minimal_apks_file) +- for name, size in sizes_by_module.items(): ++ for name, size in list(sizes_by_module.items()): + key = models.METADATA_APK_SIZE + if name != 'base': + key += '-' + name +@@ -951,7 +951,7 @@ + elf_section_ranges = _SectionInfoFromElf(elf_path, tool_prefix) + differing_elf_section_sizes = {} + differing_map_section_sizes = {} +- for k, (_, elf_size) in elf_section_ranges.items(): ++ for k, (_, elf_size) in list(elf_section_ranges.items()): + if k in _SECTION_SIZE_BLACKLIST: + continue + (_, map_size) = map_section_ranges.get(k) +@@ -1044,7 +1044,7 @@ + } + alias_map = { + k: id_map[id(v)] +- for k, v in contents.resources.items() if id_map[id(v)] != k ++ for k, v in list(contents.resources.items()) if id_map[id(v)] != k + } + # Longest locale pak is: es-419.pak. + # Only non-translated .pak files are: resources.pak, chrome_100_percent.pak. +@@ -1109,7 +1109,7 @@ + # We package resources in the res/ folder only in the apk. 
+ res_info = { + os.path.join('res', dest): source +- for dest, source in res_info_without_root.items() ++ for dest, source in list(res_info_without_root.items()) + } + res_info.update(self._knobs.apk_other_files) + return res_info +@@ -1139,7 +1139,7 @@ + + def _ParsePakSymbols(symbols_by_id, object_paths_by_pak_id): + raw_symbols = [] +- for resource_id, symbol in symbols_by_id.items(): ++ for resource_id, symbol in list(symbols_by_id.items()): + raw_symbols.append(symbol) + paths = object_paths_by_pak_id.get(resource_id) + if not paths: +@@ -1365,7 +1365,7 @@ + def _CalculateElfOverhead(section_ranges, elf_path): + if elf_path: + section_sizes_total_without_bss = sum( +- size for k, (address, size) in section_ranges.items() ++ size for k, (address, size) in list(section_ranges.items()) + if k not in models.BSS_SECTIONS) + elf_overhead_size = ( + os.path.getsize(elf_path) - section_sizes_total_without_bss) +@@ -1424,7 +1424,7 @@ + for sym in raw_symbols: + if sym.end_address > last_symbol_ends[sym.section_name]: + last_symbol_ends[sym.section_name] = sym.end_address +- for section_name, last_symbol_end in last_symbol_ends.items(): ++ for section_name, last_symbol_end in list(last_symbol_ends.items()): + size_from_syms = last_symbol_end - section_ranges[section_name][0] + overhead = section_ranges[section_name][1] - size_from_syms + assert overhead >= 0, ( +@@ -1444,7 +1444,7 @@ + + # Sections that should not bundle into ".other". + unsummed_sections, summed_sections = models.ClassifySections( +- section_ranges.keys()) ++ list(section_ranges.keys())) + # Sort keys to ensure consistent order (> 1 sections may have address = 0). + for section_name in sorted(section_ranges.keys()): + # Handle sections that don't appear in |raw_symbols|. +@@ -1665,7 +1665,7 @@ + if elf_path and opts.relocations_mode: + _OverwriteSymbolSizesWithRelocationCount(raw_symbols, tool_prefix, elf_path) + +- section_sizes = {k: size for k, (address, size) in section_ranges.items()} ++ section_sizes = {k: size for k, (address, size) in list(section_ranges.items())} + container = models.Container(name=container_name, + metadata=metadata, + section_sizes=section_sizes) +@@ -1736,7 +1736,7 @@ + + def _ElfIsMainPartition(elf_path, tool_prefix): + section_ranges = _SectionInfoFromElf(elf_path, tool_prefix) +- return models.SECTION_PART_END in section_ranges.keys() ++ return models.SECTION_PART_END in list(section_ranges.keys()) + + + def _ArchFromElf(elf_path, tool_prefix): +@@ -2025,7 +2025,7 @@ + except ValueError as e: + on_config_error('%s: %s' % (err_prefix, e.args[0])) + for sub_args in ret: +- for k, v in sub_args.__dict__.items(): ++ for k, v in list(sub_args.__dict__.items()): + # Translate file arguments to be relative to |sub_dir|. + if (k.endswith('_file') or k == 'f') and v is not None: + sub_args.__dict__[k] = os.path.join(base_dir, v) +@@ -2052,7 +2052,7 @@ + parser.error('Container name cannot have characters in "<>"') + + # Copy output_directory, tool_prefix, etc. into sub_args. 
+- for k, v in top_args.__dict__.items(): ++ for k, v in list(top_args.__dict__.items()): + sub_args.__dict__.setdefault(k, v) + + opts = ContainerArchiveOptions(top_args, sub_args) +@@ -2092,7 +2092,7 @@ + size_info_prefix = os.path.join(top_args.output_directory, 'size-info', + os.path.basename(apk_prefix)) + +- container_args = {k: v for k, v in sub_args.__dict__.items()} ++ container_args = {k: v for k, v in list(sub_args.__dict__.items())} + container_args.update(opts.__dict__) + logging.info('Container Params: %r', container_args) + return (sub_args, opts, container_name, apk_so_path, resources_pathmap_path, +--- a/src/3rdparty/chromium/tools/binary_size/libsupersize/bcanalyzer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/binary_size/libsupersize/bcanalyzer.py 2025-01-16 02:26:08.620595712 +0800 +@@ -368,9 +368,9 @@ + + for obj_path in args.objects: + rel_path = os.path.relpath(obj_path, base_path) +- print('File: %s' % rel_path) ++ print(('File: %s' % rel_path)) + for cur_type, s in _ParseBcAnalyzer(runner.RunOnFile(obj_path)): +- print(' char%d[%d]: %r' % (cur_type.width * 8, cur_type.length, s)) ++ print((' char%d[%d]: %r' % (cur_type.width * 8, cur_type.length, s))) + print('') + + +--- a/src/3rdparty/chromium/tools/binary_size/libsupersize/bcanalyzer_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/binary_size/libsupersize/bcanalyzer_test.py 2025-01-16 02:26:08.620595712 +0800 +@@ -32,7 +32,7 @@ + the result of concatanating tokens. + """ + cfi = itertools.chain.from_iterable +- chars = cfi(map(ord, t) if isinstance(t, str) else (t, ) for t in toks) ++ chars = cfi(list(map(ord, t)) if isinstance(t, str) else (t, ) for t in toks) + return bytes(cfi((c >> (8 * i) for i in range(bits // 8)) for c in chars)) + + +--- a/src/3rdparty/chromium/tools/binary_size/libsupersize/console.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/binary_size/libsupersize/console.py 2025-01-16 02:26:08.620595712 +0800 +@@ -170,7 +170,7 @@ + 'acl ch -u AllUsers:R gs://chrome-supersize/oneoffs/{shortname}\n' + ' Then view it at https://storage.googleapis.com/chrome-supersize' + '/viewer.html?load_url=oneoffs%2F{shortname}') +- print(msg.format(local=to_file, shortname=shortname)) ++ print((msg.format(local=to_file, shortname=shortname))) + + def _SaveDeltaSizeInfo(self, size_info, to_file=None): + """Saves a .sizediff file containing only filtered_symbols into to_file. +@@ -191,7 +191,7 @@ + 'acl ch -u AllUsers:R gs://chrome-supersize/oneoffs/{shortname}\n' + ' Then view it at https://storage.googleapis.com/chrome-supersize' + '/viewer.html?load_url=oneoffs%2F{shortname}') +- print(msg.format(local=to_file, shortname=shortname)) ++ print((msg.format(local=to_file, shortname=shortname))) + + def _SizeStats(self, size_info=None): + """Prints some statistics for the given size info. 
+@@ -354,8 +354,8 @@ + proc.kill() + + def _ShowExamplesFunc(self): +- print(self._CreateBanner()) +- print('\n'.join([ ++ print((self._CreateBanner())) ++ print(('\n'.join([ + '# Show pydoc for main types:', + 'import models', + 'help(models)', +@@ -399,7 +399,7 @@ + '', + '# For even more inspiration, look at canned_queries.py', + '# (and feel free to add your own!).', +- ])) ++ ]))) + + def _CreateBanner(self): + def keys(cls, super_keys=None): +@@ -438,7 +438,7 @@ + 'Variables:', + ' printed: List of objects passed to Print().', + ] +- for key, value in self._variables.items(): ++ for key, value in list(self._variables.items()): + if isinstance(value, types.ModuleType): + continue + if key.startswith('size_info'): +--- a/src/3rdparty/chromium/tools/binary_size/libsupersize/demangle.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/binary_size/libsupersize/demangle.py 2025-01-16 02:26:08.620595712 +0800 +@@ -107,7 +107,7 @@ + |key_to_names| is a dict from key to sets (or lists) of mangled names. + """ + all_names = [] +- for names in key_to_names.values(): ++ for names in list(key_to_names.values()): + all_names.extend(n for n in names if _CanDemangle(n)) + if not all_names: + return key_to_names +@@ -115,7 +115,7 @@ + logging.info('Demangling %d values', len(all_names)) + it = iter(_DemangleNames(all_names, tool_prefix)) + ret = {} +- for key, names in key_to_names.items(): ++ for key, names in list(key_to_names.items()): + ret[key] = set(next(it) if _CanDemangle(n) else n for n in names) + assert(next(it, None) is None) + return ret +@@ -134,7 +134,7 @@ + logging.info('Demangling %d keys', len(keys)) + key_iter = iter(_DemangleNames(keys, tool_prefix)) + ret = collections.defaultdict(list) +- for key, val in name_to_list.items(): ++ for key, val in list(name_to_list.items()): + ret[next(key_iter) if _CanDemangle(key) else key] += val + assert(next(key_iter, None) is None) + logging.info('* %d keys become %d keys' % (len(name_to_list), len(ret))) +--- a/src/3rdparty/chromium/tools/binary_size/libsupersize/describe.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/binary_size/libsupersize/describe.py 2025-01-16 02:26:08.622762342 +0800 +@@ -52,7 +52,7 @@ + + + def _GetSectionSizeInfo(unsummed_sections, summed_sections, section_sizes): +- sizes = [v for k, v in section_sizes.items() if k in summed_sections] ++ sizes = [v for k, v in list(section_sizes.items()) if k in summed_sections] + total_bytes = sum(sizes) + max_bytes = max(sizes) + +@@ -68,7 +68,7 @@ + or abs(_Divide(size, max_bytes)) > .04) + + section_names = sorted( +- k for k, v in section_sizes.items() if is_significant_section(k, v)) ++ k for k, v in list(section_sizes.items()) if is_significant_section(k, v)) + + return (total_bytes, section_names) + +@@ -205,7 +205,7 @@ + yield '' + yield '{}Other section sizes:'.format(indent) + section_names = sorted( +- k for k in section_sizes.keys() if k not in section_names) ++ k for k in list(section_sizes.keys()) if k not in section_names) + for name in section_names: + notes = '' + if name in unsummed_sections: +@@ -339,7 +339,7 @@ + @staticmethod + def _RelevantSections(section_names): + relevant_sections = [ +- s for s in models.SECTION_TO_SECTION_NAME.values() if s in section_names ++ s for s in list(models.SECTION_TO_SECTION_NAME.values()) if s in section_names + ] + if models.SECTION_MULTIPLE in relevant_sections: + relevant_sections.remove(models.SECTION_MULTIPLE) +@@ -412,7 +412,7 @@ + title_parts.append('...') + else: + if 
group.IsDelta(): +- title_parts.append(u'\u0394 PSS (\u0394 size_without_padding)') ++ title_parts.append('\u0394 PSS (\u0394 size_without_padding)') + else: + title_parts.append('PSS') + title_parts.append('Path') +@@ -509,13 +509,13 @@ + def _DescribeDeltaDict(self, data_name, before_dict, after_dict, indent=''): + common_items = { + k: v +- for k, v in before_dict.items() if after_dict.get(k) == v ++ for k, v in list(before_dict.items()) if after_dict.get(k) == v + } + before_items = { + k: v +- for k, v in before_dict.items() if k not in common_items ++ for k, v in list(before_dict.items()) if k not in common_items + } +- after_items = {k: v for k, v in after_dict.items() if k not in common_items} ++ after_items = {k: v for k, v in list(after_dict.items()) if k not in common_items} + return itertools.chain( + (indent + 'Common %s:' % data_name, ), + (indent + ' %s' % line for line in DescribeDict(common_items)), +@@ -602,7 +602,7 @@ + + def _DescribeSizeInfoContainerCoverage(raw_symbols, container): + """Yields lines describing how accurate |size_info| is.""" +- for section, section_name in models.SECTION_TO_SECTION_NAME.items(): ++ for section, section_name in list(models.SECTION_TO_SECTION_NAME.items()): + expected_size = container.section_sizes.get(section_name) + in_section = raw_symbols.WhereInSection(section_name, container=container) + actual_size = in_section.size +@@ -830,7 +830,7 @@ + + def DescribeDict(input_dict): + display_dict = {} +- for k, v in input_dict.items(): ++ for k, v in list(input_dict.items()): + if k == models.METADATA_ELF_MTIME: + timestamp_obj = datetime.datetime.utcfromtimestamp(v) + display_dict[k] = ( +@@ -847,7 +847,7 @@ + display_dict[k] = '' + else: + display_dict[k] = repr(v) +- return sorted('%s=%s' % t for t in display_dict.items()) ++ return sorted('%s=%s' % t for t in list(display_dict.items())) + + + def GenerateLines(obj, verbose=False, recursive=False, summarize=True, +--- a/src/3rdparty/chromium/tools/binary_size/libsupersize/diff.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/binary_size/libsupersize/diff.py 2025-01-16 02:26:08.622762342 +0800 +@@ -85,7 +85,7 @@ + len(delta_symbols), len(after)) + + unmatched_before = [] +- for syms in before_symbols_by_key.values(): ++ for syms in list(before_symbols_by_key.values()): + unmatched_before.extend(syms) + return delta_symbols, unmatched_before, unmatched_after + +@@ -114,7 +114,7 @@ + container_from_name = {c.name: c for c in containers} + + # Create a DeltaSymbol to represent the zero'd out padding of matched symbols. +- for (container_name, section_name), padding in padding_by_segment.items(): ++ for (container_name, section_name), padding in list(padding_by_segment.items()): + # Values need to be integer (crbug.com/1132394). 
+ padding = round(padding) + if padding != 0: +@@ -168,7 +168,7 @@ + else: + pairs[c.name] = [c, models.Container.Empty()] + ret = [] +- for name, [before_c, after_c] in pairs.items(): ++ for name, [before_c, after_c] in list(pairs.items()): + ret.append( + models.Container(name=name, + metadata=_DiffObj(before_c.metadata, after_c.metadata), +--- a/src/3rdparty/chromium/tools/binary_size/libsupersize/file_format.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/binary_size/libsupersize/file_format.py 2025-01-16 02:26:08.622762342 +0800 +@@ -349,7 +349,7 @@ + + If |delta| is True, the differences in values are written instead.""" + for group in symbol_group_by_segment: +- gen = map(func, group) ++ gen = list(map(func, group)) + w.WriteNumberList(gen_delta(gen) if delta else gen) + + write_groups(lambda s: s.address, delta=True) +--- a/src/3rdparty/chromium/tools/binary_size/libsupersize/html_report.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/binary_size/libsupersize/html_report.py 2025-01-16 02:26:08.622762342 +0800 +@@ -168,9 +168,9 @@ + inserted_smalls_abs_pss = 0 + skipped_smalls_count = 0 + skipped_smalls_abs_pss = 0 +- for tup, type_to_pss in small_symbol_pss.items(): ++ for tup, type_to_pss in list(small_symbol_pss.items()): + path, component = tup +- for section_name, pss in type_to_pss.items(): ++ for section_name, pss in list(type_to_pss.items()): + if abs(pss) < _MIN_OTHER_PSS: + skipped_smalls_count += 1 + skipped_smalls_abs_pss += abs(pss) +@@ -301,5 +301,5 @@ + # Use a random UUID as the filename so user can copy-and-paste command + # directly without a name collision. + upload_id = uuid.uuid4() +- print('\n'.join(msg).format(supersize_path, args.output_report_file, +- upload_id)) ++ print(('\n'.join(msg).format(supersize_path, args.output_report_file, ++ upload_id))) +--- a/src/3rdparty/chromium/tools/binary_size/libsupersize/linker_map_parser.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/binary_size/libsupersize/linker_map_parser.py 2025-01-16 02:26:08.622762342 +0800 +@@ -818,7 +818,7 @@ + + with open(args.linker_file, 'r') as map_file: + linker_name = DetectLinkerNameFromMapFile(map_file) +- print('Linker type: %s' % linker_name) ++ print(('Linker type: %s' % linker_name)) + + with open(args.linker_file, 'r') as map_file: + section_ranges, syms, extras = MapFileParser().Parse(linker_name, map_file) +--- a/src/3rdparty/chromium/tools/binary_size/libsupersize/main.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/binary_size/libsupersize/main.py 2025-01-16 02:26:08.622762342 +0800 +@@ -109,7 +109,7 @@ + _SaveDiffAction(), + 'Create a stand-alone .sizediff diff report from two .size files.') + +- for name, tup in actions.items(): ++ for name, tup in list(actions.items()): + sub_parser = sub_parsers.add_parser(name, help=tup[1]) + _AddCommonArguments(sub_parser) + tup[0].AddArguments(sub_parser) +--- a/src/3rdparty/chromium/tools/binary_size/libsupersize/models.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/binary_size/libsupersize/models.py 2025-01-16 02:26:08.622762342 +0800 +@@ -241,7 +241,7 @@ + + def ClassifySections(self): + if not self._classified_sections: +- self._classified_sections = ClassifySections(self.section_sizes.keys()) ++ self._classified_sections = ClassifySections(list(self.section_sizes.keys())) + return self._classified_sections + + @staticmethod +@@ -356,7 +356,7 @@ + """ + assert len(self.containers) == 1 + metadata = 
self.containers[0].metadata.copy() +- for k, v in self.build_config.items(): ++ for k, v in list(self.build_config.items()): + assert k not in metadata + metadata[k] = v + return metadata +@@ -1157,7 +1157,7 @@ + # Create the subgroups. + include_singles = min_count >= 0 + min_count = abs(min_count) +- for token, symbol_or_list in symbols_by_token.items(): ++ for token, symbol_or_list in list(symbols_by_token.items()): + count = 1 + if symbol_or_list.__class__ == list: + count = len(symbol_or_list) +--- a/src/3rdparty/chromium/tools/binary_size/libsupersize/ninja_parser.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/binary_size/libsupersize/ninja_parser.py 2025-01-16 02:26:08.622762342 +0800 +@@ -60,7 +60,7 @@ + return len(self._unmatched_paths) + + def IterAllPaths(self): +- return self._dep_map.keys() ++ return list(self._dep_map.keys()) + + + def _ParseNinjaPathList(path_list): +@@ -142,15 +142,15 @@ + if not elf_inputs: + elf_inputs = [] + +- print('Found {} elf_inputs, and {} source mappings'.format( +- len(elf_inputs), len(source_mapper._dep_map))) ++ print(('Found {} elf_inputs, and {} source mappings'.format( ++ len(elf_inputs), len(source_mapper._dep_map)))) + if args.show_inputs: + print('elf_inputs:') +- print('\n'.join(elf_inputs)) ++ print(('\n'.join(elf_inputs))) + if args.show_mappings: + print('object_path -> source_path:') + for path in source_mapper.IterAllPaths(): +- print('{} -> {}'.format(path, source_mapper.FindSourceForPath(path))) ++ print(('{} -> {}'.format(path, source_mapper.FindSourceForPath(path)))) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/tools/binary_size/libsupersize/nm.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/binary_size/libsupersize/nm.py 2025-01-16 02:26:08.622762342 +0800 +@@ -122,7 +122,7 @@ + names_by_address[address].add(mangled_name) + + # Need to add before demangling because |names_by_address| changes type. +- for address, count in num_outlined_functions_at_address.items(): ++ for address, count in list(num_outlined_functions_at_address.items()): + name = '** outlined function' + (' * %d' % count if count > 1 else '') + names_by_address[address].add(name) + +@@ -134,7 +134,7 @@ + # Also: Sort to ensure stable ordering. + return { + addr: sorted(names) +- for addr, names in names_by_address.items() ++ for addr, names in list(names_by_address.items()) + if len(names) > 1 or num_outlined_functions_at_address.get(addr, 0) > 1 + } + +--- a/src/3rdparty/chromium/tools/binary_size/libsupersize/obj_analyzer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/binary_size/libsupersize/obj_analyzer.py 2025-01-16 02:26:08.622762342 +0800 +@@ -150,7 +150,7 @@ + for encoded_syms, encoded_strs, num_no_symbols in results: + total_no_symbols += num_no_symbols + symbol_names_by_path = parallel.DecodeDictOfLists(encoded_syms) +- for path, names in symbol_names_by_path.items(): ++ for path, names in list(symbol_names_by_path.items()): + for name in names: + all_paths_by_name[name].append(path) + +@@ -185,7 +185,7 @@ + self._paths_by_name = demangle.DemangleKeysAndMergeLists( + self._paths_by_name, self._tool_prefix) + # Sort and uniquefy. 
+- for key in self._paths_by_name.keys(): ++ for key in list(self._paths_by_name.keys()): + self._paths_by_name[key] = sorted(set(self._paths_by_name[key])) + + def _ReadElfStringData(self, elf_path, elf_string_ranges): +@@ -437,10 +437,10 @@ + bulk_analyzer.SortPaths() + + names_to_paths = bulk_analyzer.GetSymbolNames() +- print('Found {} names'.format(len(names_to_paths))) ++ print(('Found {} names'.format(len(names_to_paths)))) + if args.show_names: +- for name, paths in names_to_paths.items(): +- print('{}: {!r}'.format(name, paths)) ++ for name, paths in list(names_to_paths.items()): ++ print(('{}: {!r}'.format(name, paths))) + + if args.elf_file: + address, offset, size = string_extract.LookupElfRodataInfo( +@@ -448,15 +448,15 @@ + bulk_analyzer.AnalyzeStringLiterals(args.elf_file, ((address, size),)) + + positions_by_path = bulk_analyzer.GetStringPositions()[0] +- print('Found {} string literals'.format( +- sum(len(v) for v in positions_by_path.values()))) ++ print(('Found {} string literals'.format( ++ sum(len(v) for v in list(positions_by_path.values()))))) + if args.show_strings: + logging.debug('.rodata adjust=%d', address - offset) +- for path, positions in positions_by_path.items(): ++ for path, positions in list(positions_by_path.items()): + strs = string_extract.ReadFileChunks( + args.elf_file, ((offset + addr, size) for addr, size in positions)) +- print('{}: {!r}'.format( +- path, [s if len(s) < 20 else s[:20] + '...' for s in strs])) ++ print(('{}: {!r}'.format( ++ path, [s if len(s) < 20 else s[:20] + '...' for s in strs]))) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/tools/binary_size/libsupersize/parallel.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/binary_size/libsupersize/parallel.py 2025-01-16 02:26:08.622762342 +0800 +@@ -208,7 +208,7 @@ + pool = _MakeProcessPool(arg_tuples, **kwargs) + wrapped_func = _FuncWrapper(func) + try: +- for result in pool.imap_unordered(wrapped_func, range(len(arg_tuples))): ++ for result in pool.imap_unordered(wrapped_func, list(range(len(arg_tuples)))): + _CheckForException(result) + yield result + finally: +@@ -233,16 +233,16 @@ + Does not support '' as keys, nor [''] as values. + """ + assert '' not in d +- assert [''] not in iter(d.values()) ++ assert [''] not in iter(list(d.values())) + keys = iter(d) + if key_transform: + keys = (key_transform(k) for k in keys) + keys = '\x01'.join(keys) + if value_transform: + values = '\x01'.join( +- '\x02'.join(value_transform(y) for y in x) for x in d.values()) ++ '\x02'.join(value_transform(y) for y in x) for x in list(d.values())) + else: +- values = '\x01'.join('\x02'.join(x) for x in d.values()) ++ values = '\x01'.join('\x02'.join(x) for x in list(d.values())) + return keys, values + + +--- a/src/3rdparty/chromium/tools/binary_size/libsupersize/string_extract.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/binary_size/libsupersize/string_extract.py 2025-01-16 02:26:08.622762342 +0800 +@@ -258,7 +258,7 @@ + string_addresses_by_path = parallel.DecodeDictOfLists( + encoded_string_addresses_by_path) + # Assign |target| as archive path, or a list of object paths. 
+- any_path = next(iter(string_addresses_by_path.keys())) ++ any_path = next(iter(list(string_addresses_by_path.keys()))) + target = _ExtractArchivePath(any_path) + if not target: + target = list(string_addresses_by_path.keys()) +@@ -271,7 +271,7 @@ + target, output_directory, section_positions_by_path) + + def GeneratePathAndValues(): +- for path, object_addresses in string_addresses_by_path.items(): ++ for path, object_addresses in list(string_addresses_by_path.items()): + for value in _IterStringLiterals( + path, object_addresses, string_sections_by_path.get(path)): + yield path, value +@@ -287,7 +287,7 @@ + encoded_strings_by_path, value_transform=ast.literal_eval) + + def GeneratePathAndValues(): +- for path, strings in strings_by_path.items(): ++ for path, strings in list(strings_by_path.items()): + for value in strings: + yield path, value + +--- a/src/3rdparty/chromium/tools/bisect_repackage/bisect_repackage.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/bisect_repackage/bisect_repackage.py 2025-01-16 02:26:08.622762342 +0800 +@@ -6,7 +6,7 @@ + This script repacakges chrome builds for manual bisect script. + """ + +-from __future__ import print_function ++ + + from functools import partial + import json +@@ -18,7 +18,7 @@ + import sys + import tempfile + import threading +-import urllib ++import urllib.request, urllib.parse, urllib.error + import bisect_repackage_utils + import re + # This script uses cloud_storage module which contains gsutils wrappers. +@@ -170,11 +170,11 @@ + def get_cp_from_hash(git_hash): + """Converts a git hash to commit position number.""" + json_url = CHROMIUM_GITHASH_TO_SVN_URL % git_hash +- response = urllib.urlopen(json_url) ++ response = urllib.request.urlopen(json_url) + if response.getcode() == 200: + try: + data = json.loads(response.read()) +- except Exception,e: ++ except Exception as e: + logging.warning('JSON URL: %s, Error Message: %s' % json_url, e) + raise GitConversionError + else: +@@ -238,7 +238,7 @@ + try: + cloud_storage.Get(context.original_gs_bucket, + remote_file_path, zip_file_name) +- except Exception, e: ++ except Exception as e: + logging.warning('Failed to download: %s, error: %s', zip_file_name, e) + return False + return True +@@ -295,7 +295,7 @@ + bisect_repackage_utils.IsGitCommitHash) + hash_list = list(set(hash_list)-set(revision_map.values())) + cp_num_to_hash_map = create_cp_from_hash_map(hash_list) +- merged_dict = dict(cp_num_to_hash_map.items() + revision_map.items()) ++ merged_dict = dict(list(cp_num_to_hash_map.items()) + list(revision_map.items())) + upload_revision_map(merged_dict, context) + + +@@ -335,7 +335,7 @@ + code = bisect_repackage_utils.RunCommand(command) + if code != 0: + raise ChromeExecutionError('An error occurred when executing Chrome') +- except ChromeExecutionError,e: ++ except ChromeExecutionError as e: + print(str(e)) + + +@@ -438,13 +438,13 @@ + def get_hash_from_cp(cp_num): + """Converts a commit position number to git hash.""" + json_url = CHROMIUM_CP_TO_GITHASH % cp_num +- response = urllib.urlopen(json_url) ++ response = urllib.request.urlopen(json_url) + if response.getcode() == 200: + try: + data = json.loads(response.read()) + if 'git_sha' in data: + return data['git_sha'] +- except Exception, e: ++ except Exception as e: + logging.warning('Failed to fetch git_hash: %s, error: %s' % json_url, e) + else: + logging.warning('Failed to fetch git_hash: %s, CP: %s' % json_url, cp_num) +@@ -460,7 +460,7 @@ + return revision_map + + def 
get_overwrite_revisions(revision_map):
+-  return sorted(revision_map.keys(), reverse=True)
++  return sorted(list(revision_map.keys()), reverse=True)
+ 
+ 
+ class RepackageJob(object):
+--- a/src/3rdparty/chromium/tools/bisect_repackage/bisect_repackage_utils.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/bisect_repackage/bisect_repackage_utils.py	2025-01-16 02:26:08.622762342 +0800
+@@ -8,7 +8,7 @@
+ and build/scripts/common/slave_utils.
+ """
+ 
+-from __future__ import print_function
++
+ 
+ import errno
+ import os
+@@ -97,7 +97,7 @@
+   file_path = os.path.join(*path)
+   try:
+     os.makedirs(file_path)
+-  except OSError, e:
++  except OSError as e:
+     if e.errno != errno.EEXIST:
+       raise
+ 
+@@ -117,7 +117,7 @@
+   try:
+     RemoveFile(new_path)
+     os.rename(path, new_path)
+-  except OSError, e:
++  except OSError as e:
+     if e.errno != errno.ENOENT:
+       raise
+ 
+@@ -127,7 +127,7 @@
+   file_path = os.path.join(*path)
+   try:
+     os.remove(file_path)
+-  except OSError, e:
++  except OSError as e:
+     if e.errno != errno.ENOENT:
+       raise
+ 
+@@ -253,7 +253,7 @@
+   if sys.platform == 'win32':
+     # Give up and use cmd.exe's rd command.
+     file_path = os.path.normcase(file_path)
+-    for _ in xrange(3):
++    for _ in range(3):
+       print('RemoveDirectory running %s' % (' '.join(
+           ['cmd.exe', '/c', 'rd', '/q', '/s', file_path])))
+       if not subprocess.call(['cmd.exe', '/c', 'rd', '/q', '/s', file_path]):
+@@ -305,7 +305,7 @@
+   for root, dirs, files in os.walk(file_path, topdown=False):
+     # For POSIX: making the directory writable guarantees removability.
+     # Windows will ignore the non-read-only bits in the chmod value.
+-    os.chmod(root, 0770)
++    os.chmod(root, 0o770)
+     for name in files:
+       remove_with_retry(os.remove, os.path.join(root, name))
+     for name in dirs:
+@@ -505,4 +505,4 @@
+     if IsMac():
+       # Restore permission bits.
+       os.chmod(os.path.join(output_dir, name),
+-               zf.getinfo(name).external_attr >> 16L)
++               zf.getinfo(name).external_attr >> 16)
+--- a/src/3rdparty/chromium/tools/check_ecs_deps/check_ecs_deps.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/check_ecs_deps/check_ecs_deps.py	2025-01-16 02:26:08.622762342 +0800
+@@ -6,7 +6,7 @@
+ ''' Verifies that builds of the embedded content_shell do not included
+ unnecessary dependencies.'''
+ 
+-from __future__ import print_function
++
+ 
+ import os
+ import re
+@@ -147,11 +147,11 @@
+   if options.verbose:
+     output['verbose'] = lambda x: stdmsg(None, x)
+ 
+-  forbidden_regexp = re.compile(string.join(map(re.escape,
+-      kUndesiredLibraryList), '|'))
++  forbidden_regexp = re.compile('|'.join(map(re.escape,
++      kUndesiredLibraryList)))
+   mapping_regexp = re.compile(r"\s*([^/]*) => (.*)")
+-  blessed_regexp = re.compile(r"(%s)[-0-9.]*\.so" % string.join(map(re.escape,
+-      kAllowedLibraryList), '|'))
++  blessed_regexp = re.compile(r"(%s)[-0-9.]*\.so" % '|'.join(map(re.escape,
++      kAllowedLibraryList)))
+   built_regexp = re.compile(re.escape(build_dir + os.sep))
+ 
+   success = 0
+--- a/src/3rdparty/chromium/tools/checkbins/checkbins.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/checkbins/checkbins.py	2025-01-16 02:26:08.622762342 +0800
+@@ -10,7 +10,7 @@
+ /NXCOMPAT, /DYNAMICBASE and /SAFESEH.
+ """ + +-from __future__ import print_function ++ + + import json + import os +@@ -58,7 +58,7 @@ + + # Load FILES.cfg + exec_globals = {'__builtins__': None} +- execfile(FILES_CFG, exec_globals) ++ exec(compile(open(FILES_CFG, "rb").read(), FILES_CFG, 'exec'), exec_globals) + files_cfg = exec_globals['FILES'] + + # Determines whether a specified file is in the 'default' +--- a/src/3rdparty/chromium/tools/checklicenses/checklicenses.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/checklicenses/checklicenses.py 2025-01-16 02:26:08.622762342 +0800 +@@ -5,7 +5,7 @@ + + """Makes sure that all files contain proper licensing information.""" + +-from __future__ import print_function ++ + + import json + import optparse +@@ -750,7 +750,7 @@ + + if not len(args): + unused_suppressions = set( +- PATH_SPECIFIC_WHITELISTED_LICENSES.iterkeys()).difference( ++ PATH_SPECIFIC_WHITELISTED_LICENSES.keys()).difference( + used_suppressions) + if unused_suppressions: + print("\nNOTE: unused suppressions detected:\n") +--- a/src/3rdparty/chromium/tools/checkperms/checkperms.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/checkperms/checkperms.py 2025-01-16 02:26:08.622762342 +0800 +@@ -28,7 +28,7 @@ + file paths should be only lowercase. + """ + +-from __future__ import print_function ++ + + import json + import logging +@@ -328,7 +328,7 @@ + def check_files(root, files): + gen = (check_file(root, f) for f in files + if not is_ignored(f) and not os.path.isdir(f)) +- return filter(None, gen) ++ return [_f for _f in gen if _f] + + + class ApiBase(object): +--- a/src/3rdparty/chromium/tools/checkteamtags/checkteamtags.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/checkteamtags/checkteamtags.py 2025-01-16 02:26:08.622762342 +0800 +@@ -5,7 +5,7 @@ + + """Makes sure OWNERS files have consistent TEAM and COMPONENT tags.""" + +-from __future__ import print_function ++ + + import json + import logging +@@ -14,7 +14,7 @@ + import posixpath + import re + import sys +-import urllib2 ++import urllib.request, urllib.error, urllib.parse + + from collections import defaultdict + +@@ -89,7 +89,7 @@ + Returns: + A string containing the details of any multi-team per component. + """ +- mappings_file = json.load(urllib2.urlopen(options.current_mapping_url)) ++ mappings_file = json.load(urllib.request.urlopen(options.current_mapping_url)) + new_dir_to_component = mappings_file.get('dir-to-component', {}) + new_dir_to_team = mappings_file.get('dir-to-team', {}) + +@@ -106,7 +106,7 @@ + deleted.append(os.path.dirname(rel)) + + # Update component mapping with current changes. +- for rel_path_native, tags in affected.iteritems(): ++ for rel_path_native, tags in affected.items(): + # Make the path use forward slashes always. + rel_path = uniform_path_format(rel_path_native) + component = tags.get('component') +@@ -132,7 +132,7 @@ + # For the components affected by this patch, compute the directories that map + # to it. + affected_component_to_dirs = {} +- for d, component in new_dir_to_component.iteritems(): ++ for d, component in new_dir_to_component.items(): + if component in affected_components: + affected_component_to_dirs.setdefault(component, []) + affected_component_to_dirs[component].append(d) +@@ -143,12 +143,12 @@ + new_dir_to_team[d] + for d in dirs + if d in new_dir_to_team +- ])) for component, dirs in affected_component_to_dirs.iteritems() ++ ])) for component, dirs in affected_component_to_dirs.items() + } + + # Perform cardinality check. 
+ warnings = '' +- for component, teams in affected_component_to_teams.iteritems(): ++ for component, teams in affected_component_to_teams.items(): + if len(teams) > 1: + warnings += ('\nThe set of all OWNERS files with COMPONENT: %s list ' + "multiple TEAM's: %s") % (component, ', '.join(teams)) +@@ -234,8 +234,8 @@ + levels = [logging.ERROR, logging.INFO, logging.DEBUG] + logging.basicConfig(level=levels[min(len(levels) - 1, options.verbose)]) + +- errors = filter(None, [check_owners(*rel_and_full_paths(options.root, f)) +- for f in args]) ++ errors = [_f for _f in [check_owners(*rel_and_full_paths(options.root, f)) ++ for f in args] if _f] + + warnings = None + if not errors: +--- a/src/3rdparty/chromium/tools/checkteamtags/extract_components.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/checkteamtags/extract_components.py 2025-01-16 02:26:08.622762342 +0800 +@@ -16,7 +16,7 @@ + Refer to crbug.com/667952 + """ + +-from __future__ import print_function ++ + + import json + import optparse +--- a/src/3rdparty/chromium/tools/checkteamtags/extract_components_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/checkteamtags/extract_components_test.py 2025-01-16 02:26:08.622762342 +0800 +@@ -8,7 +8,7 @@ + import sys + import unittest + +-from StringIO import StringIO ++from io import StringIO + + import extract_components + +--- a/src/3rdparty/chromium/tools/checkteamtags/owners_file_tags.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/checkteamtags/owners_file_tags.py 2025-01-16 02:26:08.622762342 +0800 +@@ -81,7 +81,7 @@ + dir_to_component = {} + dir_missing_info_by_depth = defaultdict(list) + dir_to_team = {} +- for rel_dirname, owners_data in all_owners_data.iteritems(): ++ for rel_dirname, owners_data in all_owners_data.items(): + # Normalize this relative path to posix-style to make counting separators + # work correctly as a means of obtaining the file_depth. + rel_path = uniform_path_format(os.path.relpath(rel_dirname, root)) +@@ -114,10 +114,10 @@ + + mappings = { + 'component-to-team': { +- k: v['team'] for k, v in topmost_team.iteritems() ++ k: v['team'] for k, v in topmost_team.items() + }, + 'teams-per-component': { +- k: sorted(list(v)) for k, v in teams_per_component.iteritems() ++ k: sorted(list(v)) for k, v in teams_per_component.items() + }, + 'dir-to-component': dir_to_component, + 'dir-to-team': dir_to_team, +--- a/src/3rdparty/chromium/tools/clang/blink_gc_plugin/process-graph.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/clang/blink_gc_plugin/process-graph.py 2025-01-16 02:26:08.622762342 +0800 +@@ -3,11 +3,11 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + import argparse, os, sys, json, subprocess, pickle + + try: +- from StringIO import StringIO # Python 2 ++ from io import StringIO # Python 2 + except: + from io import StringIO + +@@ -64,7 +64,7 @@ + + try: + # Python3 remove sys.maxint. +- maxint = sys.maxint ++ maxint = sys.maxsize + except AttributeError: + # Also see https://stackoverflow.com/a/13795777/4052492. + maxint = sys.maxsize +@@ -99,7 +99,7 @@ + node.counts[ptr] += 1 + + def add_counts(s1, s2): +- for (k, v) in s2.iteritems(): ++ for (k, v) in s2.items(): + s1[k] += s2[k] + + # Representation of graph nodes. Basically a map of directed edges. 
+@@ -122,10 +122,10 @@ + else: + self.edges[new_edge.key] = new_edge + def super_edges(self): +- return [e for e in self.edges.values() if e.is_super()] ++ return [e for e in list(self.edges.values()) if e.is_super()] + + def subclass_edges(self): +- return [e for e in self.edges.values() if e.is_subclass()] ++ return [e for e in list(self.edges.values()) if e.is_subclass()] + + def reset(self): + self.cost = maxint +@@ -135,7 +135,7 @@ + for ptr in ptr_types: + self.counts[ptr] = 0 + def update_counts(self): +- for e in self.edges.values(): ++ for e in list(self.edges.values()): + inc_ptr(e.dst, e.ptr) + + # Representation of directed graph edges. +@@ -214,7 +214,7 @@ + copy_super_edges(e) + # Copy strong super-class edges (ignoring sub-class edges) to the sub class. + sub_node = graph[edge.src] +- for e in super_node.edges.values(): ++ for e in list(super_node.edges.values()): + if e.keeps_alive() and not e.is_subclass(): + new_edge = Edge( + src = sub_node.name, +@@ -237,16 +237,16 @@ + super_node.edges[sub_edge.key] = sub_edge + + def complete_graph(): +- for node in graph.values(): ++ for node in list(graph.values()): + for edge in node.super_edges(): + copy_super_edges(edge) +- for edge in node.edges.values(): ++ for edge in list(node.edges.values()): + if edge.is_root(): + roots.append(edge) + log("Copied edges down edges for %d graph nodes" % global_inc_copy) + + def reset_graph(): +- for n in graph.values(): ++ for n in list(graph.values()): + n.reset() + + def shortest_path(start, end): +@@ -258,7 +258,7 @@ + current.visited = True + if current == end or current.cost >= end.cost + 1: + return +- for e in current.edges.values(): ++ for e in list(current.edges.values()): + if not e.keeps_alive(): + continue + dst = graph.get(e.dst) +@@ -385,7 +385,7 @@ + gc_managed = [] + hierarchies = [] + +- for node in graph.values(): ++ for node in list(graph.values()): + node.update_counts() + for sup in node.super_edges(): + if sup.dst in gcref_bases: +--- a/src/3rdparty/chromium/tools/clang/pylib/clang/compile_db.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/clang/pylib/clang/compile_db.py 2025-01-16 02:26:08.622762342 +0800 +@@ -3,7 +3,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import json + import os +@@ -51,8 +51,7 @@ + # not recognized or used by clangd. They only suppress or activate graphical + # output anyway. + blocklisted_arguments = ['/nologo', '/showIncludes'] +- command_parts = filter(lambda arg: arg not in blocklisted_arguments, +- command.split()) ++ command_parts = [arg for arg in command.split() if arg not in blocklisted_arguments] + + return " ".join(command_parts) + +--- a/src/3rdparty/chromium/tools/clang/pylib/clang/compile_db_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/clang/pylib/clang/compile_db_test.py 2025-01-16 02:26:08.622762342 +0800 +@@ -9,7 +9,7 @@ + import sys + import unittest + +-import compile_db ++from . import compile_db + + + # Input compile DB. +--- a/src/3rdparty/chromium/tools/clang/pylib/clang/plugin_testing.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/clang/pylib/clang/plugin_testing.py 2025-01-16 02:26:08.622762342 +0800 +@@ -3,7 +3,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ +-from __future__ import print_function ++ + + import glob + import os +--- a/src/3rdparty/chromium/tools/clang/scripts/apply_edits.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/clang/scripts/apply_edits.py 2025-01-16 02:26:08.622762342 +0800 +@@ -283,7 +283,7 @@ + edit_count = 0 + error_count = 0 + done_files = 0 +- for k, v in edits.iteritems(): ++ for k, v in edits.items(): + tmp_edit_count, tmp_error_count = _ApplyEditsToSingleFile(k, v) + edit_count += tmp_edit_count + error_count += tmp_error_count +@@ -356,7 +356,7 @@ + filenames = set(_GetFilesFromGit(args.path_filter)) + edits = _ParseEditsFromStdin(args.p) + return _ApplyEdits( +- {k: v for k, v in edits.iteritems() ++ {k: v for k, v in edits.items() + if os.path.realpath(k) in filenames}) + + +--- a/src/3rdparty/chromium/tools/clang/scripts/apply_edits_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/clang/scripts/apply_edits_test.py 2025-01-16 02:26:08.622762342 +0800 +@@ -508,11 +508,11 @@ + # + # Previous versions of apply_edits.py would not skip the BOM marker when + # figuring out where to insert the new include header. +- old_contents = u'''\ufeff// Copyright ++ old_contents = '''\ufeff// Copyright + + #include "old/header.h" + ''' +- expected_new_contents = u'''\ufeff// Copyright ++ expected_new_contents = '''\ufeff// Copyright + + #include "new/header.h" + #include "old/header.h" +@@ -563,7 +563,7 @@ + expected_msg_regex = 'Conflicting replacement text' + expected_msg_regex += '.*some_file.cc at offset 4, length 3' + expected_msg_regex += '.*"bar" != "foo"' +- with self.assertRaisesRegexp(ValueError, expected_msg_regex): ++ with self.assertRaisesRegex(ValueError, expected_msg_regex): + _ApplyEdit(old_text, edit, last_edit=last) + + def testUnrecognizedEditDirective(self): +@@ -571,7 +571,7 @@ + edit = apply_edits.Edit('unknown_directive', 123, 456, "foo") + expected_msg_regex = 'Unrecognized edit directive "unknown_directive"' + expected_msg_regex += '.*some_file.cc' +- with self.assertRaisesRegexp(ValueError, expected_msg_regex): ++ with self.assertRaisesRegex(ValueError, expected_msg_regex): + _ApplyEdit(old_text, edit) + + def testOverlappingReplacement(self): +@@ -582,7 +582,7 @@ + expected_msg_regex += '.*some_file.cc' + expected_msg_regex += '.*offset 0, length 7.*"bar"' + expected_msg_regex += '.*offset 4, length 7.*"foo"' +- with self.assertRaisesRegexp(ValueError, expected_msg_regex): ++ with self.assertRaisesRegex(ValueError, expected_msg_regex): + _ApplyEdit(old_text, edit, last_edit=last) + + +--- a/src/3rdparty/chromium/tools/clang/scripts/apply_fixits.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/clang/scripts/apply_fixits.py 2025-01-16 02:26:08.622762342 +0800 +@@ -18,7 +18,7 @@ + # 3. 
Apply the fixits with this script: + # python apply_fixits.py -p < generated-fixits + +-from __future__ import print_function ++ + + import argparse + import collections +@@ -60,7 +60,7 @@ + fixits[m.group('file')].append(FixIt( + int(m.group('start_line')), -int(m.group('start_col')), int(m.group( + 'end_line')), -int(m.group('end_col')), m.group('text'))) +- for k, v in fixits.iteritems(): ++ for k, v in fixits.items(): + v.sort() + with open(os.path.join(args.p, k), 'rb+') as f: + lines = f.readlines() +--- a/src/3rdparty/chromium/tools/clang/scripts/build.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/clang/scripts/build.py 2025-01-16 02:26:08.622762342 +0800 +@@ -10,7 +10,7 @@ + nobody should run this script as part of normal development. + """ + +-from __future__ import print_function ++ + + import argparse + import glob +--- a/src/3rdparty/chromium/tools/clang/scripts/build_clang_tools_extra.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/clang/scripts/build_clang_tools_extra.py 2025-01-16 02:26:08.622762342 +0800 +@@ -10,7 +10,7 @@ + clangd-indexer + """ + +-from __future__ import print_function ++ + + import argparse + import errno +--- a/src/3rdparty/chromium/tools/clang/scripts/build_file.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/clang/scripts/build_file.py 2025-01-16 02:26:08.622762342 +0800 +@@ -3,7 +3,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import argparse + import json +--- a/src/3rdparty/chromium/tools/clang/scripts/clang_tidy_tool.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/clang/scripts/clang_tidy_tool.py 2025-01-16 02:26:08.622762342 +0800 +@@ -23,7 +23,7 @@ + out/Release chrome + """ + +-from __future__ import print_function ++ + + import argparse + import os +--- a/src/3rdparty/chromium/tools/clang/scripts/expand_thin_archives.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/clang/scripts/expand_thin_archives.py 2025-01-16 02:26:08.622762342 +0800 +@@ -6,10 +6,10 @@ + # Library and tool to expand command lines that mention thin archives + # into command lines that mention the contained object files. + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function +-from __future__ import unicode_literals ++ ++ ++ ++ + + import argparse + import sys +--- a/src/3rdparty/chromium/tools/clang/scripts/extract_edits.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/clang/scripts/extract_edits.py 2025-01-16 02:26:08.622762342 +0800 +@@ -40,7 +40,7 @@ + | sort | uniq + """ + +-from __future__ import print_function ++ + + import sys + +--- a/src/3rdparty/chromium/tools/clang/scripts/goma_ld.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/clang/scripts/goma_ld.py 2025-01-16 02:26:08.622762342 +0800 +@@ -9,10 +9,10 @@ + # E.g. original: clang++ -o foo foo.o + # Becomes: goma-ld clang++ -o foo foo.o + +-from __future__ import absolute_import +-from __future__ import division +-from __future__ import print_function +-from __future__ import unicode_literals ++ ++ ++ ++ + + import goma_link + +--- a/src/3rdparty/chromium/tools/clang/scripts/goma_link.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/clang/scripts/goma_link.py 2025-01-16 02:26:08.622762342 +0800 +@@ -9,10 +9,10 @@ + # E.g. 
original: lld-link -out:foo foo.obj
+ # Becomes: goma_link.py lld-link -out:foo foo.obj
+ 
+-from __future__ import absolute_import
+-from __future__ import division
+-from __future__ import print_function
+-from __future__ import unicode_literals
++
++
++
++
+ 
+ import argparse
+ import errno
+@@ -30,7 +30,7 @@
+ # which is like long in Python 2. So we check if long is defined, and, if not,
+ # define it to be the same as int.
+ try:
+-  long
++  int
+ except NameError:
+   long = int
+ 
+@@ -124,13 +124,13 @@
+       f.seek(32, io.SEEK_CUR)
+       m = re.match(b'/([0-9]+)', file_id)
+       if long_names and m:
+-        name_pos = long(m.group(1))
++        name_pos = int(m.group(1))
+         name_end = long_names.find(b'/\n', name_pos)
+         name = long_names[name_pos:name_end]
+       else:
+         name = file_id
+       try:
+-        size = long(f.read(10))
++        size = int(f.read(10))
+       except:
+         sys.stderr.write('While parsing %r, pos %r\n' % (path, f.tell()))
+         raise
+--- a/src/3rdparty/chromium/tools/clang/scripts/goma_link_integration_tests.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/clang/scripts/goma_link_integration_tests.py	2025-01-16 02:26:08.622762342 +0800
+@@ -393,7 +393,7 @@
+     dbginfo = subprocess.check_output(
+         ['llvm-dwarfdump', '-debug-info', 'main']).decode(
+             'utf-8', 'backslashreplace')
+-    self.assertRegexpMatches(dbginfo, '\\bDW_AT_GNU_dwo_name\\b.*\\.dwo"')
+-    self.assertNotRegexpMatches(dbginfo, '\\bDW_AT_name\\b.*foo\\.cpp"')
++    self.assertRegex(dbginfo, '\\bDW_AT_GNU_dwo_name\\b.*\\.dwo"')
++    self.assertNotRegex(dbginfo, '\\bDW_AT_name\\b.*foo\\.cpp"')
+ 
+   def test_distributed_lto_params(self):
+--- a/src/3rdparty/chromium/tools/clang/scripts/package.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/clang/scripts/package.py	2025-01-16 02:26:08.622762342 +0800
+@@ -6,7 +6,7 @@
+ """This script will check out llvm and clang, and then package the results up
+ to a tgz file."""
+ 
+-from __future__ import print_function
++
+ 
+ import argparse
+ import fnmatch
+--- a/src/3rdparty/chromium/tools/clang/scripts/process_crashreports.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/clang/scripts/process_crashreports.py	2025-01-16 02:26:08.622762342 +0800
+@@ -6,7 +6,7 @@
+ """Looks for crash reports in tools/clang/crashreports and uploads them to GCS.
+ """ + +-from __future__ import print_function ++ + + import argparse + import datetime +--- a/src/3rdparty/chromium/tools/clang/scripts/run_tool.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/clang/scripts/run_tool.py 2025-01-16 02:26:08.622762342 +0800 +@@ -47,7 +47,7 @@ + apply_edits.py reads edit lines from stdin and applies the edits + """ + +-from __future__ import print_function ++ + + import argparse + from collections import namedtuple +--- a/src/3rdparty/chromium/tools/clang/scripts/test_tool.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/clang/scripts/test_tool.py 2025-01-16 02:26:08.622762342 +0800 +@@ -5,7 +5,7 @@ + + """Test harness for chromium clang tools.""" + +-from __future__ import print_function ++ + + import argparse + import difflib +@@ -145,8 +145,7 @@ + + + def _NormalizeRawOutput(output_lines, test_dir): +- return map(lambda line: _NormalizeSingleRawOutputLine(line, test_dir), +- output_lines) ++ return [_NormalizeSingleRawOutputLine(line, test_dir) for line in output_lines] + + + def main(argv): +--- a/src/3rdparty/chromium/tools/clang/scripts/update.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/clang/scripts/update.py 2025-01-16 02:26:08.622762342 +0800 +@@ -14,8 +14,8 @@ + (Note that the output dir may be deleted and re-created if it exists.) + """ + +-from __future__ import division +-from __future__ import print_function ++ ++ + import argparse + import os + import shutil +@@ -26,7 +26,8 @@ + import time + + try: +- from urllib2 import HTTPError, URLError, urlopen ++ from urllib.error import HTTPError, URLError ++ from urllib.request import urlopen + except ImportError: # For Py3 compatibility + from urllib.error import HTTPError, URLError + from urllib.request import urlopen +--- a/src/3rdparty/chromium/tools/clang/scripts/upload_revision.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/clang/scripts/upload_revision.py 2025-01-16 02:26:08.622762342 +0800 +@@ -7,7 +7,7 @@ + creates a feature branch, puts this revision into update.py, uploads + a CL, triggers Clang Upload try bots, and tells what to do next""" + +-from __future__ import print_function ++ + + import argparse + import fnmatch +--- a/src/3rdparty/chromium/tools/clang/translation_unit/test_translation_unit.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/clang/translation_unit/test_translation_unit.py 2025-01-16 02:26:08.622762342 +0800 +@@ -5,7 +5,7 @@ + + """Test for TranslationUnitGenerator tool.""" + +-from __future__ import print_function ++ + + import difflib + import glob +--- a/src/3rdparty/chromium/tools/code_coverage/coverage.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/code_coverage/coverage.py 2025-01-16 02:26:08.622762342 +0800 +@@ -65,7 +65,7 @@ + https://chromium.googlesource.com/chromium/src/+/master/docs/testing/code_coverage.md + """ + +-from __future__ import print_function ++ + + import sys + +@@ -78,7 +78,7 @@ + import shlex + import shutil + import subprocess +-import urllib2 ++import urllib.request, urllib.error, urllib.parse + + sys.path.append( + os.path.join( +@@ -335,7 +335,7 @@ + output_file_path = os.path.join(_GetLogsDirectoryPath(), output_file_name) + + profdata_file_path = None +- for _ in xrange(MERGE_RETRIES): ++ for _ in range(MERGE_RETRIES): + logging.info('Running command: "%s", the output is redirected to "%s".', + command, output_file_path) + +@@ -1040,7 +1040,7 @@ + args.ignore_filename_regex, 
args.format) + component_mappings = None + if not args.no_component_view: +- component_mappings = json.load(urllib2.urlopen(COMPONENT_MAPPING_URL)) ++ component_mappings = json.load(urllib.request.urlopen(COMPONENT_MAPPING_URL)) + + # Call prepare here. + processor = coverage_utils.CoverageReportPostProcessor( +--- a/src/3rdparty/chromium/tools/compile_test/compile_test.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/compile_test/compile_test.py 2025-01-16 02:26:08.622762342 +0800 +@@ -9,7 +9,7 @@ + This is similar to checks done by ./configure scripts. + """ + +-from __future__ import print_function ++ + + import optparse + import os +--- a/src/3rdparty/chromium/tools/coverity/coverity.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/coverity/coverity.py 2025-01-16 02:26:08.622762342 +0800 +@@ -24,7 +24,7 @@ + + """ + +-from __future__ import print_function ++ + + import optparse + import os +@@ -112,7 +112,7 @@ + try: + lock_file = os.open(lock_filename, + os.O_CREAT | os.O_EXCL | os.O_TRUNC | os.O_RDWR) +- except OSError, err: ++ except OSError as err: + print('Failed to open lock file:\n ' + str(err)) + return 1 + +--- a/src/3rdparty/chromium/tools/cr/main.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cr/main.py 2025-01-16 02:26:08.622762342 +0800 +@@ -7,7 +7,7 @@ + Holds the main function and all it's support code. + """ + +-from __future__ import print_function ++ + + import os + import sys +--- a/src/3rdparty/chromium/tools/cr/cr/autocomplete.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cr/cr/autocomplete.py 2025-01-16 02:26:08.626012286 +0800 +@@ -8,7 +8,7 @@ + current command line. + """ + +-from __future__ import print_function ++ + + import cr + +--- a/src/3rdparty/chromium/tools/cr/cr/config.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cr/cr/config.py 2025-01-16 02:26:08.626012286 +0800 +@@ -203,7 +203,7 @@ + return self + + def ApplyMap(self, arg): +- for key, value in arg.items(): ++ for key, value in list(arg.items()): + self._Set(key, value) + return self + +--- a/src/3rdparty/chromium/tools/cr/cr/loader.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cr/cr/loader.py 2025-01-16 02:26:08.626012286 +0800 +@@ -12,7 +12,7 @@ + boilerplate so the actual functionality is clearer. + """ + +-from __future__ import print_function ++ + + from importlib import import_module + import os +@@ -31,7 +31,7 @@ + + def _AutoExportScanner(module): + """Scan the modules for things that need wiring up automatically.""" +- for name, value in module.__dict__.items(): ++ for name, value in list(module.__dict__.items()): + if isinstance(value, type) and issubclass(value, AutoExport): + # Add this straight to the cr module. + if not hasattr(cr, name): +--- a/src/3rdparty/chromium/tools/cr/cr/plugin.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cr/cr/plugin.py 2025-01-16 02:26:08.626012286 +0800 +@@ -10,7 +10,7 @@ + discover plugins as they are loaded. + """ + +-from __future__ import print_function ++ + + from operator import attrgetter + +@@ -206,7 +206,7 @@ + plugin = cls() + _plugins[cls] = plugin + # Wire up the hierarchy for Config objects. 
+- for name, value in cls.__dict__.items(): ++ for name, value in list(cls.__dict__.items()): + if isinstance(value, cr.Config): + for base in cls.__bases__: + child = getattr(base, name, None) +--- a/src/3rdparty/chromium/tools/cr/cr/visitor.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cr/cr/visitor.py 2025-01-16 02:26:08.626012286 +0800 +@@ -128,7 +128,7 @@ + if self.current_node.export is False: + # not exporting from this config + return +- for key in store.keys(): ++ for key in list(store.keys()): + if key in self.store: + # duplicate + continue +--- a/src/3rdparty/chromium/tools/cr/cr/actions/adb.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cr/cr/actions/adb.py 2025-01-16 02:26:08.626012286 +0800 +@@ -4,7 +4,7 @@ + + """A module to hold adb specific action implementations.""" + +-from __future__ import print_function ++ + + import re + +--- a/src/3rdparty/chromium/tools/cr/cr/actions/gn.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cr/cr/actions/gn.py 2025-01-16 02:26:08.626012286 +0800 +@@ -4,7 +4,7 @@ + + """A module to add gn support to cr.""" + +-from __future__ import print_function ++ + + import cr + import os +@@ -23,7 +23,7 @@ + def UpdateContext(self): + # Collapse GN_ARGS from all GN_ARG prefixes. + gn_args = cr.context.Find('GN_ARGS') or '' +- for key, value in cr.context.exported.items(): ++ for key, value in list(cr.context.exported.items()): + if key.startswith(GN_ARG_PREFIX): + gn_args += ' %s=%s' % (key[len(GN_ARG_PREFIX):], value) + +@@ -72,7 +72,7 @@ + arg_lines.append(line.strip()) + + # Append new settings. +- for key, value in args.items(): ++ for key, value in list(args.items()): + arg_lines.append('%s = %s' % (key, value)) + + try: +--- a/src/3rdparty/chromium/tools/cr/cr/base/android.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cr/cr/base/android.py 2025-01-16 02:26:08.626012286 +0800 +@@ -4,7 +4,7 @@ + + """The android specific platform implementation module.""" + +-from __future__ import print_function ++ + + import os + import subprocess +--- a/src/3rdparty/chromium/tools/cr/cr/base/client.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cr/cr/base/client.py 2025-01-16 02:26:08.626012286 +0800 +@@ -10,7 +10,7 @@ + rest of the cr tool what the client is capable of. 
+ """ + +-from __future__ import print_function ++ + + import os + import pprint +@@ -138,7 +138,7 @@ + + def _WriteConfig(writer, data): + writer.write(CONFIG_FILE_PREFIX) +- for key, value in data.items(): ++ for key, value in list(data.items()): + writer.write(CONFIG_VAR_LINE.format(key, value)) + writer.write(CONFIG_FILE_SUFFIX) + +@@ -194,7 +194,7 @@ + gclient_file = cr.context.Substitute( + os.path.join('{CR_CLIENT_PATH}', GCLIENT_FILENAME)) + spec = '\n'.join('%s = %s' % (key, pprint.pformat(value)) +- for key,value in cr.context.gclient.items()) ++ for key,value in list(cr.context.gclient.items())) + if cr.context.dry_run: + print('Write the following spec to', gclient_file) + print(spec) +@@ -259,7 +259,7 @@ + print('Build config file is', _GetConfigFile( + _GetConfigDir(use_build_dir=True))) + try: +- for name in cr.auto.build.config.OVERRIDES.exported.keys(): ++ for name in list(cr.auto.build.config.OVERRIDES.exported.keys()): + print(' ', name, '=', cr.context.Get(name)) + except AttributeError: + pass +--- a/src/3rdparty/chromium/tools/cr/cr/base/context.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cr/cr/base/context.py 2025-01-16 02:26:08.626012286 +0800 +@@ -8,7 +8,7 @@ + This includes the configuration variables and command line handling. + """ + +-from __future__ import print_function ++ + + import argparse + import os +@@ -122,7 +122,7 @@ + self.AddChildren(*cr.config.GLOBALS) + self.AddChildren( + cr.config.Config('ENVIRONMENT', literal=True, export=True).Set( +- {k: self.ParseValue(v) for k, v in os.environ.items()}), ++ {k: self.ParseValue(v) for k, v in list(os.environ.items())}), + self._data.arguments, + self._data.derived, + ) +@@ -235,7 +235,7 @@ + self._data.arguments.Wipe() + if self._data.args: + self._data.arguments.Set( +- {k: v for k, v in vars(self._data.args).items() if v is not None}) ++ {k: v for k, v in list(vars(self._data.args).items()) if v is not None}) + + def DumpValues(self, with_source): + _DumpVisitor(with_source).VisitNode(self) +--- a/src/3rdparty/chromium/tools/cr/cr/base/host.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cr/cr/base/host.py 2025-01-16 02:26:08.626012286 +0800 +@@ -4,7 +4,7 @@ + + """Module for build host support.""" + +-from __future__ import print_function ++ + + import os + import pipes +@@ -77,7 +77,7 @@ + """ + with cr.context.Trace(): + command = [cr.context.Substitute(arg) for arg in command if arg] +- command = filter(bool, command) ++ command = list(filter(bool, command)) + trail = cr.context.trail + if not command: + print('Empty command passed to execute') +@@ -95,7 +95,7 @@ + try: + p = subprocess.Popen( + command, shell=shell, +- env={k: str(v) for k, v in cr.context.exported.items()}, ++ env={k: str(v) for k, v in list(cr.context.exported.items())}, + stdout=out) + except OSError: + print('Failed to exec', command) +@@ -159,7 +159,7 @@ + True if the response was yes. 
+ """ + options = 'Y/n' if default else 'y/N' +- result = raw_input(question + ' [' + options + '] ').lower() ++ result = input(question + ' [' + options + '] ').lower() + if result == '': + return default + return result in ['y', 'yes'] +--- a/src/3rdparty/chromium/tools/cr/cr/commands/args.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cr/cr/commands/args.py 2025-01-16 02:26:08.626012286 +0800 +@@ -4,7 +4,7 @@ + + """A module for the args command.""" + +-from __future__ import print_function ++ + + import os + +--- a/src/3rdparty/chromium/tools/cr/cr/commands/clobber.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cr/cr/commands/clobber.py 2025-01-16 02:26:08.626012286 +0800 +@@ -4,7 +4,7 @@ + + """A module for the clobber command.""" + +-from __future__ import print_function ++ + + import os + +--- a/src/3rdparty/chromium/tools/cr/cr/commands/info.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cr/cr/commands/info.py 2025-01-16 02:26:08.626012286 +0800 +@@ -4,7 +4,7 @@ + + """A module for the info implementation of Command.""" + +-from __future__ import print_function ++ + + import cr + +--- a/src/3rdparty/chromium/tools/cr/cr/commands/init.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cr/cr/commands/init.py 2025-01-16 02:26:08.626012286 +0800 +@@ -4,7 +4,7 @@ + + """A module for the init command.""" + +-from __future__ import print_function ++ + + import os + +--- a/src/3rdparty/chromium/tools/cr/cr/commands/shell.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cr/cr/commands/shell.py 2025-01-16 02:26:08.626012286 +0800 +@@ -4,7 +4,7 @@ + + """A module for the shell command.""" + +-from __future__ import print_function ++ + + import os + import tempfile +--- a/src/3rdparty/chromium/tools/cr/cr/fixups/arch.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cr/cr/fixups/arch.py 2025-01-16 02:26:08.626012286 +0800 +@@ -4,7 +4,7 @@ + + """A module for architecture output directory fixups.""" + +-from __future__ import print_function ++ + + import cr + +--- a/src/3rdparty/chromium/tools/cr/cr/targets/target.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cr/cr/targets/target.py 2025-01-16 02:26:08.626012286 +0800 +@@ -3,7 +3,7 @@ + # found in the LICENSE file. + """Module to hold the Target plugin.""" + +-from __future__ import print_function ++ + + import operator + import re +@@ -59,7 +59,7 @@ + self.target_name = self.Find('CR_TARGET_NAME') + + def GetRunDependencies(self): +- return map(Target.CreateTarget, self.Get('CR_RUN_DEPENDENCIES')) ++ return list(map(Target.CreateTarget, self.Get('CR_RUN_DEPENDENCIES'))) + + @property + def build_target(self): +--- a/src/3rdparty/chromium/tools/cygprofile/cluster.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cygprofile/cluster.py 2025-01-16 02:26:08.626012286 +0800 +@@ -173,7 +173,7 @@ + neighbors = [] + for sym_list in sym_lists: + for i, s in enumerate(sym_list): +- for j in xrange(i + 1, min(i + self.NEIGHBOR_DISTANCE, len(sym_list))): ++ for j in range(i + 1, min(i + self.NEIGHBOR_DISTANCE, len(sym_list))): + if s == sym_list[j]: + # Free functions that are static inline seem to be the only + # source of these duplicates. +@@ -294,17 +294,17 @@ + # |process_type| can be : browser, renderer, gpu-process, etc. 
+ for process_type in offsets_graph: + for process in offsets_graph[process_type]: +- process = sorted(process, key=lambda k: long(k['index'])) ++ process = sorted(process, key=lambda k: int(k['index'])) + graph_list = [] + for el in process: +- index = long(el['index']) ++ index = int(el['index']) + callee_symbol = _GetOffsetSymbolName(processor, +- long(el['callee_offset'])) ++ int(el['callee_offset'])) + misses = 0 + caller_and_count = [] + for bucket in el['caller_and_count']: +- caller_offset = long(bucket['caller_offset']) +- count = long(bucket['count']) ++ caller_offset = int(bucket['caller_offset']) ++ count = int(bucket['count']) + if caller_offset == 0: + misses += count + continue +@@ -381,7 +381,7 @@ + browser_clustering = Clustering.ClusteredSymbolLists( + process_symbols[_BROWSER], size_map) + other_lists = [] +- for process, syms in process_symbols.items(): ++ for process, syms in list(process_symbols.items()): + if process not in (_RENDERER, _BROWSER): + other_lists.extend(syms) + if other_lists: +--- a/src/3rdparty/chromium/tools/cygprofile/cluster_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cygprofile/cluster_unittest.py 2025-01-16 02:26:08.626012286 +0800 +@@ -158,12 +158,12 @@ + self.assertFalse((n.src, n.dst) in distances) + distances[(n.src, n.dst)] = n.dist + self.assertEqual(5, len(distances)) +- self.assertEquals(-2, distances[('a', 'b')]) +- self.assertEquals(-2, distances[('a', 'c')]) +- self.assertEquals(-4, distances[('b', 'd')]) +- self.assertEquals(-6, distances[('c', 'd')]) +- self.assertEquals(-100, distances[('d', 'f')]) +- self.assertEquals(list('abcdf'), c.ClusterToList()) ++ self.assertEqual(-2, distances[('a', 'b')]) ++ self.assertEqual(-2, distances[('a', 'c')]) ++ self.assertEqual(-4, distances[('b', 'd')]) ++ self.assertEqual(-6, distances[('c', 'd')]) ++ self.assertEqual(-100, distances[('d', 'f')]) ++ self.assertEqual(list('abcdf'), c.ClusterToList()) + + def testClusterOffsetsFromCallGraph(self): + process1 = ('{"call_graph": [ {' +--- a/src/3rdparty/chromium/tools/cygprofile/compare_orderfiles.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cygprofile/compare_orderfiles.py 2025-01-16 02:26:08.626012286 +0800 +@@ -9,7 +9,7 @@ + updating commit made by the orderfile bot. + """ + +-from __future__ import print_function ++ + + import argparse + import collections +--- a/src/3rdparty/chromium/tools/cygprofile/cyglog_to_orderfile.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cygprofile/cyglog_to_orderfile.py 2025-01-16 02:26:08.626012286 +0800 +@@ -229,7 +229,7 @@ + def _ReadReachedOffsets(filename): + """Reads and returns a list of reached offsets.""" + with open(filename, 'r') as f: +- offsets = [int(x.rstrip('\n')) for x in f.xreadlines()] ++ offsets = [int(x.rstrip('\n')) for x in f] + return offsets + + +--- a/src/3rdparty/chromium/tools/cygprofile/cyglog_to_orderfile_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cygprofile/cyglog_to_orderfile_unittest.py 2025-01-16 02:26:08.626012286 +0800 +@@ -3,7 +3,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
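
The long-to-int rewrites in cluster.py above lean on Python 3 having folded
the two integer types into one: int is arbitrary-precision, so offsets parsed
from the call-graph JSON lose no range. A minimal standalone sketch of the
idiom (illustrative only, separate from the patch hunks):

    # Python 3 'int' covers the full range Python 2 needed 'long' for.
    caller_offset = int('12297829382473034410')  # > 2**63, still exact
    assert caller_offset == 0xAAAAAAAAAAAAAAAA

    # Code that must also keep running on Python 2 can alias the name
    # instead of rewriting every call site:
    try:
        long          # raises NameError on Python 3 only
    except NameError:
        long = int    # 'long(x)' now means 'int(x)'
    assert long('42') == 42
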
+-from __future__ import print_function
++
+ 
+ import collections
+ import os
+@@ -63,7 +63,7 @@
+       else:
+         try:
+           self.assertListEqual(sorted(expected[i]), sorted(observed[i]))
+-        except self.failureException, e:
++        except self.failureException as e:
+           raise self.failureException('For key {}: {}'.format(i, e))
+     for i in observed:
+       # All i that are in expected have already been tested.
+@@ -88,16 +88,16 @@
+     generator = cyglog_to_orderfile.OffsetOrderfileGenerator(
+         test_utils.TestSymbolOffsetProcessor(symbol_infos), None)
+     syms = generator._SymbolsAtOffset(0x10)
+-    self.assertEquals(1, len(syms))
+-    self.assertEquals(symbol_infos[0], syms[0])
++    self.assertEqual(1, len(syms))
++    self.assertEqual(symbol_infos[0], syms[0])
+ 
+   def testSymbolsAtOffsetInexectMatch(self):
+     symbol_infos = [SimpleTestSymbol('1', 0x10, 0x13)]
+     generator = cyglog_to_orderfile.OffsetOrderfileGenerator(
+         test_utils.TestSymbolOffsetProcessor(symbol_infos), None)
+     syms = generator._SymbolsAtOffset(0x11)
+-    self.assertEquals(1, len(syms))
+-    self.assertEquals(symbol_infos[0], syms[0])
++    self.assertEqual(1, len(syms))
++    self.assertEqual(symbol_infos[0], syms[0])
+ 
+   def testSameCtorOrDtorNames(self):
+     same_name = cyglog_to_orderfile.ObjectFileProcessor._SameCtorOrDtorNames
+--- a/src/3rdparty/chromium/tools/cygprofile/orderfile_generator_backend.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/cygprofile/orderfile_generator_backend.py	2025-01-16 02:26:08.626012286 +0800
+@@ -13,7 +13,7 @@
+   tools/cygprofile/orderfile_generator_backend.py --use-goma --target-arch=arm
+ """
+ 
+-from __future__ import print_function
++
+ 
+ import argparse
+ import csv
+@@ -388,7 +388,7 @@
+       Exception if the hash file does not match the file.
+       NotImplementedError when the commit logic hasn't been overridden.
+     """
+-    files_to_commit = list(filter(None, files))
++    files_to_commit = [_f for _f in files if _f]
+     if files_to_commit:
+       self._CommitStashedFiles(files_to_commit)
+ 
+@@ -965,7 +965,7 @@
+     assert self._options.manual_libname
+     assert self._options.manual_objdir
+-    with file(self._options.manual_symbol_offsets) as f:
+-      symbol_offsets = [int(x) for x in f.xreadlines()]
++    with open(self._options.manual_symbol_offsets) as f:
++      symbol_offsets = [int(x) for x in f]
+     processor = process_profiles.SymbolOffsetProcessor(
+         self._compiler.manual_libname)
+     generator = cyglog_to_orderfile.OffsetOrderfileGenerator(
+--- a/src/3rdparty/chromium/tools/cygprofile/patch_orderfile.py	2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/cygprofile/patch_orderfile.py	2025-01-16 02:26:08.626012286 +0800
+@@ -174,7 +174,7 @@
+   # guarantee ordering after code changes before the next orderfile is
+   # generated. So we double the number of outlined functions as a measure of
+   # security.
+-  for idx in xrange(2 * max_outlined_index + 1):
++  for idx in range(2 * max_outlined_index + 1):
+     yield _OUTLINED_FUNCTION_FORMAT.format(idx)
+ 
+ 
+@@ -189,7 +189,7 @@
+   Symbol names, cleaned and unique.
+ """ + with open(orderfile) as f: +- for line in f.xreadlines(): ++ for line in f: + line = line.strip() + if line: + yield line +--- a/src/3rdparty/chromium/tools/cygprofile/patch_orderfile_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cygprofile/patch_orderfile_unittest.py 2025-01-16 02:26:08.626012286 +0800 +@@ -12,12 +12,12 @@ + class TestPatchOrderFile(unittest.TestCase): + def testRemoveSuffixes(self): + no_clone = 'this.does.not.contain.clone' +- self.assertEquals(no_clone, patch_orderfile.RemoveSuffixes(no_clone)) ++ self.assertEqual(no_clone, patch_orderfile.RemoveSuffixes(no_clone)) + with_clone = 'this.does.contain.clone.' +- self.assertEquals( ++ self.assertEqual( + 'this.does.contain', patch_orderfile.RemoveSuffixes(with_clone)) + with_part = 'this.is.a.part.42' +- self.assertEquals( ++ self.assertEqual( + 'this.is.a', patch_orderfile.RemoveSuffixes(with_part)) + + def testUniqueGenerator(self): +@@ -31,19 +31,19 @@ + self.assertEqual(list(TestIterator()), [1,2,3]) + + def testMaxOutlinedIndex(self): +- self.assertEquals(7, patch_orderfile._GetMaxOutlinedIndex( ++ self.assertEqual(7, patch_orderfile._GetMaxOutlinedIndex( + {'OUTLINED_FUNCTION_{}'.format(idx): None + for idx in [1, 2, 3, 7]})) + self.assertRaises(AssertionError, patch_orderfile._GetMaxOutlinedIndex, + {'OUTLINED_FUNCTION_{}'.format(idx): None + for idx in [1, 200, 3, 11]}) +- self.assertEquals(None, patch_orderfile._GetMaxOutlinedIndex( ++ self.assertEqual(None, patch_orderfile._GetMaxOutlinedIndex( + {'a': None, 'b': None})) + + def testPatchedSymbols(self): + # From input symbols a b c d, symbols a and d match themselves, symbol + # b matches b and x, and symbol c is missing. +- self.assertEquals(list('abxd'), ++ self.assertEqual(list('abxd'), + list(patch_orderfile._PatchedSymbols( + {'a': 'a', 'b': 'bx', 'd': 'd'}, + 'abcd', None))) +@@ -51,9 +51,9 @@ + def testPatchedSymbolsWithOutlining(self): + # As above, but add outlined functions at the end. The aliased outlined + # function should be ignored. 
+- self.assertEquals(list('abd') + ++ self.assertEqual(list('abd') + + ['OUTLINED_FUNCTION_{}'.format(i) +- for i in xrange(5)], ++ for i in range(5)], + list(patch_orderfile._PatchedSymbols( + {'a': 'a', + 'b': ['b', 'OUTLINED_FUNCTION_4'], +--- a/src/3rdparty/chromium/tools/cygprofile/process_profiles.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cygprofile/process_profiles.py 2025-01-16 02:26:08.626012286 +0800 +@@ -390,10 +390,10 @@ + return self._count.get((phase, process), 0) + + def Processes(self): +- return set(k[1] for k in self._count.iterkeys()) ++ return set(k[1] for k in self._count.keys()) + + def Phases(self): +- return set(k[0] for k in self._count.iterkeys()) ++ return set(k[0] for k in self._count.keys()) + + def Offset(self): + return self._offset +@@ -468,7 +468,7 @@ + for offset in self._ReadOffsets(f): + offset_map.setdefault(offset, self.AnnotatedOffset(offset)).Increment( + phase, process) +- return offset_map.values() ++ return list(offset_map.values()) + + def GetProcessOffsetLists(self): + """Returns all symbol offsets lists, grouped by process.""" +@@ -478,12 +478,12 @@ + return offsets_by_process + + def _SanityCheckAllCallsCapturedByTheInstrumentation(self, process_info): +- total_calls_count = long(process_info['total_calls_count']) ++ total_calls_count = int(process_info['total_calls_count']) + call_graph = process_info['call_graph'] + count = 0 + for el in call_graph: + for bucket in el['caller_and_count']: +- count += long(bucket['count']) ++ count += int(bucket['count']) + + # This is a sanity check to ensure the number of race-related + # inconsistencies is small. +@@ -580,7 +580,7 @@ + assert self._run_groups + if len(self._run_groups) < 5: + return # Small runs have too much variance for testing. 
+- sizes = map(lambda g: len(g.Filenames()), self._run_groups) ++ sizes = [len(g.Filenames()) for g in self._run_groups] + avg_size = sum(sizes) / len(self._run_groups) + num_outliers = len([s for s in sizes + if s > 1.5 * avg_size or s < 0.75 * avg_size]) +--- a/src/3rdparty/chromium/tools/cygprofile/process_profiles_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cygprofile/process_profiles_unittest.py 2025-01-16 02:26:08.626012286 +0800 +@@ -116,12 +116,12 @@ + self.assertEqual(13, processor.SymbolsSize(['W', 'Y', 'Z'])) + + def testMedian(self): +- self.assertEquals(None, process_profiles._Median([])) +- self.assertEquals(5, process_profiles._Median([5])) +- self.assertEquals(5, process_profiles._Median([1, 5, 20])) +- self.assertEquals(5, process_profiles._Median([4, 6])) +- self.assertEquals(5, process_profiles._Median([1, 4, 6, 100])) +- self.assertEquals(5, process_profiles._Median([1, 4, 5, 6, 100])) ++ self.assertEqual(None, process_profiles._Median([])) ++ self.assertEqual(5, process_profiles._Median([5])) ++ self.assertEqual(5, process_profiles._Median([1, 5, 20])) ++ self.assertEqual(5, process_profiles._Median([4, 6])) ++ self.assertEqual(5, process_profiles._Median([1, 4, 6, 100])) ++ self.assertEqual(5, process_profiles._Median([1, 4, 5, 6, 100])) + + def testRunGroups(self): + files = [ProfileFile(40, 0), ProfileFile(100, 0), +@@ -129,10 +129,10 @@ + ProfileFile(42, 0), ProfileFile(95, 0)] + mgr = process_profiles.ProfileManager(files) + mgr._ComputeRunGroups() +- self.assertEquals(3, len(mgr._run_groups)) +- self.assertEquals(3, len(mgr._run_groups[0].Filenames())) +- self.assertEquals(2, len(mgr._run_groups[1].Filenames())) +- self.assertEquals(1, len(mgr._run_groups[2].Filenames())) ++ self.assertEqual(3, len(mgr._run_groups)) ++ self.assertEqual(3, len(mgr._run_groups[0].Filenames())) ++ self.assertEqual(2, len(mgr._run_groups[1].Filenames())) ++ self.assertEqual(1, len(mgr._run_groups[2].Filenames())) + self.assertTrue(files[0] in mgr._run_groups[0].Filenames()) + self.assertTrue(files[3] in mgr._run_groups[0].Filenames()) + self.assertTrue(files[4] in mgr._run_groups[0].Filenames()) +@@ -143,7 +143,7 @@ + def testRunGroupSanity(self): + files = [] + # Generate 20 sets of files in groups separated by 60s. 
+- for ts_base in xrange(0, 20): ++ for ts_base in range(0, 20): + ts = ts_base * 60 + files.extend([ProfileFile(ts, 0, 'browser'), + ProfileFile(ts + 1, 0, 'renderer'), +@@ -158,7 +158,7 @@ + ProfileFile(20 * 60 + 2, 1, 'renderer'), + ProfileFile(21 * 60, 0, 'browser')] + + [ProfileFile(22 * 60, 0, 'renderer') +- for _ in xrange(0, 10)]) ++ for _ in range(0, 10)]) + + self.assertRaises(AssertionError, + process_profiles.ProfileManager(files)._ComputeRunGroups) +@@ -179,15 +179,15 @@ + ProfileFile(150, 0): [9, 11, 13], + ProfileFile(40, 1): [5, 6, 7]}) + offsets_list = mgr.GetRunGroupOffsets() +- self.assertEquals(2, len(offsets_list)) ++ self.assertEqual(2, len(offsets_list)) + self.assertListEqual([1, 2, 3, 4, 5, 6, 7], offsets_list[0]) + self.assertListEqual([9, 11, 13], offsets_list[1]) + offsets_list = mgr.GetRunGroupOffsets(0) +- self.assertEquals(2, len(offsets_list)) ++ self.assertEqual(2, len(offsets_list)) + self.assertListEqual([1, 2, 3, 4], offsets_list[0]) + self.assertListEqual([9, 11, 13], offsets_list[1]) + offsets_list = mgr.GetRunGroupOffsets(1) +- self.assertEquals(2, len(offsets_list)) ++ self.assertEqual(2, len(offsets_list)) + self.assertListEqual([5, 6, 7], offsets_list[0]) + self.assertListEqual([], offsets_list[1]) + +@@ -199,7 +199,7 @@ + ProfileFile(150, 0): [9, 11, 13], + ProfileFile(30, 1): [5, 6, 7]}) + offsets_list = mgr.GetRunGroupOffsets() +- self.assertEquals(2, len(offsets_list)) ++ self.assertEqual(2, len(offsets_list)) + self.assertListEqual([5, 6, 7, 1, 2, 3, 4], offsets_list[0]) + + def testPhases(self): +@@ -209,7 +209,7 @@ + ProfileFile(30, 1): [], + ProfileFile(30, 2): [], + ProfileFile(30, 0): []}) +- self.assertEquals(set([0,1,2]), mgr.GetPhases()) ++ self.assertEqual(set([0,1,2]), mgr.GetPhases()) + + def testGetAnnotatedOffsets(self): + mgr = TestProfileManager({ +--- a/src/3rdparty/chromium/tools/cygprofile/profile_android_startup.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cygprofile/profile_android_startup.py 2025-01-16 02:26:08.626012286 +0800 +@@ -10,7 +10,7 @@ + to make runs repeatable. + """ + +-from __future__ import print_function ++ + + import argparse + import logging +@@ -529,7 +529,7 @@ + + apk = apk_helper.ApkHelper(args.apk_path) + package_info = None +- for p in constants.PACKAGE_INFO.itervalues(): ++ for p in constants.PACKAGE_INFO.values(): + if p.package == apk.GetPackageName(): + package_info = p + break +--- a/src/3rdparty/chromium/tools/cygprofile/symbol_extractor.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cygprofile/symbol_extractor.py 2025-01-16 02:26:08.626012286 +0800 +@@ -174,9 +174,8 @@ + + # Outlined functions are known to be repeated often, so ignore them in the + # repeated symbol count. +- repeated_symbols = filter(lambda s: len(name_to_offsets[s]) > 1, +- (k for k in name_to_offsets.keys() +- if not k.startswith('OUTLINED_FUNCTION_'))) ++ repeated_symbols = [s for s in (k for k in list(name_to_offsets.keys()) ++ if not k.startswith('OUTLINED_FUNCTION_')) if len(name_to_offsets[s]) > 1] + if repeated_symbols: + # Log the first 5 repeated offsets of the first 10 repeated symbols. + logging.warning('%d symbols repeated with multiple offsets:\n %s', +@@ -316,7 +315,7 @@ + # check_orderfile. 
+ symbol_infos_by_name = {} + warnings = cygprofile_utils.WarningCollector(_MAX_WARNINGS_TO_PRINT) +- for infos in GroupSymbolInfosByName(symbol_infos).itervalues(): ++ for infos in GroupSymbolInfosByName(symbol_infos).values(): + first_symbol_info = min(infos, key=lambda x: x.offset) + symbol_infos_by_name[first_symbol_info.name] = first_symbol_info + if len(infos) > 1: +--- a/src/3rdparty/chromium/tools/cygprofile/symbol_extractor_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cygprofile/symbol_extractor_unittest.py 2025-01-16 02:26:08.626012286 +0800 +@@ -70,10 +70,10 @@ + line = ('009faf60 l O .text\t00000500' + SPACES + 'AES_Td') + symbol_info = symbol_extractor._FromObjdumpLine(line) + self.assertIsNotNone(symbol_info) +- self.assertEquals(0x009faf60, symbol_info.offset) +- self.assertEquals('.text', symbol_info.section) +- self.assertEquals(0x500, symbol_info.size) +- self.assertEquals('AES_Td', symbol_info.name) ++ self.assertEqual(0x009faf60, symbol_info.offset) ++ self.assertEqual('.text', symbol_info.section) ++ self.assertEqual(0x500, symbol_info.size) ++ self.assertEqual('AES_Td', symbol_info.name) + + def testSymbolFromLocalLabel(self): + line = ('00f64b80 l .text\t00000000' + SPACES + 'Builtins_Abort') +@@ -85,8 +85,8 @@ + '.hidden linker_script_start_of_text') + symbol_info = symbol_extractor._FromObjdumpLine(line) + self.assertIsNotNone(symbol_info) +- self.assertEquals(0x00918000, symbol_info.offset) +- self.assertEquals('linker_script_start_of_text', symbol_info.name) ++ self.assertEqual(0x00918000, symbol_info.offset) ++ self.assertEqual('linker_script_start_of_text', symbol_info.name) + + def testSymbolInfo(self): + line = ('00c1c05c l F .text\t0000002c' + SPACES + +@@ -97,10 +97,10 @@ + test_section = '.text' + symbol_info = symbol_extractor._FromObjdumpLine(line) + self.assertIsNotNone(symbol_info) +- self.assertEquals(test_offset, symbol_info.offset) +- self.assertEquals(test_size, symbol_info.size) +- self.assertEquals(test_name, symbol_info.name) +- self.assertEquals(test_section, symbol_info.section) ++ self.assertEqual(test_offset, symbol_info.offset) ++ self.assertEqual(test_size, symbol_info.size) ++ self.assertEqual(test_name, symbol_info.name) ++ self.assertEqual(test_section, symbol_info.section) + + def testHiddenSymbol(self): + line = ('00c1c05c l F .text\t0000002c' + SPACES + +@@ -111,10 +111,10 @@ + test_section = '.text' + symbol_info = symbol_extractor._FromObjdumpLine(line) + self.assertIsNotNone(symbol_info) +- self.assertEquals(test_offset, symbol_info.offset) +- self.assertEquals(test_size, symbol_info.size) +- self.assertEquals(test_name, symbol_info.name) +- self.assertEquals(test_section, symbol_info.section) ++ self.assertEqual(test_offset, symbol_info.offset) ++ self.assertEqual(test_size, symbol_info.size) ++ self.assertEqual(test_name, symbol_info.name) ++ self.assertEqual(test_section, symbol_info.section) + + def testDollarInSymbolName(self): + # A $ character elsewhere in the symbol name is fine. 
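
The symbol_extractor.py hunks above show the two idioms this patch applies
most often: Python 3 drops iteritems()/iterkeys()/itervalues() because the
plain methods now return lazy views, and map()/filter() return iterators, so
call sites that need a real list become comprehensions. A small standalone
sketch of both (illustrative only, standard library assumed):

    name_to_offsets = {'a': [1], 'b': [2, 3], 'OUTLINED_FUNCTION_0': [4, 5]}

    # Py2 .iteritems() becomes .items(); the Py3 view is already lazy.
    for name, offsets in name_to_offsets.items():
        pass

    # filter(lambda ...) returned a list on Py2; on Py3 a comprehension
    # makes the materialized list explicit.
    repeated = [n for n in name_to_offsets
                if not n.startswith('OUTLINED_FUNCTION_')
                and len(name_to_offsets[n]) > 1]
    assert repeated == ['b']

    # Wrap a view in list() only when the dict is mutated while iterating.
    for name in list(name_to_offsets):
        if name.startswith('OUTLINED_FUNCTION_'):
            del name_to_offsets[name]
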
+@@ -123,10 +123,10 @@ + '_ZZL11get_globalsvENK3$_1clEv') + symbol_info = symbol_extractor._FromObjdumpLine(line) + self.assertIsNotNone(symbol_info) +- self.assertEquals(0xc1b228, symbol_info.offset) +- self.assertEquals(0x60, symbol_info.size) +- self.assertEquals('_ZZL11get_globalsvENK3$_1clEv', symbol_info.name) +- self.assertEquals('.text', symbol_info.section) ++ self.assertEqual(0xc1b228, symbol_info.offset) ++ self.assertEqual(0x60, symbol_info.size) ++ self.assertEqual('_ZZL11get_globalsvENK3$_1clEv', symbol_info.name) ++ self.assertEqual('.text', symbol_info.section) + + def testOutlinedFunction(self): + # Test that an outlined function is reported normally. Also note that +@@ -135,10 +135,10 @@ + 'OUTLINED_FUNCTION_4') + symbol_info = symbol_extractor._FromObjdumpLine(line) + self.assertIsNotNone(symbol_info) +- self.assertEquals(0x20fab4c, symbol_info.offset) +- self.assertEquals(0x14, symbol_info.size) +- self.assertEquals('OUTLINED_FUNCTION_4', symbol_info.name) +- self.assertEquals('.text', symbol_info.section) ++ self.assertEqual(0x20fab4c, symbol_info.offset) ++ self.assertEqual(0x14, symbol_info.size) ++ self.assertEqual('OUTLINED_FUNCTION_4', symbol_info.name) ++ self.assertEqual('.text', symbol_info.section) + + def testNeitherLocalNorGlobalSymbol(self): + # This happens, see crbug.com/992884. +@@ -146,10 +146,10 @@ + line = '0287ae50 w F .text\t000001e8 log2l' + symbol_info = symbol_extractor._FromObjdumpLine(line) + self.assertIsNotNone(symbol_info) +- self.assertEquals(0x287ae50, symbol_info.offset) +- self.assertEquals(0x1e8, symbol_info.size) +- self.assertEquals('log2l', symbol_info.name) +- self.assertEquals('.text', symbol_info.section) ++ self.assertEqual(0x287ae50, symbol_info.offset) ++ self.assertEqual(0x1e8, symbol_info.size) ++ self.assertEqual('log2l', symbol_info.name) ++ self.assertEqual('.text', symbol_info.section) + + class TestSymbolInfosFromStream(unittest.TestCase): + +@@ -161,11 +161,11 @@ + 'more garbage', + '00155 g F .text\t00000012' + SPACES + 'second'] + symbol_infos = symbol_extractor._SymbolInfosFromStream(lines) +- self.assertEquals(len(symbol_infos), 2) ++ self.assertEqual(len(symbol_infos), 2) + first = symbol_extractor.SymbolInfo('first', 0x00c1c05c, 0x2c, '.text') +- self.assertEquals(first, symbol_infos[0]) ++ self.assertEqual(first, symbol_infos[0]) + second = symbol_extractor.SymbolInfo('second', 0x00155, 0x12, '.text') +- self.assertEquals(second, symbol_infos[1]) ++ self.assertEqual(second, symbol_infos[1]) + + + class TestSymbolInfoMappings(unittest.TestCase): +@@ -179,21 +179,21 @@ + def testGroupSymbolInfosByOffset(self): + offset_to_symbol_info = symbol_extractor.GroupSymbolInfosByOffset( + self.symbol_infos) +- self.assertEquals(len(offset_to_symbol_info), 2) ++ self.assertEqual(len(offset_to_symbol_info), 2) + self.assertIn(0x42, offset_to_symbol_info) +- self.assertEquals(offset_to_symbol_info[0x42][0], self.symbol_infos[0]) +- self.assertEquals(offset_to_symbol_info[0x42][1], self.symbol_infos[1]) ++ self.assertEqual(offset_to_symbol_info[0x42][0], self.symbol_infos[0]) ++ self.assertEqual(offset_to_symbol_info[0x42][1], self.symbol_infos[1]) + self.assertIn(0x64, offset_to_symbol_info) +- self.assertEquals(offset_to_symbol_info[0x64][0], self.symbol_infos[2]) ++ self.assertEqual(offset_to_symbol_info[0x64][0], self.symbol_infos[2]) + + def testCreateNameToSymbolInfo(self): + name_to_symbol_info = symbol_extractor.CreateNameToSymbolInfo( + self.symbol_infos) +- self.assertEquals(len(name_to_symbol_info), 3) ++ 
self.assertEqual(len(name_to_symbol_info), 3) + for i in range(3): + name = self.symbol_infos[i].name + self.assertIn(name, name_to_symbol_info) +- self.assertEquals(self.symbol_infos[i], name_to_symbol_info[name]) ++ self.assertEqual(self.symbol_infos[i], name_to_symbol_info[name]) + + def testSymbolCollisions(self): + symbol_infos_with_collision = list(self.symbol_infos) +@@ -203,11 +203,11 @@ + # The symbol added above should not affect the output. + name_to_symbol_info = symbol_extractor.CreateNameToSymbolInfo( + self.symbol_infos) +- self.assertEquals(len(name_to_symbol_info), 3) ++ self.assertEqual(len(name_to_symbol_info), 3) + for i in range(3): + name = self.symbol_infos[i].name + self.assertIn(name, name_to_symbol_info) +- self.assertEquals(self.symbol_infos[i], name_to_symbol_info[name]) ++ self.assertEqual(self.symbol_infos[i], name_to_symbol_info[name]) + + if __name__ == '__main__': + unittest.main() +--- a/src/3rdparty/chromium/tools/cygprofile/test_utils.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/cygprofile/test_utils.py 2025-01-16 02:26:08.626012286 +0800 +@@ -23,7 +23,7 @@ + + class TestProfileManager(process_profiles.ProfileManager): + def __init__(self, filecontents_mapping): +- super(TestProfileManager, self).__init__(filecontents_mapping.keys()) ++ super(TestProfileManager, self).__init__(list(filecontents_mapping.keys())) + self._filecontents_mapping = filecontents_mapping + + def _ReadOffsets(self, filename): +--- a/src/3rdparty/chromium/tools/determinism/compare_build_artifacts.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/determinism/compare_build_artifacts.py 2025-01-16 02:26:08.626012286 +0800 +@@ -5,7 +5,7 @@ + + """Compare the artifacts from two builds.""" + +-from __future__ import print_function ++ + + import ast + import difflib +@@ -113,11 +113,11 @@ + if not lhs_data or not rhs_data: + break + if lhs_data != rhs_data: +- for i in xrange(min(len(lhs_data), len(rhs_data))): ++ for i in range(min(len(lhs_data), len(rhs_data))): + if lhs_data[i] != rhs_data[i]: + num_diffs += 1 + if len(streams) < MAX_STREAMS: +- for idx in xrange(NUM_CHUNKS_IN_BLOCK): ++ for idx in range(NUM_CHUNKS_IN_BLOCK): + lhs_chunk = lhs_data[idx * CHUNK_SIZE:(idx + 1) * CHUNK_SIZE] + rhs_chunk = rhs_data[idx * CHUNK_SIZE:(idx + 1) * CHUNK_SIZE] + if lhs_chunk != rhs_chunk: +@@ -293,7 +293,7 @@ + + epoch_hex = struct.pack('POST OK.

"); + length = int(self.headers.getheader('content-length')) +- parameters = urlparse.parse_qs(self.rfile.read(length)) ++ parameters = urllib.parse.parse_qs(self.rfile.read(length)) + self.server.got_post = True + self.server.post_data = parameters['data'] + +@@ -240,7 +240,7 @@ + browser_name = os.path.splitext(os.path.basename(browser))[0] + spreadsheet_writer.WriteBrowserBenchmarkTitle(browser_name) + benchmark_results = BenchmarkResults() +- for run_number in xrange(options.run_count): ++ for run_number in range(options.run_count): + print('%s run %i' % (browser_name, run_number + 1)) + # Run browser. + test_page = 'http://localhost:%i/index.html?%s&automated&post_json' % ( +@@ -257,7 +257,7 @@ + browser_process.wait() + + # Insert test results into spreadsheet. +- for (test_name, test_data) in benchmark_results.data.iteritems(): ++ for (test_name, test_data) in benchmark_results.data.items(): + spreadsheet_writer.WriteBrowserBenchmarkResults(test_name, test_data) + + server.socket.close() +--- a/src/3rdparty/chromium/tools/dump_process_memory/analyze_dumps.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/dump_process_memory/analyze_dumps.py 2025-01-16 02:26:08.626012286 +0800 +@@ -8,7 +8,7 @@ + its content. + """ + +-from __future__ import print_function ++ + + import argparse + import array +@@ -212,9 +212,9 @@ + total_present_zero_pages = sum( + sum(x == (True, True) for x in zip(stats.is_zero, stats.is_present)) + for stats in dump_stats) +- total_freed_space = {x: 0 for x in FREED_PATTERNS.values()} ++ total_freed_space = {x: 0 for x in list(FREED_PATTERNS.values())} + for dump in dump_stats: +- for (freed_data_type, value) in dump.freed.items(): ++ for (freed_data_type, value) in list(dump.freed.items()): + total_freed_space[freed_data_type] += value + + content_to_count = collections.defaultdict(int) +@@ -241,8 +241,8 @@ + """ + dump_stats = [_GetStatsFromFileDump(filename) for filename in dumps] + total = _AggregateStats(dump_stats) +- duplicated_pages = sum(x - 1 for x in total.content_to_count.values()) +- count_and_hashes = sorted(((v, k) for k, v in total.content_to_count.items()), ++ duplicated_pages = sum(x - 1 for x in list(total.content_to_count.values())) ++ count_and_hashes = sorted(((v, k) for k, v in list(total.content_to_count.items())), + reverse=True) + max_common_pages = count_and_hashes[0][0] - 1 + total_size_non_zero_pages = (total.pages - total.zero_pages) * PAGE_SIZE +--- a/src/3rdparty/chromium/tools/dump_process_memory/collect_process_dump.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/dump_process_memory/collect_process_dump.py 2025-01-16 02:26:08.626012286 +0800 +@@ -43,7 +43,7 @@ + logging.info('Finding the first renderer PID') + renderer_name = '%s:sandboxed_process' % args.package + renderer_pids = device.GetPids(renderer_name) +- pid = int(renderer_pids.items()[0][1][0]) ++ pid = int(list(renderer_pids.items())[0][1][0]) + logging.info('PID = %d', pid) + + logging.info('Setting up directories') +--- a/src/3rdparty/chromium/tools/find_runtime_symbols/find_runtime_symbols.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/find_runtime_symbols/find_runtime_symbols.py 2025-01-16 02:26:08.626012286 +0800 +@@ -8,7 +8,7 @@ + are actually not. 
+ """ + +-from __future__ import print_function ++ + + import json + import logging +@@ -122,7 +122,7 @@ + def _find_runtime_function_symbols(symbols_in_process, addresses): + result = OrderedDict() + for address in addresses: +- if isinstance(address, basestring): ++ if isinstance(address, str): + address = int(address, 16) + found = symbols_in_process.find_procedure(address) + if found: +@@ -135,7 +135,7 @@ + def _find_runtime_sourcefile_symbols(symbols_in_process, addresses): + result = OrderedDict() + for address in addresses: +- if isinstance(address, basestring): ++ if isinstance(address, str): + address = int(address, 16) + found = symbols_in_process.find_sourcefile(address) + if found: +@@ -148,7 +148,7 @@ + def _find_runtime_typeinfo_symbols(symbols_in_process, addresses): + result = OrderedDict() + for address in addresses: +- if isinstance(address, basestring): ++ if isinstance(address, str): + address = int(address, 16) + if address == 0: + result[address] = 'no typeinfo' +@@ -203,7 +203,7 @@ + symbols_dict = find_runtime_symbols(FUNCTION_SYMBOLS, + symbols_in_process, + sys.stdin) +- for address, symbol in symbols_dict.iteritems(): ++ for address, symbol in symbols_dict.items(): + if symbol: + print('%016x %s' % (address, symbol)) + else: +--- a/src/3rdparty/chromium/tools/find_runtime_symbols/prepare_symbol_info.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/find_runtime_symbols/prepare_symbol_info.py 2025-01-16 02:26:08.626012286 +0800 +@@ -152,7 +152,7 @@ + LOGGER.debug(' %016x-%016x +%06x %s' % ( + entry.begin, entry.end, entry.offset, entry.name)) + binary_path = entry.name +- for target_path, host_path in alternative_dirs.iteritems(): ++ for target_path, host_path in alternative_dirs.items(): + if entry.name.startswith(target_path): + binary_path = entry.name.replace(target_path, host_path, 1) + if not (ProcMaps.EXECUTABLE_PATTERN.match(binary_path) or +--- a/src/3rdparty/chromium/tools/find_runtime_symbols/reduce_debugline.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/find_runtime_symbols/reduce_debugline.py 2025-01-16 02:26:08.626012286 +0800 +@@ -13,7 +13,7 @@ + Note: the option '-wL' has the same meaning with '--debug-dump=decodedline'. + """ + +-from __future__ import print_function ++ + + import re + import sys +--- a/src/3rdparty/chromium/tools/flags/generate_expired_list.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/flags/generate_expired_list.py 2025-01-16 02:26:08.626012286 +0800 +@@ -16,7 +16,7 @@ + This program can be run with no arguments to run its own unit tests. 
+ """ + +-from __future__ import print_function ++ + + import list_flags + import os +--- a/src/3rdparty/chromium/tools/flags/generate_unexpire_flags_unittests.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/flags/generate_unexpire_flags_unittests.py 2025-01-16 02:26:08.626012286 +0800 +@@ -20,17 +20,17 @@ + def testCcFile(self): + cc = generate_unexpire_flags.gen_features_impl('foobar', 123) + golden_cc = self.read_golden_file('cc') +- self.assertEquals(golden_cc, cc) ++ self.assertEqual(golden_cc, cc) + + def testHFile(self): + h = generate_unexpire_flags.gen_features_header('foobar', 123) + golden_h = self.read_golden_file('h') +- self.assertEquals(golden_h, h) ++ self.assertEqual(golden_h, h) + + def testIncFile(self): + inc = generate_unexpire_flags.gen_flags_fragment('foobar', 123) + golden_inc = self.read_golden_file('inc') +- self.assertEquals(golden_inc, inc) ++ self.assertEqual(golden_inc, inc) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/tools/flags/list_flags.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/flags/list_flags.py 2025-01-16 02:26:08.626012286 +0800 +@@ -6,7 +6,7 @@ + """Emits a formatted, optionally filtered view of the list of flags. + """ + +-from __future__ import print_function ++ + + import argparse + import os +--- a/src/3rdparty/chromium/tools/flakiness/find_flakiness.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/flakiness/find_flakiness.py 2025-01-16 02:26:08.626012286 +0800 +@@ -7,7 +7,7 @@ + case in parallel repeatedly to identify flaky tests. + """ + +-from __future__ import print_function ++ + + import os + import re +@@ -83,7 +83,7 @@ + data_file.write('%i runs\n' % num_runs) + print('%i passes' % num_passes) + data_file.write('%i passes\n' % num_passes) +- for (test, count) in failed_tests.iteritems(): ++ for (test, count) in failed_tests.items(): + print('%s -> %i' % (test, count)) + data_file.write('%s -> %i\n' % (test, count)) + data_file.close() +--- a/src/3rdparty/chromium/tools/flakiness/is_flaky.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/flakiness/is_flaky.py 2025-01-16 02:26:08.626012286 +0800 +@@ -6,7 +6,7 @@ + """Runs a test repeatedly to measure its flakiness. The return code is non-zero + if the failure rate is higher than the specified threshold, but is not 100%.""" + +-from __future__ import print_function ++ + + import argparse + import multiprocessing.dummy +--- a/src/3rdparty/chromium/tools/fuchsia/local-sdk.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/fuchsia/local-sdk.py 2025-01-16 02:26:08.626012286 +0800 +@@ -4,7 +4,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import hashlib + import json +--- a/src/3rdparty/chromium/tools/fuchsia/comparative_tester/comparative_tester.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/fuchsia/comparative_tester/comparative_tester.py 2025-01-16 02:26:08.626012286 +0800 +@@ -7,7 +7,7 @@ + # Fuchsia devices and then compares their output to each other, extracting the + # relevant performance data from the output of gtest. 
+ +-from __future__ import print_function ++ + + import argparse + import logging +--- a/src/3rdparty/chromium/tools/fuchsia/comparative_tester/generate_perf_report.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/fuchsia/comparative_tester/generate_perf_report.py 2025-01-16 02:26:08.626012286 +0800 +@@ -132,7 +132,7 @@ + else: + lines[line.desc].append(line) + test_lines = [] +- for _, line_list in lines.items(): ++ for _, line_list in list(lines.items()): + stat_line = LineFromList(line_list) + if stat_line: + test_lines.append(stat_line) +@@ -195,10 +195,10 @@ + # so that in the event tests flake out, their average times can + # still be accurately calculated + for test in result.tests: +- if not test.name in tests.keys(): ++ if not test.name in list(tests.keys()): + tests[test.name] = [test] + tests[test.name].append(test) +- test_stats = [TestFromList(test_list) for _, test_list in tests.items()] ++ test_stats = [TestFromList(test_list) for _, test_list in list(tests.items())] + return TargetStats(name, sample_num, test_stats) + + +@@ -238,13 +238,13 @@ + resultMap = {} # type: Dict[str, List[TargetResult]] + for file in os.listdir(directory): + results = ReadTargetFromJson("{}/{}".format(directory, file)) +- if not results.name in resultMap.keys(): ++ if not results.name in list(resultMap.keys()): + resultMap[results.name] = [results] + else: + resultMap[results.name].append(results) + + targets = [] +- for _, resultList in resultMap.items(): ++ for _, resultList in list(resultMap.items()): + targets.append(TargetFromList(resultList)) + return targets + +@@ -355,12 +355,12 @@ + for item in left: + key = pred(item) + # the first list shouldn't cause any key collisions +- assert key not in paired_items.keys() ++ assert key not in list(paired_items.keys()) + paired_items[key] = item, None + + for item in right: + key = pred(item) +- if key in paired_items.keys(): ++ if key in list(paired_items.keys()): + # elem 1 of the tuple is always None if the key exists in the map + prev, _ = paired_items[key] + paired_items[key] = prev, item +@@ -380,7 +380,7 @@ + returning a new dictionary with the new values. + """ + out_dict = {} +- for key, val in dct.items(): ++ for key, val in list(dct.items()): + out_dict[key] = predicate(*val) + return out_dict + +@@ -390,7 +390,7 @@ + fuchsia_avgs = DirectoryStats(target_spec.raw_fuchsia_dir) + paired_targets = ZipListsByPredicate(linux_avgs, fuchsia_avgs, + lambda target: target.name) +- for name, targets in paired_targets.items(): ++ for name, targets in list(paired_targets.items()): + comparison_dict = CompareTargets(*targets) + if comparison_dict: + with open("{}/{}.json".format(target_spec.results_dir, name), +--- a/src/3rdparty/chromium/tools/generate_stubs/generate_stubs_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/generate_stubs/generate_stubs_unittest.py 2025-01-16 02:26:08.626012286 +0800 +@@ -21,7 +21,7 @@ + + import generate_stubs as gs + import re +-import StringIO ++import io + import sys + import unittest + +@@ -73,18 +73,18 @@ + + def testParseSignatures_EmptyFile(self): + # Empty file just generates empty signatures. 
+- infile = StringIO.StringIO() ++ infile = io.StringIO() + signatures = gs.ParseSignatures(infile) + self.assertEqual(0, len(signatures)) + + def testParseSignatures_SimpleSignatures(self): + file_contents = '\n'.join([x[0] for x in SIMPLE_SIGNATURES]) +- infile = StringIO.StringIO(file_contents) ++ infile = io.StringIO(file_contents) + signatures = gs.ParseSignatures(infile) + self.assertEqual(len(SIMPLE_SIGNATURES), len(signatures)) + + # We assume signatures are in order. +- for i in xrange(len(SIMPLE_SIGNATURES)): ++ for i in range(len(SIMPLE_SIGNATURES)): + self.assertEqual(SIMPLE_SIGNATURES[i][1], signatures[i], + msg='Expected %s\nActual %s\nFor %s' % + (SIMPLE_SIGNATURES[i][1], +@@ -93,12 +93,12 @@ + + def testParseSignatures_TrickySignatures(self): + file_contents = '\n'.join([x[0] for x in TRICKY_SIGNATURES]) +- infile = StringIO.StringIO(file_contents) ++ infile = io.StringIO(file_contents) + signatures = gs.ParseSignatures(infile) + self.assertEqual(len(TRICKY_SIGNATURES), len(signatures)) + + # We assume signatures are in order. +- for i in xrange(len(TRICKY_SIGNATURES)): ++ for i in range(len(TRICKY_SIGNATURES)): + self.assertEqual(TRICKY_SIGNATURES[i][1], signatures[i], + msg='Expected %s\nActual %s\nFor %s' % + (TRICKY_SIGNATURES[i][1], +@@ -107,7 +107,7 @@ + + def testParseSignatures_InvalidSignatures(self): + for i in INVALID_SIGNATURES: +- infile = StringIO.StringIO(i) ++ infile = io.StringIO(i) + self.assertRaises(gs.BadSignatureError, gs.ParseSignatures, infile) + + def testParseSignatures_CommentsIgnored(self): +@@ -124,7 +124,7 @@ + my_sigs.append(SIMPLE_SIGNATURES[0][0]) + + file_contents = '\n'.join(my_sigs) +- infile = StringIO.StringIO(file_contents) ++ infile = io.StringIO(file_contents) + signatures = gs.ParseSignatures(infile) + self.assertEqual(5, len(signatures)) + +@@ -133,7 +133,7 @@ + def testWriteWindowsDefFile(self): + module_name = 'my_module-1' + signatures = [sig[1] for sig in SIMPLE_SIGNATURES] +- outfile = StringIO.StringIO() ++ outfile = io.StringIO() + gs.WriteWindowsDefFile(module_name, signatures, outfile) + contents = outfile.getvalue() + +@@ -149,7 +149,7 @@ + msg='Expected match of "%s" in %s' % (pattern, contents)) + + def testQuietRun(self): +- output = StringIO.StringIO() ++ output = io.StringIO() + gs.QuietRun([ + sys.executable, '-c', + 'from __future__ import print_function; print("line 1 and suffix\\nline 2")' +@@ -157,7 +157,7 @@ + write_to=output) + self.assertEqual('line 1 and suffix\nline 2\n', output.getvalue()) + +- output = StringIO.StringIO() ++ output = io.StringIO() + gs.QuietRun([ + sys.executable, '-c', + 'from __future__ import print_function; print("line 1 and suffix\\nline 2")' +@@ -227,7 +227,7 @@ + }""", gs.PosixStubWriter.StubFunction(SIMPLE_SIGNATURES[6][1])) + + def testWriteImplemenationContents(self): +- outfile = StringIO.StringIO() ++ outfile = io.StringIO() + self.writer.WriteImplementationContents('my_namespace', outfile) + contents = outfile.getvalue() + +@@ -263,7 +263,7 @@ + module_names = ['oneModule', 'twoModule'] + + # Make the header. +- outfile = StringIO.StringIO() ++ outfile = io.StringIO() + self.writer.WriteHeaderContents(module_names, 'my_namespace', 'GUARD_', + outfile, 'base/logging.h') + contents = outfile.getvalue() +@@ -298,7 +298,7 @@ + module_names = ['oneModule', 'twoModule'] + + # Make the header. 
+- outfile = StringIO.StringIO() ++ outfile = io.StringIO() + self.writer.WriteUmbrellaInitializer(module_names, 'my_namespace', outfile, + 'VLOG(1)') + contents = outfile.getvalue() +--- a/src/3rdparty/chromium/tools/git/for-all-touched-files.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/git/for-all-touched-files.py 2025-01-16 02:26:08.626012286 +0800 +@@ -22,7 +22,7 @@ + %prog -t "~~BINGO~~" "echo I modified ~~BINGO~~" + """ + +-from __future__ import print_function ++ + + import optparse + import os +--- a/src/3rdparty/chromium/tools/git/git-diff-ide.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/git/git-diff-ide.py 2025-01-16 02:26:08.626012286 +0800 +@@ -18,7 +18,7 @@ + %prog HEAD + """ + +-from __future__ import print_function ++ + + import subprocess + import sys +--- a/src/3rdparty/chromium/tools/git/mass-rename.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/git/mass-rename.py 2025-01-16 02:26:08.626012286 +0800 +@@ -13,7 +13,7 @@ + 3) look at git diff (without --cached) to see what the damage is + """ + +-from __future__ import print_function ++ + + import os + import subprocess +--- a/src/3rdparty/chromium/tools/git/mffr.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/git/mffr.py 2025-01-16 02:26:08.626012286 +0800 +@@ -17,7 +17,7 @@ + back-references. + """ + +-from __future__ import print_function ++ + + import optparse + import os +--- a/src/3rdparty/chromium/tools/git/move_source_file.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/git/move_source_file.py 2025-01-16 02:26:08.626012286 +0800 +@@ -16,7 +16,7 @@ + find files that reference the moved file. + """ + +-from __future__ import print_function ++ + + import optparse + import os +--- a/src/3rdparty/chromium/tools/git/suggest_owners.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/git/suggest_owners.py 2025-01-16 02:26:08.626012286 +0800 +@@ -3,7 +3,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
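
The StringIO-to-io.StringIO moves in generate_stubs_unittest.py above come
with a semantic shift worth noting: Python 3 splits the old StringIO into a
text buffer and a byte buffer, and mixing str with bytes raises TypeError
instead of coercing silently. A short standalone sketch (illustrative only,
standard library assumed):

    import io

    # Text in, text out: the replacement for Python 2's StringIO.StringIO.
    text_buf = io.StringIO()
    text_buf.write('line 1\nline 2\n')
    assert text_buf.getvalue().splitlines() == ['line 1', 'line 2']

    # Binary data needs io.BytesIO instead.
    byte_buf = io.BytesIO()
    byte_buf.write('line 1\n'.encode('utf-8'))
    assert byte_buf.getvalue() == b'line 1\n'
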
+ +-from __future__ import print_function ++ + + import argparse + import subprocess +@@ -40,7 +40,7 @@ + def getEditsForDirectory(commit, directory): + additions = deletions = 0 + for commit_directory, (directory_additions, directory_deletions) \ +- in commit.dirs.items(): ++ in list(commit.dirs.items()): + # check if commit_directory is same as or a descendant of directory + if isSubDirectory(directory, commit_directory): + additions += directory_additions +@@ -53,7 +53,7 @@ + def _PropagateCommit(options, commit): + touched_dirs = set() + # first get all the touched dirs and their ancestors +- for directory in commit.dirs.iterkeys(): ++ for directory in commit.dirs.keys(): + while directory != '': + touched_dirs.add(directory) + # get the parent directory +@@ -152,11 +152,11 @@ + + def _CountCommits(directory): + return sum( +- [count for (count, _a, _d) in DIRECTORY_AUTHORS[directory].itervalues()]) ++ [count for (count, _a, _d) in DIRECTORY_AUTHORS[directory].values()]) + + + def _GetOwnerLevel(options, author, directory): +- sorted_owners = sorted(_GetOwners(options, directory), key=lambda (o,l): l) ++ sorted_owners = sorted(_GetOwners(options, directory), key=lambda o_l: o_l[1]) + for owner, level in sorted_owners: + if author == owner: + return level +@@ -223,7 +223,7 @@ + def computeSuggestions(options): + directory_suggestions = [] + for directory, authors in sorted( +- DIRECTORY_AUTHORS.iteritems(), key=lambda (d, a): d): ++ iter(DIRECTORY_AUTHORS.items()), key=lambda d_a: d_a[0]): + if _IsTrivialDirectory(options, directory): + continue + if _CountCommits(directory) < options.dir_commit_limit: +@@ -233,8 +233,8 @@ + and not isSubDirectory(options.subdirectory, directory)): + continue + # sort authors by descending number of commits +- sorted_authors = sorted(authors.items(), +- key=lambda (author, details): -details[0]) ++ sorted_authors = sorted(list(authors.items()), ++ key=lambda author_details: -author_details[1][0]) + # keep only authors above the limit + suggestions = [(a,c) for a,c in sorted_authors if \ + a not in options.ignore_authors \ +--- a/src/3rdparty/chromium/tools/gn/roll_gn.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/gn/roll_gn.py 2025-01-16 02:26:08.626012286 +0800 +@@ -20,14 +20,14 @@ + need to do that afterwards. + """ + +-from __future__ import print_function ++ + + import argparse + import json + import os + import subprocess + import sys +-import urllib2 ++import urllib.request, urllib.error, urllib.parse + + + THIS_DIR = os.path.dirname(__file__) +@@ -70,7 +70,7 @@ + try: + url = ('https://gn.googlesource.com/gn.git/+log/%s..%s?format=JSON' % + (current_revision, args.revision)) +- resp = urllib2.urlopen(url) ++ resp = urllib.request.urlopen(url) + except Exception as e: + print('Failed to fetch log via %s: %s' % (url, str(e)), file=sys.stderr) + return 1 +--- a/src/3rdparty/chromium/tools/grit/grit.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit.py 2025-01-16 02:26:08.626012286 +0800 +@@ -6,7 +6,7 @@ + '''Bootstrapping for GRIT. + ''' + +-from __future__ import print_function ++ + + import os + import sys +--- a/src/3rdparty/chromium/tools/grit/grit_info.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit_info.py 2025-01-16 02:26:08.626012286 +0800 +@@ -6,7 +6,7 @@ + '''Tool to determine inputs and outputs of a grit file. 
+ ''' + +-from __future__ import print_function ++ + + import optparse + import os +--- a/src/3rdparty/chromium/tools/grit/minify_with_uglify.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/minify_with_uglify.py 2025-01-16 02:26:08.626012286 +0800 +@@ -3,7 +3,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. + +-from __future__ import print_function ++ + + import os + import sys +--- a/src/3rdparty/chromium/tools/grit/minimize_css_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/minimize_css_unittest.py 2025-01-16 02:26:08.626012286 +0800 +@@ -17,7 +17,7 @@ + } + """ + minimized = minimize_css.CSSMinimizer.minimize_css(source) +- self.assertEquals(minimized, "div{color: blue}") ++ self.assertEqual(minimized, "div{color: blue}") + + def test_attribute_selectors(self): + source = """ +@@ -26,7 +26,7 @@ + } + """ + minimized = minimize_css.CSSMinimizer.minimize_css(source) +- self.assertEquals( ++ self.assertEqual( + minimized, + # pylint: disable=line-too-long + """input[type="search" i]::-webkit-textfield-decoration-container{direction: ltr}""") +@@ -41,18 +41,18 @@ + /* footer */ + """ + minimized = minimize_css.CSSMinimizer.minimize_css(source) +- self.assertEquals(minimized, "html{ display: block}") ++ self.assertEqual(minimized, "html{ display: block}") + + def test_no_strip_inside_quotes(self): + source = """div[foo=' bar ']""" + minimized = minimize_css.CSSMinimizer.minimize_css(source) +- self.assertEquals(minimized, source) ++ self.assertEqual(minimized, source) + + source = """div[foo=" bar "]""" + minimized = minimize_css.CSSMinimizer.minimize_css(source) +- self.assertEquals(minimized, source) ++ self.assertEqual(minimized, source) + + def test_escape_string(self): + source = """content: " ";""" + minimized = minimize_css.CSSMinimizer.minimize_css(source) +- self.assertEquals(minimized, source) ++ self.assertEqual(minimized, source) +--- a/src/3rdparty/chromium/tools/grit/pak_util.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/pak_util.py 2025-01-16 02:26:08.626012286 +0800 +@@ -9,7 +9,7 @@ + https://dev.chromium.org/developers/design-documents/linuxresourcesandlocalizedstrings + """ + +-from __future__ import print_function ++ + + import argparse + import gzip +@@ -55,7 +55,7 @@ + pak = data_pack.ReadDataPack(args.pak_file) + if args.textual_id: + info_dict = data_pack.ReadGrdInfo(args.pak_file) +- for resource_id, payload in pak.resources.items(): ++ for resource_id, payload in list(pak.resources.items()): + filename = ( + info_dict[resource_id].textual_id + if args.textual_id else str(resource_id)) +@@ -109,7 +109,7 @@ + try: + desc = six.text_type(data, encoding) + if len(desc) > 60: +- desc = desc[:60] + u'...' ++ desc = desc[:60] + '...' 
+ desc = desc.replace('\n', '\\n') + except UnicodeDecodeError: + pass +@@ -118,12 +118,12 @@ + textual_id = info_dict[resource_id].textual_id + canonical_textual_id = info_dict[canonical_id].textual_id + output.write( +- u'Entry(id={}, canonical_id={}, size={}, sha1={}): {}\n'.format( ++ 'Entry(id={}, canonical_id={}, size={}, sha1={}): {}\n'.format( + textual_id, canonical_textual_id, len(data), sha1, + desc).encode('utf-8')) + else: + output.write( +- u'Entry(id={}, canonical_id={}, size={}, sha1={}): {}\n'.format( ++ 'Entry(id={}, canonical_id={}, size={}, sha1={}): {}\n'.format( + resource_id, canonical_id, len(data), sha1, desc).encode('utf-8')) + + +--- a/src/3rdparty/chromium/tools/grit/setup.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/setup.py 2025-01-16 02:26:08.626012286 +0800 +@@ -5,7 +5,7 @@ + + """Install the package!""" + +-from __future__ import absolute_import ++ + + import setuptools + +--- a/src/3rdparty/chromium/tools/grit/stamp_grit_sources.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/stamp_grit_sources.py 2025-01-16 02:26:08.626012286 +0800 +@@ -12,7 +12,7 @@ + # Usage: + # stamp_grit_sources.py <.d-file> + +-from __future__ import print_function ++ + + import os + import sys +--- a/src/3rdparty/chromium/tools/grit/grit/__init__.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/__init__.py 2025-01-16 02:26:08.626012286 +0800 +@@ -5,7 +5,7 @@ + '''Package 'grit' + ''' + +-from __future__ import print_function ++ + + import os + import sys +--- a/src/3rdparty/chromium/tools/grit/grit/clique.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/clique.py 2025-01-16 02:26:08.626012286 +0800 +@@ -6,7 +6,7 @@ + collections of cliques (uber-cliques). + ''' + +-from __future__ import print_function ++ + + import re + +@@ -80,12 +80,12 @@ + if len(self.fallback_translations_): + lines.append( + "WARNING: Fell back to English for the following translations:") +- for (id, langs) in self.fallback_translations_.items(): ++ for (id, langs) in list(self.fallback_translations_.items()): + lines.append( + ReportTranslation(self.cliques_[id][0], list(langs.keys()))) + if len(self.missing_translations_): + lines.append("ERROR: The following translations are MISSING:") +- for (id, langs) in self.missing_translations_.items(): ++ for (id, langs) in list(self.missing_translations_.items()): + lines.append( + ReportTranslation(self.cliques_[id][0], list(langs.keys()))) + return '\n'.join(lines) +@@ -194,7 +194,7 @@ + '''Iterates over all cliques. Note that this can return multiple cliques + with the same ID. 
+ ''' +- for cliques in self.cliques_.values(): ++ for cliques in list(self.cliques_.values()): + for c in cliques: + yield c + +--- a/src/3rdparty/chromium/tools/grit/grit/clique_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/clique_unittest.py 2025-01-16 02:26:08.626012286 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for grit.clique''' + +-from __future__ import print_function ++ + + import os + import sys +@@ -32,8 +32,8 @@ + tclib.Placeholder('USERNAME', '%s', 'Joi')]) + c = factory.MakeClique(msg) + +- self.failUnless(c.GetMessage() == msg) +- self.failUnless(c.GetId() == msg.GetId()) ++ self.assertTrue(c.GetMessage() == msg) ++ self.assertTrue(c.GetId() == msg.GetId()) + + msg_fr = tclib.Translation(text='Bonjour USERNAME, comment ca va?', + id=msg.GetId(), placeholders=[ +@@ -47,9 +47,9 @@ + + # sort() sorts lists in-place and does not return them + for lang in ('en', 'fr', 'de'): +- self.failUnless(lang in c.clique) ++ self.assertTrue(lang in c.clique) + +- self.failUnless(c.MessageForLanguage('fr').GetRealContent() == ++ self.assertTrue(c.MessageForLanguage('fr').GetRealContent() == + msg_fr.GetRealContent()) + + try: +@@ -58,11 +58,11 @@ + except: + pass + +- self.failUnless(c.MessageForLanguage('zh-CN', True) != None) ++ self.assertTrue(c.MessageForLanguage('zh-CN', True) != None) + + rex = re.compile('fr|de|bingo') +- self.failUnless(len(c.AllMessagesThatMatch(rex, False)) == 2) +- self.failUnless( ++ self.assertTrue(len(c.AllMessagesThatMatch(rex, False)) == 2) ++ self.assertTrue( + c.AllMessagesThatMatch(rex, True)[pseudo.PSEUDO_LANG] is not None) + + def testBestClique(self): +@@ -84,16 +84,16 @@ + text = msg.GetRealContent() + description = msg.GetDescription() + if text == 'Alfur': +- self.failUnless(description == 'alfaholl') ++ self.assertTrue(description == 'alfaholl') + elif text == 'Gryla': +- self.failUnless(description == 'vondakerling') ++ self.assertTrue(description == 'vondakerling') + elif text == 'Leppaludi': +- self.failUnless(description == 'ID: IDS_LL') +- self.failUnless(count_best_cliques == 5) ++ self.assertTrue(description == 'ID: IDS_LL') ++ self.assertTrue(count_best_cliques == 5) + + def testAllInUberClique(self): + resources = grd_reader.Parse( +- StringIO(u''' ++ StringIO(''' + + + +@@ -110,17 +110,17 @@ + resources.SetOutputLanguage('en') + resources.RunGatherers() + content_list = [] +- for clique_list in resources.UberClique().cliques_.values(): ++ for clique_list in list(resources.UberClique().cliques_.values()): + for clique in clique_list: + content_list.append(clique.GetMessage().GetRealContent()) +- self.failUnless('Hello %s, how are you doing today?' in content_list) +- self.failUnless('Jack "Black" Daniels' in content_list) +- self.failUnless('Hello!' in content_list) ++ self.assertTrue('Hello %s, how are you doing today?' in content_list) ++ self.assertTrue('Jack "Black" Daniels' in content_list) ++ self.assertTrue('Hello!' in content_list) + + def testCorrectExceptionIfWrongEncodingOnResourceFile(self): + '''This doesn't really belong in this unittest file, but what the heck.''' + resources = grd_reader.Parse( +- StringIO(u''' ++ StringIO(''' + + + +@@ -137,7 +137,7 @@ + tclib.Message(text='Hello USERNAME', + placeholders=[tclib.Placeholder('USERNAME', '%s', 'Joi')]), + ] +- self.failUnless(messages[0].GetId() == messages[1].GetId()) ++ self.assertTrue(messages[0].GetId() == messages[1].GetId()) + + # Both of the above would share a translation. 
+ translation = tclib.Translation(id=messages[0].GetId(), +@@ -151,9 +151,9 @@ + for clq in cliques: + clq.AddTranslation(translation, 'fr') + +- self.failUnless(cliques[0].MessageForLanguage('fr').GetRealContent() == ++ self.assertTrue(cliques[0].MessageForLanguage('fr').GetRealContent() == + 'Bonjour $1') +- self.failUnless(cliques[1].MessageForLanguage('fr').GetRealContent() == ++ self.assertTrue(cliques[1].MessageForLanguage('fr').GetRealContent() == + 'Bonjour %s') + + def testMissingTranslations(self): +@@ -163,17 +163,17 @@ + + cliques[1].MessageForLanguage('fr', False, True) + +- self.failUnless(not factory.HasMissingTranslations()) ++ self.assertTrue(not factory.HasMissingTranslations()) + + cliques[0].MessageForLanguage('de', False, False) + +- self.failUnless(factory.HasMissingTranslations()) ++ self.assertTrue(factory.HasMissingTranslations()) + + report = factory.MissingTranslationsReport() +- self.failUnless(report.count('WARNING') == 1) +- self.failUnless(report.count('8053599568341804890 "Goodbye" fr') == 1) +- self.failUnless(report.count('ERROR') == 1) +- self.failUnless(report.count('800120468867715734 "Hello" de') == 1) ++ self.assertTrue(report.count('WARNING') == 1) ++ self.assertTrue(report.count('8053599568341804890 "Goodbye" fr') == 1) ++ self.assertTrue(report.count('ERROR') == 1) ++ self.assertTrue(report.count('800120468867715734 "Hello" de') == 1) + + def testCustomTypes(self): + factory = clique.UberClique() +@@ -191,22 +191,22 @@ + 'grit.clique_unittest.DummyCustomType', clique.CustomType)) + translation = tclib.Translation(id=message.GetId(), text='Bilingo bolongo') + c.AddTranslation(translation, 'fr') +- self.failUnless(c.MessageForLanguage('fr').GetRealContent().startswith('jjj')) ++ self.assertTrue(c.MessageForLanguage('fr').GetRealContent().startswith('jjj')) + + def testWhitespaceMessagesAreNontranslateable(self): + factory = clique.UberClique() + + message = tclib.Message(text=' \t') + c = factory.MakeClique(message, translateable=True) +- self.failIf(c.IsTranslateable()) ++ self.assertFalse(c.IsTranslateable()) + + message = tclib.Message(text='\n \n ') + c = factory.MakeClique(message, translateable=True) +- self.failIf(c.IsTranslateable()) ++ self.assertFalse(c.IsTranslateable()) + + message = tclib.Message(text='\n hello') + c = factory.MakeClique(message, translateable=True) +- self.failUnless(c.IsTranslateable()) ++ self.assertTrue(c.IsTranslateable()) + + def testEachCliqueKeptSorted(self): + factory = clique.UberClique() +@@ -218,10 +218,10 @@ + clique_a = factory.MakeClique(msg_a, translateable=True) + clique_c = factory.MakeClique(msg_c, translateable=True) + clique_list = factory.cliques_[clique_a.GetId()] +- self.failUnless(len(clique_list) == 3) +- self.failUnless(clique_list[0] == clique_a) +- self.failUnless(clique_list[1] == clique_b) +- self.failUnless(clique_list[2] == clique_c) ++ self.assertTrue(len(clique_list) == 3) ++ self.assertTrue(clique_list[0] == clique_a) ++ self.assertTrue(clique_list[1] == clique_b) ++ self.assertTrue(clique_list[2] == clique_c) + + def testBestCliqueSortIsStable(self): + factory = clique.UberClique() +@@ -236,19 +236,19 @@ + # Insert in an order that tests all outcomes. 
+ clique_no_description = factory.MakeClique(msg_no_description, + translateable=True) +- self.failUnless(factory.BestClique(clique_id) == clique_no_description) ++ self.assertTrue(factory.BestClique(clique_id) == clique_no_description) + clique_id_description_b = factory.MakeClique(msg_id_description_b, + translateable=True) +- self.failUnless(factory.BestClique(clique_id) == clique_id_description_b) ++ self.assertTrue(factory.BestClique(clique_id) == clique_id_description_b) + clique_id_description_a = factory.MakeClique(msg_id_description_a, + translateable=True) +- self.failUnless(factory.BestClique(clique_id) == clique_id_description_a) ++ self.assertTrue(factory.BestClique(clique_id) == clique_id_description_a) + clique_description_y = factory.MakeClique(msg_description_y, + translateable=True) +- self.failUnless(factory.BestClique(clique_id) == clique_description_y) ++ self.assertTrue(factory.BestClique(clique_id) == clique_description_y) + clique_description_x = factory.MakeClique(msg_description_x, + translateable=True) +- self.failUnless(factory.BestClique(clique_id) == clique_description_x) ++ self.assertTrue(factory.BestClique(clique_id) == clique_description_x) + + + class DummyCustomType(clique.CustomType): +--- a/src/3rdparty/chromium/tools/grit/grit/constants.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/constants.py 2025-01-16 02:26:08.626012286 +0800 +@@ -5,12 +5,12 @@ + '''Constant definitions for GRIT. + ''' + +-from __future__ import print_function ++ + + # This is the Icelandic noun meaning "grit" and is used to check that our + # input files are in the correct encoding. The middle character gets encoded + # as two bytes in UTF-8, so this is sufficient to detect incorrect encoding. +-ENCODING_CHECK = u'm\u00f6l' ++ENCODING_CHECK = 'm\u00f6l' + + # A special language, translations into which are always "TTTTTT". + CONSTANT_LANGUAGE = 'x_constant' +--- a/src/3rdparty/chromium/tools/grit/grit/exception.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/exception.py 2025-01-16 02:26:08.626012286 +0800 +@@ -5,7 +5,7 @@ + '''Exception types for GRIT. + ''' + +-from __future__ import print_function ++ + + class Base(Exception): + '''A base exception that uses the class's docstring in addition to any +--- a/src/3rdparty/chromium/tools/grit/grit/grd_reader.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/grd_reader.py 2025-01-16 02:26:08.626012286 +0800 +@@ -6,7 +6,7 @@ + '''Class for reading GRD files into memory, without processing them. 
+ ''' + +-from __future__ import print_function ++ + + import os.path + import sys +@@ -54,7 +54,7 @@ + return + + if self.debug: +- attr_list = ' '.join('%s="%s"' % kv for kv in attrs.items()) ++ attr_list = ' '.join('%s="%s"' % kv for kv in list(attrs.items())) + print("Starting parsing of element %s with attributes %r" % + (name, attr_list or '(none)')) + +@@ -76,7 +76,7 @@ + node.SetDefines(self.defines) + self.stack.append(node) + +- for attr, attrval in attrs.items(): ++ for attr, attrval in list(attrs.items()): + node.HandleAttribute(attr, attrval) + + def endElement(self, name): +--- a/src/3rdparty/chromium/tools/grit/grit/grd_reader_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/grd_reader_unittest.py 2025-01-16 02:26:08.626012286 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for grd_reader package''' + +-from __future__ import print_function ++ + + import os + import sys +@@ -26,7 +26,7 @@ + + class GrdReaderUnittest(unittest.TestCase): + def testParsingAndXmlOutput(self): +- input = u''' ++ input = ''' + + + +@@ -57,13 +57,13 @@ + pseudo_file = StringIO(input) + tree = grd_reader.Parse(pseudo_file, '.') + output = six.text_type(tree) +- expected_output = input.replace(u' base_dir="."', u'') ++ expected_output = input.replace(' base_dir="."', '') + self.assertEqual(expected_output, output) +- self.failUnless(tree.GetNodeById('IDS_GREETING')) ++ self.assertTrue(tree.GetNodeById('IDS_GREETING')) + + + def testStopAfter(self): +- input = u''' ++ input = ''' + + + +@@ -78,11 +78,11 @@ + pseudo_file = StringIO(input) + tree = grd_reader.Parse(pseudo_file, '.', stop_after='outputs') + # only an child +- self.failUnless(len(tree.children) == 1) +- self.failUnless(tree.children[0].name == 'outputs') ++ self.assertTrue(len(tree.children) == 1) ++ self.assertTrue(tree.children[0].name == 'outputs') + + def testLongLinesWithComments(self): +- input = u''' ++ input = ''' + + + +@@ -98,12 +98,12 @@ + tree = grd_reader.Parse(pseudo_file, '.') + + greeting = tree.GetNodeById('IDS_GREETING') +- self.failUnless(greeting.GetCliques()[0].GetMessage().GetRealContent() == ++ self.assertTrue(greeting.GetCliques()[0].GetMessage().GetRealContent() == + 'This is a very long line with no linebreaks yes yes it ' + 'stretches on and on and on!') + + def doTestAssignFirstIds(self, first_ids_path): +- input = u''' ++ input = ''' + + +@@ -122,8 +122,8 @@ + root = grd_reader.Parse(pseudo_file, os.path.split(fake_input_path)[0]) + root.AssignFirstIds(fake_input_path, {}) + messages_node = root.children[0].children[0] +- self.failUnless(isinstance(messages_node, empty.MessagesNode)) +- self.failUnless(messages_node.attrs["first_id"] != ++ self.assertTrue(isinstance(messages_node, empty.MessagesNode)) ++ self.assertTrue(messages_node.attrs["first_id"] != + empty.MessagesNode().DefaultAttributes()["first_id"]) + + def testAssignFirstIds(self): +@@ -135,7 +135,7 @@ + def testAssignFirstIdsMultipleMessages(self): + """If there are multiple messages sections, the resource_ids file + needs to list multiple first_id values.""" +- input = u''' ++ input = ''' + + +@@ -166,7 +166,7 @@ + self.assertEqual('10000', messages_node.attrs["first_id"]) + + def testUseNameForIdAndPpIfdef(self): +- input = u''' ++ input = ''' + + + +@@ -184,10 +184,10 @@ + # Check if the ID is set to the name. In the past, there was a bug + # that caused the ID to be a generated number. 
+ hello = root.GetNodeById('IDS_HELLO') +- self.failUnless(hello.GetCliques()[0].GetId() == 'IDS_HELLO') ++ self.assertTrue(hello.GetCliques()[0].GetId() == 'IDS_HELLO') + + def testUseNameForIdWithIfElse(self): +- input = u''' ++ input = ''' + + + +@@ -212,16 +212,16 @@ + # Check if the ID is set to the name. In the past, there was a bug + # that caused the ID to be a generated number. + hello = root.GetNodeById('IDS_HELLO') +- self.failUnless(hello.GetCliques()[0].GetId() == 'IDS_HELLO') ++ self.assertTrue(hello.GetCliques()[0].GetId() == 'IDS_HELLO') + + def testPartInclusionAndCorrectSource(self): +- arbitrary_path_grd = u'''\ ++ arbitrary_path_grd = '''\ + + test5 + ''' + tmp_dir = util.TempDir({'arbitrary_path.grp': arbitrary_path_grd}) + arbitrary_path_grd_file = tmp_dir.GetPath('arbitrary_path.grp') +- top_grd = u'''\ ++ top_grd = '''\ + + + +@@ -233,17 +233,17 @@ + + + ''' % arbitrary_path_grd_file +- sub_grd = u'''\ ++ sub_grd = '''\ + + test2 + + test3 + ''' +- subsub_grd = u'''\ ++ subsub_grd = '''\ + + test4 + ''' +- expected_output = u'''\ ++ expected_output = '''\ + + + +@@ -292,7 +292,7 @@ + tmp_dir.CleanUp() + + def testPartInclusionFailure(self): +- template = u''' ++ template = ''' + + + %s +@@ -300,28 +300,28 @@ + ''' + + part_failures = [ +- (exception.UnexpectedContent, u'fnord'), ++ (exception.UnexpectedContent, 'fnord'), + (exception.UnexpectedChild, +- u''), +- (exception.FileNotFound, u''), ++ ''), ++ (exception.FileNotFound, ''), + ] + for raises, data in part_failures: + data = StringIO(template % data) + self.assertRaises(raises, grd_reader.Parse, data, '.') + + gritpart_failures = [ +- (exception.UnexpectedAttribute, u''), +- (exception.MissingElement, u''), ++ (exception.UnexpectedAttribute, ''), ++ (exception.MissingElement, ''), + ] + for raises, data in gritpart_failures: +- top_grd = StringIO(template % u'') ++ top_grd = StringIO(template % '') + with util.TempDir({'bad.grp': data}) as temp_dir: + self.assertRaises(raises, grd_reader.Parse, top_grd, temp_dir.GetPath()) + + def testEarlyEnoughPlatformSpecification(self): + # This is a regression test for issue + # https://code.google.com/p/grit-i18n/issues/detail?id=23 +- grd_text = u''' ++ grd_text = ''' + + + +--- a/src/3rdparty/chromium/tools/grit/grit/grit_runner.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/grit_runner.py 2025-01-16 02:26:08.626012286 +0800 +@@ -7,7 +7,7 @@ + GRIT tools. 
+ """ + +-from __future__ import print_function ++ + + import os + import sys +--- a/src/3rdparty/chromium/tools/grit/grit/grit_runner_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/grit_runner_unittest.py 2025-01-16 02:26:08.626012286 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for grit.py''' + +-from __future__ import print_function ++ + + import os + import sys +@@ -33,9 +33,9 @@ + util.PathFromRoot('grit/testdata/simple-input.xml'), + 'test', 'bla', 'voff', 'ga']) + output = self.buf.getvalue() +- self.failUnless(output.count("'test'") == 0) # tool name doesn't occur +- self.failUnless(output.count('bla')) +- self.failUnless(output.count('simple-input.xml')) ++ self.assertTrue(output.count("'test'") == 0) # tool name doesn't occur ++ self.assertTrue(output.count('bla')) ++ self.assertTrue(output.count('simple-input.xml')) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/tools/grit/grit/lazy_re.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/lazy_re.py 2025-01-16 02:26:08.626012286 +0800 +@@ -8,7 +8,7 @@ + time in some cases. + ''' + +-from __future__ import print_function ++ + + import re + +--- a/src/3rdparty/chromium/tools/grit/grit/lazy_re_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/lazy_re_unittest.py 2025-01-16 02:26:08.626012286 +0800 +@@ -6,7 +6,7 @@ + '''Unit test for lazy_re. + ''' + +-from __future__ import print_function ++ + + import os + import sys +--- a/src/3rdparty/chromium/tools/grit/grit/pseudo.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/pseudo.py 2025-01-16 02:26:08.626012286 +0800 +@@ -21,7 +21,7 @@ + the latin-1 character set which will stress character encoding bugs. + ''' + +-from __future__ import print_function ++ + + from grit import lazy_re + from grit import tclib +@@ -36,22 +36,22 @@ + # a better solution, i.e. one that introduces a non-latin1 character into the + # pseudotranslation. + #_QOF = u'\u05e7' +-_QOF = u'P' ++_QOF = 'P' + + # How we map each vowel. + _VOWELS = { +- u'a' : u'\u00e5', # a with ring +- u'e' : u'\u00e9', # e acute +- u'i' : u'\u00ef', # i diaresis +- u'o' : u'\u00f4', # o circumflex +- u'u' : u'\u00fc', # u diaresis +- u'y' : u'\u00fd', # y acute +- u'A' : u'\u00c5', # A with ring +- u'E' : u'\u00c9', # E acute +- u'I' : u'\u00cf', # I diaresis +- u'O' : u'\u00d4', # O circumflex +- u'U' : u'\u00dc', # U diaresis +- u'Y' : u'\u00dd', # Y acute ++ 'a' : '\u00e5', # a with ring ++ 'e' : '\u00e9', # e acute ++ 'i' : '\u00ef', # i diaresis ++ 'o' : '\u00f4', # o circumflex ++ 'u' : '\u00fc', # u diaresis ++ 'y' : '\u00fd', # y acute ++ 'A' : '\u00c5', # A with ring ++ 'E' : '\u00c9', # E acute ++ 'I' : '\u00cf', # I diaresis ++ 'O' : '\u00d4', # O circumflex ++ 'U' : '\u00dc', # U diaresis ++ 'Y' : '\u00dd', # Y acute + } + _VOWELS_KEYS = set(_VOWELS.keys()) + +@@ -87,7 +87,7 @@ + if str in _existing_translations: + return _existing_translations[str] + +- outstr = u'' ++ outstr = '' + ix = 0 + while ix < len(str): + if str[ix] not in _VOWELS_KEYS: +@@ -96,7 +96,7 @@ + else: + # We want to treat consecutive vowels as one composite vowel. This is not + # always accurate e.g. in composite words but good enough. 
+- consecutive_vowels = u'' ++ consecutive_vowels = '' + while ix < len(str) and str[ix] in _VOWELS_KEYS: + consecutive_vowels += str[ix] + ix += 1 +--- a/src/3rdparty/chromium/tools/grit/grit/pseudo_rtl.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/pseudo_rtl.py 2025-01-16 02:26:08.626012286 +0800 +@@ -7,7 +7,7 @@ + More info at https://sites.google.com/a/chromium.org/dev/Home/fake-bidi + ''' + +-from __future__ import print_function ++ + + import re + +@@ -15,17 +15,17 @@ + from grit import tclib + + ACCENTED_STRINGS = { +- 'a': u"\u00e5", 'e': u"\u00e9", 'i': u"\u00ee", 'o': u"\u00f6", +- 'u': u"\u00fb", 'A': u"\u00c5", 'E': u"\u00c9", 'I': u"\u00ce", +- 'O': u"\u00d6", 'U': u"\u00db", 'c': u"\u00e7", 'd': u"\u00f0", +- 'n': u"\u00f1", 'p': u"\u00fe", 'y': u"\u00fd", 'C': u"\u00c7", +- 'D': u"\u00d0", 'N': u"\u00d1", 'P': u"\u00de", 'Y': u"\u00dd", +- 'f': u"\u0192", 's': u"\u0161", 'S': u"\u0160", 'z': u"\u017e", +- 'Z': u"\u017d", 'g': u"\u011d", 'G': u"\u011c", 'h': u"\u0125", +- 'H': u"\u0124", 'j': u"\u0135", 'J': u"\u0134", 'k': u"\u0137", +- 'K': u"\u0136", 'l': u"\u013c", 'L': u"\u013b", 't': u"\u0163", +- 'T': u"\u0162", 'w': u"\u0175", 'W': u"\u0174", +- '$': u"\u20ac", '?': u"\u00bf", 'R': u"\u00ae", r'!': u"\u00a1", ++ 'a': "\u00e5", 'e': "\u00e9", 'i': "\u00ee", 'o': "\u00f6", ++ 'u': "\u00fb", 'A': "\u00c5", 'E': "\u00c9", 'I': "\u00ce", ++ 'O': "\u00d6", 'U': "\u00db", 'c': "\u00e7", 'd': "\u00f0", ++ 'n': "\u00f1", 'p': "\u00fe", 'y': "\u00fd", 'C': "\u00c7", ++ 'D': "\u00d0", 'N': "\u00d1", 'P': "\u00de", 'Y': "\u00dd", ++ 'f': "\u0192", 's': "\u0161", 'S': "\u0160", 'z': "\u017e", ++ 'Z': "\u017d", 'g': "\u011d", 'G': "\u011c", 'h': "\u0125", ++ 'H': "\u0124", 'j': "\u0135", 'J': "\u0134", 'k': "\u0137", ++ 'K': "\u0136", 'l': "\u013c", 'L': "\u013b", 't': "\u0163", ++ 'T': "\u0162", 'w': "\u0175", 'W': "\u0174", ++ '$': "\u20ac", '?': "\u00bf", 'R': "\u00ae", r'!': "\u00a1", + } + + # a character set containing the keys in ACCENTED_STRINGS +@@ -34,7 +34,7 @@ + # character. We also need to consider the case like "\\n", which means + # a blackslash and a character "n", we will accent the character "n". + TO_ACCENT = lazy_re.compile( +- r'[%s]|\\[a-z\\]' % ''.join(ACCENTED_STRINGS.keys())) ++ r'[%s]|\\[a-z\\]' % ''.join(list(ACCENTED_STRINGS.keys()))) + + # Lex text so that we don't interfere with html tokens and entities. + # This lexing scheme will handle all well formed tags and entities, html or +@@ -62,8 +62,8 @@ + + ALPHABETIC_RUN = lazy_re.compile(r'([^\W0-9_]+)') + +-RLO = u'\u202e' +-PDF = u'\u202c' ++RLO = '\u202e' ++PDF = '\u202c' + + def PseudoRTLString(text): + '''Returns a fake bidirectional version of the source string. 
This code is +--- a/src/3rdparty/chromium/tools/grit/grit/pseudo_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/pseudo_unittest.py 2025-01-16 02:26:08.626012286 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for grit.pseudo''' + +-from __future__ import print_function ++ + + import os + import sys +@@ -20,24 +20,24 @@ + + class PseudoUnittest(unittest.TestCase): + def testVowelMapping(self): +- self.failUnless(pseudo.MapVowels('abebibobuby') == +- u'\u00e5b\u00e9b\u00efb\u00f4b\u00fcb\u00fd') +- self.failUnless(pseudo.MapVowels('ABEBIBOBUBY') == +- u'\u00c5B\u00c9B\u00cfB\u00d4B\u00dcB\u00dd') ++ self.assertTrue(pseudo.MapVowels('abebibobuby') == ++ '\u00e5b\u00e9b\u00efb\u00f4b\u00fcb\u00fd') ++ self.assertTrue(pseudo.MapVowels('ABEBIBOBUBY') == ++ '\u00c5B\u00c9B\u00cfB\u00d4B\u00dcB\u00dd') + + def testPseudoString(self): + out = pseudo.PseudoString('hello') +- self.failUnless(out == pseudo.MapVowels(u'hePelloPo', True)) ++ self.assertTrue(out == pseudo.MapVowels('hePelloPo', True)) + + def testConsecutiveVowels(self): + out = pseudo.PseudoString("beautiful weather, ain't it?") +- self.failUnless(out == pseudo.MapVowels( +- u"beauPeautiPifuPul weaPeathePer, aiPain't iPit?", 1)) ++ self.assertTrue(out == pseudo.MapVowels( ++ "beauPeautiPifuPul weaPeathePer, aiPain't iPit?", 1)) + + def testCapitals(self): + out = pseudo.PseudoString("HOWDIE DOODIE, DR. JONES") +- self.failUnless(out == pseudo.MapVowels( +- u"HOPOWDIEPIE DOOPOODIEPIE, DR. JOPONEPES", 1)) ++ self.assertTrue(out == pseudo.MapVowels( ++ "HOPOWDIEPIE DOOPOODIEPIE, DR. JOPONEPES", 1)) + + def testPseudoMessage(self): + msg = tclib.Message(text='Hello USERNAME, how are you?', +@@ -46,9 +46,9 @@ + trans = pseudo.PseudoMessage(msg) + # TODO(joi) It would be nicer if 'you' -> 'youPou' instead of + # 'you' -> 'youPyou' and if we handled the silent e in 'are' +- self.failUnless(trans.GetPresentableContent() == ++ self.assertTrue(trans.GetPresentableContent() == + pseudo.MapVowels( +- u'HePelloPo USERNAME, hoPow aParePe youPyou?', 1)) ++ 'HePelloPo USERNAME, hoPow aParePe youPyou?', 1)) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/tools/grit/grit/shortcuts.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/shortcuts.py 2025-01-16 02:26:08.626012286 +0800 +@@ -5,7 +5,7 @@ + '''Stuff to prevent conflicting shortcuts. + ''' + +-from __future__ import print_function ++ + + from grit import lazy_re + +@@ -35,7 +35,7 @@ + return + + self.cliques.append(c) +- for (lang, msg) in c.clique.items(): ++ for (lang, msg) in list(c.clique.items()): + if lang not in self.keys_by_lang: + self.keys_by_lang[lang] = {} + keymap = self.keys_by_lang[lang] +@@ -53,8 +53,8 @@ + # For any language that has more than one occurrence of any shortcut, + # make a list of the conflicting shortcuts. 
+ problem_langs = {} +- for (lang, keys) in self.keys_by_lang.items(): +- for (key, count) in keys.items(): ++ for (lang, keys) in list(self.keys_by_lang.items()): ++ for (key, count) in list(keys.items()): + if count > 1: + if lang not in problem_langs: + problem_langs[lang] = [] +@@ -64,7 +64,7 @@ + if len(problem_langs): + warnings.append("WARNING - duplicate keys exist in shortcut group %s" % + self.name) +- for (lang,keys) in problem_langs.items(): ++ for (lang,keys) in list(problem_langs.items()): + warnings.append(" %6s duplicates: %s" % (lang, ', '.join(keys))) + return warnings + +@@ -88,6 +88,6 @@ + if group not in groups: + groups[group] = ShortcutGroup(group) + groups[group].AddClique(c) +- for group in groups.values(): ++ for group in list(groups.values()): + warnings += group.GenerateWarnings(tc_project) + return warnings +--- a/src/3rdparty/chromium/tools/grit/grit/shortcuts_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/shortcuts_unittest.py 2025-01-16 02:26:08.626012286 +0800 +@@ -5,7 +5,7 @@ + '''Unit tests for grit.shortcuts + ''' + +-from __future__ import print_function ++ + + import os + import sys +@@ -33,7 +33,7 @@ + c.AddToShortcutGroup('group_name') + + warnings = shortcuts.GenerateDuplicateShortcutsWarnings(self.uq, 'PROJECT') +- self.failUnless(warnings) ++ self.assertTrue(warnings) + + def testAmpersandEscaping(self): + c = self.uq.MakeClique(tclib.Message(text="Hello &there")) +@@ -42,7 +42,7 @@ + c.AddToShortcutGroup('group_name') + + warnings = shortcuts.GenerateDuplicateShortcutsWarnings(self.uq, 'PROJECT') +- self.failUnless(len(warnings) == 0) ++ self.assertTrue(len(warnings) == 0) + + def testDialog(self): + dlg = rc.Dialog(StringIO('''\ +@@ -75,5 +75,5 @@ + dlg.Parse() + + warnings = shortcuts.GenerateDuplicateShortcutsWarnings(self.uq, 'PROJECT') +- self.failUnless(len(warnings) == 0) ++ self.assertTrue(len(warnings) == 0) + +--- a/src/3rdparty/chromium/tools/grit/grit/tclib.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/tclib.py 2025-01-16 02:26:08.626012286 +0800 +@@ -5,7 +5,7 @@ + '''Adaptation of the extern.tclib classes for our needs. + ''' + +-from __future__ import print_function ++ + + import functools + import re +@@ -54,7 +54,7 @@ + # substrings of the longer tag. + # E.g. "EXAMPLE_FOO_NAME" must be matched before "EXAMPLE_FOO", + # otherwise "EXAMPLE_FOO" splits "EXAMPLE_FOO_NAME" too. 
+- tags = sorted(tag_map.keys(), ++ tags = sorted(list(tag_map.keys()), + key=functools.cmp_to_key( + lambda x, y: len(x) - len(y) or ((x > y) - (x < y))), + reverse=True) +--- a/src/3rdparty/chromium/tools/grit/grit/tclib_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/tclib_unittest.py 2025-01-16 02:26:08.626012286 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for grit.tclib''' + +-from __future__ import print_function ++ + + import sys + import os.path +@@ -24,41 +24,41 @@ + + class TclibUnittest(unittest.TestCase): + def testInit(self): +- msg = tclib.Message(text=u'Hello Earthlings', ++ msg = tclib.Message(text='Hello Earthlings', + description='Greetings\n\t message') +- self.failUnlessEqual(msg.GetPresentableContent(), 'Hello Earthlings') +- self.failUnless(isinstance(msg.GetPresentableContent(), six.string_types)) +- self.failUnlessEqual(msg.GetDescription(), 'Greetings message') ++ self.assertEqual(msg.GetPresentableContent(), 'Hello Earthlings') ++ self.assertTrue(isinstance(msg.GetPresentableContent(), six.string_types)) ++ self.assertEqual(msg.GetDescription(), 'Greetings message') + + def testGetAttr(self): + msg = tclib.Message() +- msg.AppendText(u'Hello') # Tests __getattr__ +- self.failUnless(msg.GetPresentableContent() == 'Hello') +- self.failUnless(isinstance(msg.GetPresentableContent(), six.string_types)) ++ msg.AppendText('Hello') # Tests __getattr__ ++ self.assertTrue(msg.GetPresentableContent() == 'Hello') ++ self.assertTrue(isinstance(msg.GetPresentableContent(), six.string_types)) + + def testAll(self): +- text = u'Howdie USERNAME' +- phs = [tclib.Placeholder(u'USERNAME', u'%s', 'Joi')] ++ text = 'Howdie USERNAME' ++ phs = [tclib.Placeholder('USERNAME', '%s', 'Joi')] + msg = tclib.Message(text=text, placeholders=phs) +- self.failUnless(msg.GetPresentableContent() == 'Howdie USERNAME') ++ self.assertTrue(msg.GetPresentableContent() == 'Howdie USERNAME') + + trans = tclib.Translation(text=text, placeholders=phs) +- self.failUnless(trans.GetPresentableContent() == 'Howdie USERNAME') +- self.failUnless(isinstance(trans.GetPresentableContent(), six.string_types)) ++ self.assertTrue(trans.GetPresentableContent() == 'Howdie USERNAME') ++ self.assertTrue(isinstance(trans.GetPresentableContent(), six.string_types)) + + def testUnicodeReturn(self): +- text = u'\u00fe' ++ text = '\u00fe' + msg = tclib.Message(text=text) +- self.failUnless(msg.GetPresentableContent() == text) ++ self.assertTrue(msg.GetPresentableContent() == text) + from_list = msg.GetContent()[0] +- self.failUnless(from_list == text) ++ self.assertTrue(from_list == text) + + def testRegressionTranslationInherited(self): + '''Regression tests a bug that was caused by grit.tclib.Translation + inheriting from the translation console's Translation object + instead of only owning an instance of it. + ''' +- msg = tclib.Message(text=u"BLA1\r\nFrom: BLA2 \u00fe BLA3", ++ msg = tclib.Message(text="BLA1\r\nFrom: BLA2 \u00fe BLA3", + placeholders=[ + tclib.Placeholder('BLA1', '%s', '%s'), + tclib.Placeholder('BLA2', '%s', '%s'), +@@ -66,7 +66,7 @@ + transl = tclib.Translation(text=msg.GetPresentableContent(), + placeholders=msg.GetPlaceholders()) + content = transl.GetContent() +- self.failUnless(isinstance(content[3], six.string_types)) ++ self.assertTrue(isinstance(content[3], six.string_types)) + + def testFingerprint(self): + # This has Windows line endings. That is on purpose. 
+@@ -171,7 +171,7 @@
+     phs = [tclib.Placeholder(word[:i], str(i), str(i)) for i in range(1, 11)]
+     try:
+       msg = tclib.Message(text=text, placeholders=phs)
+-      self.failUnless(msg.GetRealContent() == '1 2 3 4 5 6 7 8 9 10')
++      self.assertTrue(msg.GetRealContent() == '1 2 3 4 5 6 7 8 9 10')
+     except:
+       self.fail('tclib.Message() should handle placeholders that are '
+                 'substrings of each other')
+--- a/src/3rdparty/chromium/tools/grit/grit/test_suite_all.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/grit/grit/test_suite_all.py 2025-01-16 02:26:08.626012286 +0800
+@@ -5,7 +5,7 @@
+ 
+ '''Unit test suite that collects all test cases for GRIT.'''
+ 
+-from __future__ import print_function
++
+ 
+ import os
+ import sys
+--- a/src/3rdparty/chromium/tools/grit/grit/util.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/grit/grit/util.py 2025-01-16 02:40:03.301721266 +0800
+@@ -5,7 +5,7 @@
+ '''Utilities used by GRIT.
+ '''
+ 
+-from __future__ import print_function
++
+ 
+ import codecs
+ import io
+@@ -30,7 +30,7 @@
+ 
+ 
+ # Unique constants representing data pack encodings.
+-_, UTF8, UTF16 = range(3)
++_, UTF8, UTF16 = list(range(3))
+ 
+ def abs(filename):
+   return os.path.normpath(os.path.join(os.getcwd(), filename))
+@@ -211,7 +211,7 @@
+     mode = 'rb'
+     encoding = None
+   else:
+-    mode = 'rU'
++    mode = 'r'
+ 
+   with io.open(abs(filename), mode, encoding=encoding) as f:
+     return f.read()
+@@ -272,16 +272,16 @@
+   def Replace(match):
+     groups = match.groupdict()
+     if groups['hex']:
+-      return six.unichr(int(groups['hex'], 16))
++      return chr(int(groups['hex'], 16))
+     elif groups['decimal']:
+-      return six.unichr(int(groups['decimal'], 10))
++      return chr(int(groups['decimal'], 10))
+     else:
+       name = groups['named']
+       if name == 'nbsp' and not replace_nbsp:
+         return match.group() # Don't replace &nbsp;
+       assert name != None
+       if name in entities.name2codepoint:
+-        return six.unichr(entities.name2codepoint[name])
++        return chr(entities.name2codepoint[name])
+       else:
+         return match.group() # Unknown HTML character entity - don't replace
+ 
+@@ -658,7 +658,7 @@
+   def __init__(self, file_data, mode='w'):
+     self._tmp_dir_name = tempfile.mkdtemp()
+     assert not os.listdir(self.GetPath())
+-    for name, contents in file_data.items():
++    for name, contents in list(file_data.items()):
+       file_path = self.GetPath(name)
+       dir_path = os.path.split(file_path)[0]
+       if not os.path.exists(dir_path):
+--- a/src/3rdparty/chromium/tools/grit/grit/util_unittest.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/grit/grit/util_unittest.py 2025-01-16 02:26:08.626012286 +0800
+@@ -6,7 +6,7 @@
+ '''Unit test that checks some of util functions.
+ '''
+ 
+-from __future__ import print_function
++
+ 
+ import os
+ import sys
+@@ -29,35 +29,35 @@
+     # Should fail, it is not supported by the function now (as documented)
+     cls = util.NewClassInstance('grit.util.TestClassToLoad',
+                                 TestBaseClassToLoad)
+-    self.failUnless(cls == None)
++    self.assertTrue(cls == None)
+ 
+     # Test non existent class name
+     cls = util.NewClassInstance('grit.util_unittest.NotExistingClass',
+                                 TestBaseClassToLoad)
+-    self.failUnless(cls == None)
++    self.assertTrue(cls == None)
+ 
+     # Test valid class name and valid base class
+     cls = util.NewClassInstance('grit.util_unittest.TestClassToLoad',
+                                 TestBaseClassToLoad)
+-    self.failUnless(isinstance(cls, TestBaseClassToLoad))
++    self.assertTrue(isinstance(cls, TestBaseClassToLoad))
+ 
+     # Test valid class name with wrong hierarchy
+     cls = util.NewClassInstance('grit.util_unittest.TestClassNoBase',
+                                 TestBaseClassToLoad)
+-    self.failUnless(cls == None)
++    self.assertTrue(cls == None)
+ 
+   def testCanonicalLanguage(self):
+-    self.failUnless(util.CanonicalLanguage('en') == 'en')
+-    self.failUnless(util.CanonicalLanguage('pt_br') == 'pt-BR')
+-    self.failUnless(util.CanonicalLanguage('pt-br') == 'pt-BR')
+-    self.failUnless(util.CanonicalLanguage('pt-BR') == 'pt-BR')
+-    self.failUnless(util.CanonicalLanguage('pt/br') == 'pt-BR')
+-    self.failUnless(util.CanonicalLanguage('pt/BR') == 'pt-BR')
+-    self.failUnless(util.CanonicalLanguage('no_no_bokmal') == 'no-NO-BOKMAL')
++    self.assertTrue(util.CanonicalLanguage('en') == 'en')
++    self.assertTrue(util.CanonicalLanguage('pt_br') == 'pt-BR')
++    self.assertTrue(util.CanonicalLanguage('pt-br') == 'pt-BR')
++    self.assertTrue(util.CanonicalLanguage('pt-BR') == 'pt-BR')
++    self.assertTrue(util.CanonicalLanguage('pt/br') == 'pt-BR')
++    self.assertTrue(util.CanonicalLanguage('pt/BR') == 'pt-BR')
++    self.assertTrue(util.CanonicalLanguage('no_no_bokmal') == 'no-NO-BOKMAL')
+ 
+   def testUnescapeHtml(self):
+-    self.failUnless(util.UnescapeHtml('&#1010;') == six.unichr(1010))
+-    self.failUnless(util.UnescapeHtml('&#xABCD;') == six.unichr(43981))
++    self.assertTrue(util.UnescapeHtml('&#1010;') == chr(1010))
++    self.assertTrue(util.UnescapeHtml('&#xABCD;') == chr(43981))
+ 
+   def testRelativePath(self):
+     """ Verify that MakeRelativePath works in some tricky cases."""
+@@ -69,7 +69,7 @@
+     for path1 in [base_path, base_path + os.path.sep]:
+       for path2 in [other_path, other_path + os.path.sep]:
+         result = util.MakeRelativePath(path1, path2)
+-        self.failUnless(result == expected_result)
++        self.assertTrue(result == expected_result)
+ 
+     # set-up variables
+     root_dir = 'c:%sa' % os.path.sep
+--- a/src/3rdparty/chromium/tools/grit/grit/xtb_reader.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/grit/grit/xtb_reader.py 2025-01-16 02:26:08.632512176 +0800
+@@ -5,7 +5,7 @@
+ '''Fast and efficient parser for XTB files.
+ '''
+ 
+-from __future__ import print_function
++
+ 
+ import sys
+ import xml.sax
+--- a/src/3rdparty/chromium/tools/grit/grit/xtb_reader_unittest.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/grit/grit/xtb_reader_unittest.py 2025-01-16 02:26:08.632512176 +0800
+@@ -5,7 +5,7 @@
+ 
+ '''Unit tests for grit.xtb_reader'''
+ 
+-from __future__ import print_function
++
+ 
+ import io
+ import os
+@@ -40,10 +40,10 @@
+     def Callback(id, structure):
+       messages.append((id, structure))
+     xtb_reader.Parse(xtb_file, Callback)
+-    self.failUnless(len(messages[0][1]) == 1)
+-    self.failUnless(messages[3][1][0]) # PROBLEM_REPORT placeholder
+-    self.failUnless(messages[4][0] == '7729135689895381486')
+-    self.failUnless(messages[4][1][7][1] == 'and another after a blank line.')
++    self.assertTrue(len(messages[0][1]) == 1)
++    self.assertTrue(messages[3][1][0]) # PROBLEM_REPORT placeholder
++    self.assertTrue(messages[4][0] == '7729135689895381486')
++    self.assertTrue(messages[4][1][7][1] == 'and another after a blank line.')
+ 
+   def testParsingIntoMessages(self):
+     root = util.ParseGrdForUnittest('''
+@@ -71,7 +71,7 @@
+           msgs.UberClique().GenerateXtbParserCallback('is'))
+     self.assertEqual('Meirihattar!',
+                      clique_mega.MessageForLanguage('is').GetRealContent())
+-    self.failUnless('Saelir %s',
++    self.assertTrue('Saelir %s',
+                     clique_hello_user.MessageForLanguage('is').GetRealContent())
+ 
+   def testIfNodesWithUseNameForId(self):
+--- a/src/3rdparty/chromium/tools/grit/grit/extern/BogoFP.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/grit/grit/extern/BogoFP.py 2025-01-16 02:26:08.632512176 +0800
+@@ -9,7 +9,7 @@
+ grit.py -h grit.extern.BogoFP xmb /tmp/foo
+ """
+ 
+-from __future__ import print_function
++
+ 
+ import grit.extern.FP
+ 
+--- a/src/3rdparty/chromium/tools/grit/grit/extern/FP.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/grit/grit/extern/FP.py 2025-01-16 02:26:08.632512176 +0800
+@@ -2,7 +2,7 @@
+ # Use of this source code is governed by a BSD-style license that can be
+ # found in the LICENSE file.
+ 
+-from __future__ import print_function
++
+ 
+ try:
+   import hashlib
+--- a/src/3rdparty/chromium/tools/grit/grit/extern/tclib.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/grit/grit/extern/tclib.py 2025-01-16 02:26:08.632512176 +0800
+@@ -10,7 +10,7 @@
+ # for creating Windows .rc and .h files. These are the only parts needed by
+ # the Chrome build process.
+ 
+-from __future__ import print_function
++
+ 
+ from grit.extern import FP
+ 
+--- a/src/3rdparty/chromium/tools/grit/grit/format/android_xml.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/grit/grit/format/android_xml.py 2025-01-16 02:26:08.632512176 +0800
+@@ -59,7 +59,7 @@
+ 
+ """
+ 
+-from __future__ import print_function
++
+ 
+ import os
+ import re
+@@ -81,7 +81,7 @@
+ 
+ # Most strings are output as a <string> element. Note the double quotes
+ # around the value to preserve whitespace.
+-_STRING_TEMPLATE = u'<string name="%s">"%s"</string>\n'
++_STRING_TEMPLATE = '<string name="%s">"%s"</string>\n'
+ 
+ # Some strings are output as a <plurals> element.
+ _PLURALS_TEMPLATE = '<plurals name="%s">\n%s</plurals>\n'
+--- a/src/3rdparty/chromium/tools/grit/grit/format/android_xml_unittest.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/grit/grit/format/android_xml_unittest.py 2025-01-16 02:26:08.632512176 +0800
+@@ -5,7 +5,7 @@
+ 
+ """Unittest for android_xml.py."""
+ 
+-from __future__ import print_function
++
+ 
+ import os
+ import sys
+--- a/src/3rdparty/chromium/tools/grit/grit/format/c_format.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/grit/grit/format/c_format.py 2025-01-16 02:26:08.632512176 +0800
+@@ -5,7 +5,7 @@
+ """Formats as a .C file for compilation.
+ """
+ 
+-from __future__ import print_function
++
+ 
+ import codecs
+ import os
+--- a/src/3rdparty/chromium/tools/grit/grit/format/c_format_unittest.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/grit/grit/format/c_format_unittest.py 2025-01-16 02:26:08.632512176 +0800
+@@ -6,7 +6,7 @@
+ """Unittest for c_format.py.
+ """
+ 
+-from __future__ import print_function
++
+ 
+ import os
+ import sys
+@@ -24,7 +24,7 @@
+ class CFormatUnittest(unittest.TestCase):
+ 
+   def testMessages(self):
+-    root = util.ParseGrdForUnittest(u"""
++    root = util.ParseGrdForUnittest("""
+ 
+ Do you want to play questions?
+ 
+@@ -44,7 +44,7 @@
+     buf = StringIO()
+     build.RcBuilder.ProcessNode(root, DummyOutput('c_format', 'en'), buf)
+     output = util.StripBlankLinesAndComments(buf.getvalue())
+-    self.assertEqual(u"""\
++    self.assertEqual("""\
+ #include "resource.h"
+ const char* GetString(int id) {
+   switch (id) {
+--- a/src/3rdparty/chromium/tools/grit/grit/format/chrome_messages_json.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/grit/grit/format/chrome_messages_json.py 2025-01-16 02:26:08.632512176 +0800
+@@ -5,7 +5,7 @@
+ """Formats as a .json file that can be used to localize Google Chrome
+ extensions."""
+ 
+-from __future__ import print_function
++
+ 
+ from json import JSONEncoder
+ 
+--- a/src/3rdparty/chromium/tools/grit/grit/format/chrome_messages_json_unittest.py 2023-07-18 22:12:18.000000000 +0800
++++ b/src/3rdparty/chromium/tools/grit/grit/format/chrome_messages_json_unittest.py 2025-01-16 02:26:08.632512176 +0800
+@@ -6,7 +6,7 @@
+ """Unittest for chrome_messages_json.py.
+ """
+ 
+-from __future__ import print_function
++
+ 
+ import json
+ import os
+@@ -29,7 +29,7 @@
+   maxDiff = None
+ 
+   def testMessages(self):
+-    root = util.ParseGrdForUnittest(u"""
++    root = util.ParseGrdForUnittest("""
+ 
+ 
+ Simple message.
+ 
+@@ -65,7 +65,7 @@
+     build.RcBuilder.ProcessNode(root, DummyOutput('chrome_messages_json', 'en'),
+                                 buf)
+     output = buf.getvalue()
+-    test = u"""
++    test = """
+ {
+   "SIMPLE_MESSAGE": {
+     "message": "Simple message."
+@@ -119,7 +119,7 @@
+     build.RcBuilder.ProcessNode(root, DummyOutput('chrome_messages_json', 'fr'),
+                                 buf)
+     output = buf.getvalue()
+-    test = u"""
++    test = """
+ {
+   "ID_HELLO": {
+     "message": "H\u00e9P\u00e9ll\u00f4P\u00f4!"
+@@ -149,11 +149,11 @@ + build.RcBuilder.ProcessNode(root, DummyOutput('chrome_messages_json', 'fr'), + buf) + output = buf.getvalue() +- test = u'{}' ++ test = '{}' + self.assertEqual(test, output) + + def testVerifyMinification(self): +- root = util.ParseGrdForUnittest(u""" ++ root = util.ParseGrdForUnittest(""" + + + $1atest$2b +@@ -165,8 +165,8 @@ + build.RcBuilder.ProcessNode(root, DummyOutput('chrome_messages_json', 'en'), + buf) + output = buf.getvalue() +- test = (u'{"IDS":{"message":"$1$test$2$","placeholders":' +- u'{"1":{"content":"$1"},"2":{"content":"$2"}}}}') ++ test = ('{"IDS":{"message":"$1$test$2$","placeholders":' ++ '{"1":{"content":"$1"},"2":{"content":"$2"}}}}') + self.assertEqual(test, output) + + +--- a/src/3rdparty/chromium/tools/grit/grit/format/data_pack.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/format/data_pack.py 2025-01-16 02:26:08.632512176 +0800 +@@ -7,7 +7,7 @@ + files. + """ + +-from __future__ import print_function ++ + + import collections + import os +@@ -25,7 +25,7 @@ + + + PACK_FILE_VERSION = 5 +-BINARY, UTF8, UTF16 = range(3) ++BINARY, UTF8, UTF16 = list(range(3)) + + + GrdInfoItem = collections.namedtuple('GrdInfoItem', +@@ -49,7 +49,7 @@ + + @property + def total(self): +- return sum(v for v in self.__dict__.values()) ++ return sum(v for v in list(self.__dict__.values())) + + def __iter__(self): + yield ('header', self.header) +@@ -158,7 +158,7 @@ + # Use reversed() so that for duplicates lower IDs clobber higher ones. + id_by_data = {resources[k]: k for k in reversed(resource_ids)} + # Map of resource_id -> resource_id, where value < key. +- alias_map = {k: id_by_data[v] for k, v in resources.items() ++ alias_map = {k: id_by_data[v] for k, v in list(resources.items()) + if id_by_data[v] != k} + + # Write file header. +@@ -294,11 +294,11 @@ + + if allowlist: + allowlisted_resources = dict([(key, input_resources[key]) +- for key in input_resources.keys() ++ for key in list(input_resources.keys()) + if key in allowlist]) + resources.update(allowlisted_resources) + removed_keys = [ +- key for key in input_resources.keys() if key not in allowlist ++ key for key in list(input_resources.keys()) if key not in allowlist + ] + if not suppress_removed_key_output: + for key in removed_keys: +--- a/src/3rdparty/chromium/tools/grit/grit/format/data_pack_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/format/data_pack_unittest.py 2025-01-16 02:26:08.632512176 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for grit.format.data_pack''' + +-from __future__ import print_function ++ + + import os + import sys +@@ -58,7 +58,7 @@ + 10: b'this is id 4', + } + data = data_pack.WriteDataPackToString(input_resources, data_pack.UTF8) +- self.assertEquals(data, expected_data) ++ self.assertEqual(data, expected_data) + + expected_data_pack = data_pack.DataPackContents({ + 1: b'', +--- a/src/3rdparty/chromium/tools/grit/grit/format/gen_predetermined_ids.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/format/gen_predetermined_ids.py 2025-01-16 02:26:08.632512176 +0800 +@@ -9,7 +9,7 @@ + a while and its output checked in. See tools/gritsettings/README.md for details. 
+ """ + +-from __future__ import print_function ++ + + import os + import re +--- a/src/3rdparty/chromium/tools/grit/grit/format/gen_predetermined_ids_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/format/gen_predetermined_ids_unittest.py 2025-01-16 02:26:08.632512176 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for the gen_predetermined_ids module.''' + +-from __future__ import print_function ++ + + import os + import sys +--- a/src/3rdparty/chromium/tools/grit/grit/format/gzip_string.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/format/gzip_string.py 2025-01-16 02:26:08.632512176 +0800 +@@ -4,7 +4,7 @@ + """Provides gzip utilities for strings. + """ + +-from __future__ import print_function ++ + + import gzip + import io +--- a/src/3rdparty/chromium/tools/grit/grit/format/gzip_string_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/format/gzip_string_unittest.py 2025-01-16 02:26:08.632512176 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for grit.format.gzip_string''' + +-from __future__ import print_function ++ + + import gzip + import io +@@ -32,7 +32,7 @@ + b'') + + compressed = gzip_string.GzipStringRsyncable(input) +- self.failUnless(header_begin == compressed[:2]) ++ self.assertTrue(header_begin == compressed[:2]) + + compressed_file = io.BytesIO() + compressed_file.write(compressed) +@@ -40,7 +40,7 @@ + + with gzip.GzipFile(mode='rb', fileobj=compressed_file) as f: + output = f.read() +- self.failUnless(output == input) ++ self.assertTrue(output == input) + + def testGzipString(self): + header_begin = b'\x1f\x8b' # gzip first two bytes +@@ -50,7 +50,7 @@ + b'') + + compressed = gzip_string.GzipString(input) +- self.failUnless(header_begin == compressed[:2]) ++ self.assertTrue(header_begin == compressed[:2]) + + compressed_file = io.BytesIO() + compressed_file.write(compressed) +@@ -58,7 +58,7 @@ + + with gzip.GzipFile(mode='rb', fileobj=compressed_file) as f: + output = f.read() +- self.failUnless(output == input) ++ self.assertTrue(output == input) + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/tools/grit/grit/format/html_inline.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/format/html_inline.py 2025-01-16 02:26:08.633595491 +0800 +@@ -10,7 +10,7 @@ + dependencies. It recursively inlines the included files. 
+ """ + +-from __future__ import print_function ++ + + import os + import re +@@ -305,7 +305,7 @@ + filename_expansion_function=filename_expansion_function) + + def GetFilepath(src_match, base_path = input_filepath): +- filename = [v for k, v in src_match.groupdict().items() ++ filename = [v for k, v in list(src_match.groupdict().items()) + if k.startswith('file') and v][0] + + if filename.find(':') != -1: +--- a/src/3rdparty/chromium/tools/grit/grit/format/html_inline_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/format/html_inline_unittest.py 2025-01-16 02:26:08.633595491 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for grit.format.html_inline''' + +-from __future__ import print_function ++ + + import os + import re +@@ -77,7 +77,7 @@ + resources = html_inline.GetResourceFilenames(tmp_dir.GetPath('index.html'), + None) + resources.add(tmp_dir.GetPath('index.html')) +- self.failUnlessEqual(resources, source_resources) ++ self.assertEqual(resources, source_resources) + tmp_dir.CleanUp() + + def testUnmatchedEndIfBlock(self): +@@ -102,7 +102,7 @@ + + with self.assertRaises(Exception) as cm: + html_inline.GetResourceFilenames(tmp_dir.GetPath('index.html'), None) +- self.failUnlessEqual(str(cm.exception), 'Unmatched ') ++ self.assertEqual(str(cm.exception), 'Unmatched ') + tmp_dir.CleanUp() + + def testCompressedJavaScript(self): +@@ -122,7 +122,7 @@ + resources = html_inline.GetResourceFilenames(tmp_dir.GetPath('index.js'), + None) + resources.add(tmp_dir.GetPath('index.js')) +- self.failUnlessEqual(resources, source_resources) ++ self.assertEqual(resources, source_resources) + tmp_dir.CleanUp() + + def testInlineCSSImports(self): +@@ -177,8 +177,8 @@ + result = html_inline.DoInline(tmp_dir.GetPath('index.html'), None) + resources = result.inlined_files + resources.add(tmp_dir.GetPath('index.html')) +- self.failUnlessEqual(resources, source_resources) +- self.failUnlessEqual(expected_inlined, ++ self.assertEqual(resources, source_resources) ++ self.assertEqual(expected_inlined, + util.FixLineEnd(result.inlined_data, '\n')) + + tmp_dir.CleanUp() +@@ -257,8 +257,8 @@ + result = html_inline.DoInline(tmp_dir.GetPath('index.html'), None) + resources = result.inlined_files + resources.add(tmp_dir.GetPath('index.html')) +- self.failUnlessEqual(resources, source_resources) +- self.failUnlessEqual(expected_inlined, ++ self.assertEqual(resources, source_resources) ++ self.assertEqual(expected_inlined, + util.FixLineEnd(result.inlined_data, '\n')) + + tmp_dir.CleanUp() +@@ -307,8 +307,8 @@ + result = html_inline.DoInline(tmp_dir.GetPath('index.html'), None) + resources = result.inlined_files + resources.add(tmp_dir.GetPath('index.html')) +- self.failUnlessEqual(resources, source_resources) +- self.failUnlessEqual(expected_inlined, ++ self.assertEqual(resources, source_resources) ++ self.assertEqual(expected_inlined, + util.FixLineEnd(result.inlined_data, '\n')) + tmp_dir.CleanUp() + +@@ -351,7 +351,7 @@ + resources = html_inline.GetResourceFilenames(tmp_dir.GetPath('index.html'), + None) + resources.add(tmp_dir.GetPath('index.html')) +- self.failUnlessEqual(resources, source_resources) ++ self.assertEqual(resources, source_resources) + tmp_dir.CleanUp() + + def testInlineCSSLinks(self): +@@ -397,8 +397,8 @@ + result = html_inline.DoInline(tmp_dir.GetPath('index.html'), None) + resources = result.inlined_files + resources.add(tmp_dir.GetPath('index.html')) +- self.failUnlessEqual(resources, source_resources) +- self.failUnlessEqual(expected_inlined, ++ 
self.assertEqual(resources, source_resources) ++ self.assertEqual(expected_inlined, + util.FixLineEnd(result.inlined_data, '\n')) + tmp_dir.CleanUp() + +@@ -448,8 +448,8 @@ + filename_expansion_function=replacer('WHICH', '1')) + resources = result.inlined_files + resources.add(tmp_dir.GetPath('index.html')) +- self.failUnlessEqual(resources, source_resources) +- self.failUnlessEqual(expected_inlined, ++ self.assertEqual(resources, source_resources) ++ self.assertEqual(expected_inlined, + util.FixLineEnd(result.inlined_data, '\n')) + + # Test names-only inlining. +@@ -460,7 +460,7 @@ + filename_expansion_function=replacer('WHICH', '1')) + resources = result.inlined_files + resources.add(tmp_dir.GetPath('index.html')) +- self.failUnlessEqual(resources, source_resources) ++ self.assertEqual(resources, source_resources) + tmp_dir.CleanUp() + + def testWithCloseTags(self): +@@ -525,8 +525,8 @@ + None) + resources = result.inlined_files + resources.add(tmp_dir.GetPath('index.html')) +- self.failUnlessEqual(resources, source_resources) +- self.failUnlessEqual(expected_inlined, ++ self.assertEqual(resources, source_resources) ++ self.assertEqual(expected_inlined, + util.FixLineEnd(result.inlined_data, '\n')) + tmp_dir.CleanUp() + +@@ -548,8 +548,8 @@ + result = html_inline.DoInline(tmp_dir.GetPath('include.js'), None) + resources = result.inlined_files + resources.add(tmp_dir.GetPath('include.js')) +- self.failUnlessEqual(resources, source_resources) +- self.failUnlessEqual(expected_inlined, ++ self.assertEqual(resources, source_resources) ++ self.assertEqual(expected_inlined, + util.FixLineEnd(result.inlined_data, '\n')) + tmp_dir.CleanUp() + +@@ -589,8 +589,8 @@ + resources = result.inlined_files + + resources.add(tmp_dir.GetPath('if.js')) +- self.failUnlessEqual(resources, source_resources) +- self.failUnlessEqual(expected_inlined, ++ self.assertEqual(resources, source_resources) ++ self.assertEqual(expected_inlined, + util.FixLineEnd(result.inlined_data, '\n')) + tmp_dir.CleanUp() + +@@ -652,8 +652,8 @@ + None) + resources = result.inlined_files + resources.add(tmp_dir.GetPath('index.html')) +- self.failUnlessEqual(resources, source_resources) +- self.failUnlessEqual(expected_inlined, ++ self.assertEqual(resources, source_resources) ++ self.assertEqual(expected_inlined, + util.FixLineEnd(result.inlined_data, '\n')) + tmp_dir.CleanUp() + +@@ -684,8 +684,8 @@ + result = html_inline.DoInline(tmp_dir.GetPath('index.html'), None) + resources = result.inlined_files + resources.add(tmp_dir.GetPath('index.html')) +- self.failUnlessEqual(resources, source_resources) +- self.failUnlessEqual(expected_inlined, ++ self.assertEqual(resources, source_resources) ++ self.assertEqual(expected_inlined, + util.FixLineEnd(result.inlined_data, '\n')) + tmp_dir.CleanUp() + +@@ -741,8 +741,8 @@ + result = html_inline.DoInline(tmp_dir.GetPath('index.html'), None) + resources = result.inlined_files + resources.add(tmp_dir.GetPath('index.html')) +- self.failUnlessEqual(resources, source_resources) +- self.failUnlessEqual(expected_inlined, ++ self.assertEqual(resources, source_resources) ++ self.assertEqual(expected_inlined, + util.FixLineEnd(result.inlined_data, '\n')) + tmp_dir.CleanUp() + +@@ -815,13 +815,13 @@ + FakeGrdNode()) + resources = result.inlined_files + resources.add(tmp_dir.GetPath('index.html')) +- self.failUnlessEqual(resources, source_resources) ++ self.assertEqual(resources, source_resources) + + # ignore whitespace + expected_inlined = re.sub(r'\s+', ' ', expected_inlined) + actually_inlined = 
re.sub(r'\s+', ' ', + util.FixLineEnd(result.inlined_data, '\n')) +- self.failUnlessEqual(expected_inlined, actually_inlined); ++ self.assertEqual(expected_inlined, actually_inlined); + tmp_dir.CleanUp() + + def testPreprocessOnlyEvaluatesIncludeAndIf(self): +@@ -869,13 +869,13 @@ + preprocess_only=True) + resources = result.inlined_files + resources.add(tmp_dir.GetPath('index.html')) +- self.failUnlessEqual(resources, source_resources) ++ self.assertEqual(resources, source_resources) + + # Ignore whitespace + expected_inlined = re.sub(r'\s+', ' ', expected_inlined) + actually_inlined = re.sub(r'\s+', ' ', + util.FixLineEnd(result.inlined_data, '\n')) +- self.failUnlessEqual(expected_inlined, actually_inlined) ++ self.assertEqual(expected_inlined, actually_inlined) + + tmp_dir.CleanUp() + +@@ -913,13 +913,13 @@ + preprocess_only=True) + resources = result.inlined_files + resources.add(tmp_dir.GetPath('index.html')) +- self.failUnlessEqual(resources, source_resources) ++ self.assertEqual(resources, source_resources) + + # Ignore whitespace + expected_inlined = re.sub(r'\s+', ' ', expected_inlined) + actually_inlined = re.sub(r'\s+', ' ', + util.FixLineEnd(result.inlined_data, '\n')) +- self.failUnlessEqual(expected_inlined, actually_inlined) ++ self.assertEqual(expected_inlined, actually_inlined) + + tmp_dir.CleanUp() + +--- a/src/3rdparty/chromium/tools/grit/grit/format/minifier.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/format/minifier.py 2025-01-16 02:26:08.633595491 +0800 +@@ -3,7 +3,7 @@ + # found in the LICENSE file. + """Framework for stripping whitespace and comments from resource files""" + +-from __future__ import print_function ++ + + from os import path + import subprocess +--- a/src/3rdparty/chromium/tools/grit/grit/format/policy_templates_json.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/format/policy_templates_json.py 2025-01-16 02:26:08.633595491 +0800 +@@ -5,7 +5,7 @@ + """Translates policy_templates.json files. + """ + +-from __future__ import print_function ++ + + from grit.node import structure + +--- a/src/3rdparty/chromium/tools/grit/grit/format/policy_templates_json_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/format/policy_templates_json_unittest.py 2025-01-16 02:26:08.633595491 +0800 +@@ -7,7 +7,7 @@ + """Unittest for policy_templates_json.py. + """ + +-from __future__ import print_function ++ + + import os + import sys +@@ -148,7 +148,7 @@ + # Caption and message texts get taken from xtb. + # desc is 'translated' to some pseudo-English + # 'ThïPïs pôPôlïPïcýPý dôéPôés stüPüff'. +- expected = u"""{ ++ expected = """{ + "policy_definitions": [ + { + "caption": "%s", +--- a/src/3rdparty/chromium/tools/grit/grit/format/rc.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/format/rc.py 2025-01-16 02:26:08.633595491 +0800 +@@ -5,7 +5,7 @@ + '''Support for formatting an RC file for compilation. + ''' + +-from __future__ import print_function ++ + + import os + import re +--- a/src/3rdparty/chromium/tools/grit/grit/format/rc_header.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/format/rc_header.py 2025-01-16 02:26:08.633595491 +0800 +@@ -5,7 +5,7 @@ + '''Item formatters for RC headers. 
+ ''' + +-from __future__ import print_function ++ + + + def Format(root, lang='en', output_dir='.'): +--- a/src/3rdparty/chromium/tools/grit/grit/format/rc_header_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/format/rc_header_unittest.py 2025-01-16 02:26:08.633595491 +0800 +@@ -8,7 +8,7 @@ + # GRD samples exceed the 80 character limit. + # pylint: disable-msg=C6310 + +-from __future__ import print_function ++ + + import os + import sys +@@ -44,8 +44,8 @@ + + ''') + output = self.FormatAll(grd) +- self.failUnless(output.count('IDS_GREETING10000')) +- self.failUnless(output.count('ID_LOGO300')) ++ self.assertTrue(output.count('IDS_GREETING10000')) ++ self.assertTrue(output.count('ID_LOGO300')) + + def testOnlyDefineResourcesThatSatisfyOutputCondition(self): + grd = util.ParseGrdForUnittest(''' +@@ -76,10 +76,10 @@ + + ''') + output = self.FormatAll(grd) +- self.failUnless(output.count('IDS_FIRSTPRESENTSTRING10000')) +- self.failIf(output.count('IDS_MISSINGSTRING')) +- self.failUnless(output.count('IDS_LANGUAGESPECIFICSTRING10002')) +- self.failUnless(output.count('IDS_THIRDPRESENTSTRING10003')) ++ self.assertTrue(output.count('IDS_FIRSTPRESENTSTRING10000')) ++ self.assertFalse(output.count('IDS_MISSINGSTRING')) ++ self.assertTrue(output.count('IDS_LANGUAGESPECIFICSTRING10002')) ++ self.assertTrue(output.count('IDS_THIRDPRESENTSTRING10003')) + + def testEmit(self): + grd = util.ParseGrdForUnittest(''' +--- a/src/3rdparty/chromium/tools/grit/grit/format/rc_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/format/rc_unittest.py 2025-01-16 02:26:08.633595491 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for grit.format.rc''' + +-from __future__ import print_function ++ + + import os + import re +@@ -72,7 +72,7 @@ + buf = StringIO() + build.RcBuilder.ProcessNode(root, DummyOutput('rc_all', 'en'), buf) + output = util.StripBlankLinesAndComments(buf.getvalue()) +- self.assertEqual(_PREAMBLE + u'''\ ++ self.assertEqual(_PREAMBLE + '''\ + STRINGTABLE + BEGIN + IDS_BTN_GO "Go!" 
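
The u''' -> ''' change in the hunk above is the unicode-literal half of this
migration: in Python 3 the str type is already Unicode, so the u prefix carries
no meaning and the patch simply drops it. A minimal sketch of the equivalence
being relied on (illustrative snippet, not part of the generated patch):

    # Python 2: u'Go!' is unicode while 'Go!' is a byte string -- two types.
    # Python 3: both spellings denote the same text type, str.
    assert u'Go!' == 'Go!'
    assert type(u'Go!') is type('Go!') is str
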
+@@ -94,7 +94,7 @@ + buf = StringIO() + build.RcBuilder.ProcessNode(root, DummyOutput('rc_all', 'en'), buf) + output = util.StripBlankLinesAndComments(buf.getvalue()) +- expected = _PREAMBLE + u'''\ ++ expected = _PREAMBLE + '''\ + IDC_KLONKMENU MENU + BEGIN + POPUP "&File" +@@ -172,8 +172,8 @@ + build.RcBuilder.ProcessNode(root, DummyOutput('rc_all', 'en'), buf) + output = util.StripBlankLinesAndComments(buf.getvalue()) + expected = (_PREAMBLE + +- u'IDR_HTML HTML "%s"\n' +- u'IDR_HTML2 HTML "%s"' ++ 'IDR_HTML HTML "%s"\n' ++ 'IDR_HTML2 HTML "%s"' + % (util.normpath('/temp/bingo.html').replace('\\', '\\\\'), + util.normpath('/temp/bingo2.html').replace('\\', '\\\\'))) + # hackety hack to work on win32&lin +@@ -191,8 +191,8 @@ + build.RcBuilder.ProcessNode(root, DummyOutput('rc_all', 'en'), buf) + output = util.StripBlankLinesAndComments(buf.getvalue()) + expected = (_PREAMBLE + +- u'TEXT_ONE TXT "%s"\n' +- u'TEXT_TWO TXT "%s"' ++ 'TEXT_ONE TXT "%s"\n' ++ 'TEXT_TWO TXT "%s"' + % (util.normpath('/temp/bingo.txt').replace('\\', '\\\\'), + 'bingo2.txt')) + # hackety hack to work on win32&lin +@@ -213,7 +213,7 @@ + output = util.StripBlankLinesAndComments(buf.getvalue()) + + expected = (_PREAMBLE + +- u'HTML_FILE1 BINDATA "HTML_FILE1_include_test.html"') ++ 'HTML_FILE1 BINDATA "HTML_FILE1_include_test.html"') + # hackety hack to work on win32&lin + output = re.sub(r'"[c-zC-Z]:', '"', output) + self.assertEqual(expected, output) +@@ -221,19 +221,19 @@ + file_contents = util.ReadFile(output_file, 'utf-8') + + # Check for the content added by the tag. +- self.failUnless(file_contents.find('Hello Include!') != -1) ++ self.assertTrue(file_contents.find('Hello Include!') != -1) + # Check for the content that was removed by if tag. +- self.failUnless(file_contents.find('should be removed') == -1) ++ self.assertTrue(file_contents.find('should be removed') == -1) + # Check for the content that was kept in place by if. +- self.failUnless(file_contents.find('should be kept') != -1) +- self.failUnless(file_contents.find('in the middle...') != -1) +- self.failUnless(file_contents.find('at the end...') != -1) ++ self.assertTrue(file_contents.find('should be kept') != -1) ++ self.assertTrue(file_contents.find('in the middle...') != -1) ++ self.assertTrue(file_contents.find('at the end...') != -1) + # Check for nested content that was kept +- self.failUnless(file_contents.find('nested true should be kept') != -1) +- self.failUnless(file_contents.find('silbing true should be kept') != -1) ++ self.assertTrue(file_contents.find('nested true should be kept') != -1) ++ self.assertTrue(file_contents.find('silbing true should be kept') != -1) + # Check for removed "" and "" tags. +- self.failUnless(file_contents.find('') == -1) ++ self.assertTrue(file_contents.find('') == -1) + os.remove(output_file) + + def testStructureNodeOutputfile(self): +@@ -250,14 +250,14 @@ + + output_dir = tempfile.gettempdir() + en_file = struct.FileForLanguage('en', output_dir) +- self.failUnless(en_file == input_file) ++ self.assertTrue(en_file == input_file) + fr_file = struct.FileForLanguage('fr', output_dir) +- self.failUnless(fr_file == os.path.join(output_dir, 'fr_simple.html')) ++ self.assertTrue(fr_file == os.path.join(output_dir, 'fr_simple.html')) + + contents = util.ReadFile(fr_file, 'utf-8') + +- self.failUnless(contents.find('
<p>') != -1) # should contain the markup
+-    self.failUnless(contents.find('Hello!') == -1) # should be translated
++    self.assertTrue(contents.find('<p>
') != -1) # should contain the markup ++ self.assertTrue(contents.find('Hello!') == -1) # should be translated + os.remove(fr_file) + + def testChromeHtmlNodeOutputfile(self): +@@ -279,7 +279,7 @@ + buf) + output = util.StripBlankLinesAndComments(buf.getvalue()) + expected = (_PREAMBLE + +- u'HTML_FILE1 BINDATA "HTML_FILE1_chrome_html.html"') ++ 'HTML_FILE1 BINDATA "HTML_FILE1_chrome_html.html"') + # hackety hack to work on win32&lin + output = re.sub(r'"[c-zC-Z]:', '"', output) + self.assertEqual(expected, output) +@@ -287,9 +287,9 @@ + file_contents = util.ReadFile(output_file, 'utf-8') + + # Check for the content added by the tag. +- self.failUnless(file_contents.find('Hello Include!') != -1) ++ self.assertTrue(file_contents.find('Hello Include!') != -1) + # Check for inserted -webkit-image-set. +- self.failUnless(file_contents.find('content: -webkit-image-set') != -1) ++ self.assertTrue(file_contents.find('content: -webkit-image-set') != -1) + os.remove(output_file) + + def testSubstitutionHtml(self): +@@ -311,12 +311,12 @@ + output_dir = tempfile.gettempdir() + struct, = root.GetChildrenOfType(structure.StructureNode) + ar_file = struct.FileForLanguage('ar', output_dir) +- self.failUnless(ar_file == os.path.join(output_dir, ++ self.assertTrue(ar_file == os.path.join(output_dir, + 'ar_toolbar_about.html')) + + contents = util.ReadFile(ar_file, 'utf-8') + +- self.failUnless(contents.find('dir="RTL"') != -1) ++ self.assertTrue(contents.find('dir="RTL"') != -1) + os.remove(ar_file) + + def testFallbackToEnglish(self): +--- a/src/3rdparty/chromium/tools/grit/grit/format/resource_map.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/format/resource_map.py 2025-01-16 02:26:08.633595491 +0800 +@@ -6,7 +6,7 @@ + resource_map_source files. A resource map is a mapping between resource names + (string) and the internal resource ID.''' + +-from __future__ import print_function ++ + + import os + from functools import partial +--- a/src/3rdparty/chromium/tools/grit/grit/format/resource_map_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/format/resource_map_unittest.py 2025-01-16 02:26:08.633595491 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for grit.format.resource_map''' + +-from __future__ import print_function ++ + + import os + import sys +--- a/src/3rdparty/chromium/tools/grit/grit/gather/admin_template.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/gather/admin_template.py 2025-01-16 02:26:08.633595491 +0800 +@@ -5,7 +5,7 @@ + '''Gatherer for administrative template files. 
+ ''' + +-from __future__ import print_function ++ + + import re + +--- a/src/3rdparty/chromium/tools/grit/grit/gather/admin_template_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/gather/admin_template_unittest.py 2025-01-16 02:26:08.633595491 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for the admin template gatherer.''' + +-from __future__ import print_function ++ + + import os + import sys +@@ -33,12 +33,12 @@ + 'gotcha = "bingolabongola "the wise" fingulafongula" \n') + gatherer = admin_template.AdmGatherer(pseudofile) + gatherer.Parse() +- self.failUnless(len(gatherer.GetCliques()) == 2) +- self.failUnless(gatherer.GetCliques()[1].GetMessage().GetRealContent() == ++ self.assertTrue(len(gatherer.GetCliques()) == 2) ++ self.assertTrue(gatherer.GetCliques()[1].GetMessage().GetRealContent() == + 'bingolabongola "the wise" fingulafongula') + + translation = gatherer.Translate('en') +- self.failUnless(translation == gatherer.GetText().strip()) ++ self.assertTrue(translation == gatherer.GetText().strip()) + + def testErrorHandling(self): + pseudofile = StringIO( +@@ -60,10 +60,10 @@ + ) + + def VerifyCliquesFromAdmFile(self, cliques): +- self.failUnless(len(cliques) > 20) ++ self.assertTrue(len(cliques) > 20) + for clique, expected in zip(cliques, self._TRANSLATABLES_FROM_FILE): + text = clique.GetMessage().GetRealContent() +- self.failUnless(text == expected) ++ self.assertTrue(text == expected) + + def testFromFile(self): + fname = util.PathFromRoot('grit/testdata/GoogleDesktop.adm') +@@ -106,8 +106,8 @@ + tool.res = grd + tool.Process() + +- self.failUnless(os.path.isfile(dirname.GetPath('de_GoogleDesktop.adm'))) +- self.failUnless(os.path.isfile(dirname.GetPath('de_README.txt'))) ++ self.assertTrue(os.path.isfile(dirname.GetPath('de_GoogleDesktop.adm'))) ++ self.assertTrue(os.path.isfile(dirname.GetPath('de_README.txt'))) + finally: + dirname.CleanUp() + +--- a/src/3rdparty/chromium/tools/grit/grit/gather/chrome_html.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/gather/chrome_html.py 2025-01-16 02:26:08.634678806 +0800 +@@ -14,7 +14,7 @@ + referencing all available images. 
+ """ + +-from __future__ import print_function ++ + + import os + import re +--- a/src/3rdparty/chromium/tools/grit/grit/gather/chrome_html_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/gather/chrome_html_unittest.py 2025-01-16 02:26:08.634678806 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for grit.gather.chrome_html''' + +-from __future__ import print_function ++ + + import os + import re +@@ -65,7 +65,7 @@ + html.SetDefines({'scale_factors': '1.4x,1.8x'}) + html.SetAttributes({'flattenhtml': 'true'}) + html.Parse() +- self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')), ++ self.assertEqual(StandardizeHtml(html.GetData('en', 'utf-8')), + StandardizeHtml(''' + + +@@ -106,7 +106,7 @@ + html.SetDefines({'scale_factors': '2x'}) + html.SetAttributes({'flattenhtml': 'true'}) + html.Parse() +- self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')), ++ self.assertEqual(StandardizeHtml(html.GetData('en', 'utf-8')), + StandardizeHtml(''' + + +@@ -138,7 +138,7 @@ + html.SetDefines({'scale_factors': '1.4x,1.8x'}) + html.SetAttributes({'flattenhtml': 'false'}) + html.Parse() +- self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')), ++ self.assertEqual(StandardizeHtml(html.GetData('en', 'utf-8')), + StandardizeHtml(''' + .image { + background: -webkit-image-set(url('test.png') 1x, url('1.4x/test.png') 1.4x, url('1.8x/test.png') 1.8x); +@@ -167,7 +167,7 @@ + html.SetDefines({'scale_factors': '1.4x,1.8x'}) + html.SetAttributes({'flattenhtml': 'false'}) + html.Parse() +- self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')), ++ self.assertEqual(StandardizeHtml(html.GetData('en', 'utf-8')), + StandardizeHtml(''' + .image { + background: -webkit-image-set(url('sub/test.png') 1x, url('sub/1.4x/test.png') 1.4x, url('sub/1.8x/test.png') 1.8x); +@@ -197,7 +197,7 @@ + html.SetDefines({'scale_factors': '1.4x,1.8x'}) + html.SetAttributes({'flattenhtml': 'false', 'preprocess': 'true'}) + html.Parse() +- self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')), ++ self.assertEqual(StandardizeHtml(html.GetData('en', 'utf-8')), + StandardizeHtml(''' + .image { + background: -webkit-image-set(url('test.png') 1x, url('1.4x/test.png') 1.4x, url('1.8x/test.png') 1.8x); +@@ -224,7 +224,7 @@ + html.SetDefines({'scale_factors': '2x'}) + html.SetAttributes({'flattenhtml': 'true'}) + html.Parse() +- self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')), ++ self.assertEqual(StandardizeHtml(html.GetData('en', 'utf-8')), + StandardizeHtml(''' + .image { + background: -webkit-image-set(url("data:image/png;base64,UE5HIERBVEE=") 1x, url("data:image/png;base64,MnggUE5HIERBVEE=") 2x); +@@ -251,7 +251,7 @@ + html.SetDefines({'scale_factors': '2x'}) + html.SetAttributes({'flattenhtml': 'true'}) + html.Parse() +- self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')), ++ self.assertEqual(StandardizeHtml(html.GetData('en', 'utf-8')), + StandardizeHtml(''' + .image { + background: -webkit-image-set(url(data:image/png;base64,UE5HIERBVEE=) 1x, url(data:image/png;base64,MnggUE5HIERBVEE=) 2x); +@@ -278,7 +278,7 @@ + html.SetDefines({'scale_factors': '2x'}) + html.SetAttributes({'flattenhtml': 'true'}) + html.Parse() +- self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')), ++ self.assertEqual(StandardizeHtml(html.GetData('en', 'utf-8')), + StandardizeHtml(''' + .image { + background: -webkit-image-set(url('data:image/png;base64,UE5HIERBVEE=') 1x, url('data:image/png;base64,MnggUE5HIERBVEE=') 2x); +@@ -315,7 
+315,7 @@ + html.SetDefines({'scale_factors': '2x'}) + html.SetAttributes({'flattenhtml': 'true'}) + html.Parse() +- self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')), ++ self.assertEqual(StandardizeHtml(html.GetData('en', 'utf-8')), + StandardizeHtml(''' + + +@@ -352,7 +352,7 @@ + html.SetDefines({'scale_factors': '2x'}) + html.SetAttributes({'flattenhtml': 'true'}) + html.Parse() +- self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')), ++ self.assertEqual(StandardizeHtml(html.GetData('en', 'utf-8')), + StandardizeHtml(''' + .image { + background: -webkit-image-set(url(data:image/png;base64,UE5HIERBVEE=) 1x, url(data:image/png;base64,MnggUE5HIERBVEE=) 2x), -webkit-image-set(url(data:image/png;base64,UE5HIERBVEE=) 1x, url(data:image/png;base64,MnggUE5HIERBVEE=) 2x); +@@ -380,7 +380,7 @@ + html.SetDefines({'scale_factors': '2x'}) + html.SetAttributes({'flattenhtml': 'true'}) + html.Parse() +- self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')), ++ self.assertEqual(StandardizeHtml(html.GetData('en', 'utf-8')), + StandardizeHtml(''' + .image { + background: -webkit-image-set(url(data:image/png;base64,UE5HIERBVEE=) 1x, url(data:image/png;base64,MnggUE5HIERBVEE=) 2x), +@@ -411,7 +411,7 @@ + html.SetDefines({'scale_factors': '2x'}) + html.SetAttributes({'flattenhtml': 'true'}) + html.Parse() +- self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')), ++ self.assertEqual(StandardizeHtml(html.GetData('en', 'utf-8')), + StandardizeHtml(''' + .image { + background: -webkit-image-set(url(data:image/png;base64,UE5HIERBVEE=) 1x, url(data:image/png;base64,MnggUE5HIERBVEE=) 2x), +@@ -440,7 +440,7 @@ + html.SetDefines({'scale_factors': '2x'}) + html.SetAttributes({'flattenhtml': 'true'}) + html.Parse() +- self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')), ++ self.assertEqual(StandardizeHtml(html.GetData('en', 'utf-8')), + StandardizeHtml(''' + .image { + background: -webkit-image-set(url(data:image/png;base64,UE5HIERBVEE=) 1x, url(data:image/png;base64,MnggUE5HIERBVEE=) 2x); +@@ -476,7 +476,7 @@ + html.SetDefines({'scale_factors': '2x'}) + html.SetAttributes({'flattenhtml': 'true'}) + html.Parse() +- self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')), ++ self.assertEqual(StandardizeHtml(html.GetData('en', 'utf-8')), + StandardizeHtml(''' + + +@@ -530,7 +530,7 @@ + html.SetDefines({'scale_factors': '1.8x'}) + html.SetAttributes({'flattenhtml': 'true'}) + html.Parse() +- self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')), ++ self.assertEqual(StandardizeHtml(html.GetData('en', 'utf-8')), + StandardizeHtml(''' + + +@@ -587,7 +587,7 @@ + html.SetAttributes({'flattenhtml': 'true'}) + html.SetFilenameExpansionFunction(replacer('WHICH', '1')); + html.Parse() +- self.failUnlessEqual(StandardizeHtml(html.GetData('en', 'utf-8')), ++ self.assertEqual(StandardizeHtml(html.GetData('en', 'utf-8')), + StandardizeHtml(''' + + +--- a/src/3rdparty/chromium/tools/grit/grit/gather/chrome_scaled_image.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/gather/chrome_scaled_image.py 2025-01-16 02:26:08.634678806 +0800 +@@ -5,7 +5,7 @@ + '''Gatherer for . 
+ ''' + +-from __future__ import print_function ++ + + import os + import struct +--- a/src/3rdparty/chromium/tools/grit/grit/gather/chrome_scaled_image_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/gather/chrome_scaled_image_unittest.py 2025-01-16 02:26:08.634678806 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for ChromeScaledImage.''' + +-from __future__ import print_function ++ + + import os + import sys +@@ -63,7 +63,7 @@ + '''Get a set of the files that were actually included in the .rc output. + ''' + data = util.ReadFile(rcname, util.BINARY).decode('utf-16') +- contents = dict((tmp_dir.GetPath(k), v) for k, v in contents.items()) ++ contents = dict((tmp_dir.GetPath(k), v) for k, v in list(contents.items())) + return set(contents[os.path.normpath(m.group(1))] + for m in re.finditer(r'(?m)^\w+\s+BINDATA\s+"([^"]+)"$', data)) + +@@ -108,7 +108,7 @@ + + ''' % (outputs, structures)).encode('utf-8'), + } +- for pngpath, pngdata in inputs.items(): ++ for pngpath, pngdata in list(inputs.items()): + normpath = os.path.normpath('in/' + pngpath) + infiles[normpath] = pngdata + class Options(object): +@@ -121,11 +121,11 @@ + options.verbose = False + options.extra_verbose = False + build.RcBuilder().Run(options, []) +- for context, expected_data in expected_outputs.items(): +- self.assertEquals(expected_data, ++ for context, expected_data in list(expected_outputs.items()): ++ self.assertEqual(expected_data, + _GetFilesInPak(tmp_dir.GetPath('out/%s.pak' % context))) + if not skip_rc: +- self.assertEquals(expected_data, ++ self.assertEqual(expected_data, + _GetFilesInRc(tmp_dir.GetPath('out/%s.rc' % context), + tmp_dir, infiles)) + +--- a/src/3rdparty/chromium/tools/grit/grit/gather/interface.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/gather/interface.py 2025-01-16 02:26:08.634678806 +0800 +@@ -5,7 +5,7 @@ + '''Interface for all gatherers. + ''' + +-from __future__ import print_function ++ + + import os.path + +--- a/src/3rdparty/chromium/tools/grit/grit/gather/json_loader.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/gather/json_loader.py 2025-01-16 02:26:08.634678806 +0800 +@@ -2,7 +2,7 @@ + # Use of this source code is governed by a BSD-style license that can be + # found in the LICENSE file. 
+ +-from __future__ import print_function ++ + + from grit.gather import interface + +--- a/src/3rdparty/chromium/tools/grit/grit/gather/policy_json.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/gather/policy_json.py 2025-01-16 02:26:08.634678806 +0800 +@@ -5,7 +5,7 @@ + '''Support for "policy_templates.json" format used by the policy template + generator as a source for generating ADM,ADMX,etc files.''' + +-from __future__ import print_function ++ + + import json + import sys +@@ -264,7 +264,7 @@ + def _AddMessages(self): + '''Processed and adds the 'messages' section to the output.''' + self._AddNontranslateableChunk(" \"messages\": {\n") +- messages = self.data['messages'].items() ++ messages = list(self.data['messages'].items()) + for count, (name, message) in enumerate(messages, 1): + self._AddNontranslateableChunk(" %s: {\n" % json.dumps(name)) + self._AddNontranslateableChunk(" \"text\": \"") +--- a/src/3rdparty/chromium/tools/grit/grit/gather/policy_json_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/gather/policy_json_unittest.py 2025-01-16 02:26:08.634678806 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for grit.gather.policy_json''' + +-from __future__ import print_function ++ + + import json + import os +@@ -24,7 +24,7 @@ + + def GetExpectedOutput(self, original): + expected = eval(original) +- for key, message in expected['messages'].items(): ++ for key, message in list(expected['messages'].items()): + del message['desc'] + return expected + +@@ -36,8 +36,8 @@ + }""" + gatherer = policy_json.PolicyJson(StringIO(original)) + gatherer.Parse() +- self.failUnless(len(gatherer.GetCliques()) == 0) +- self.failUnless(eval(original) == json.loads(gatherer.Translate('en'))) ++ self.assertTrue(len(gatherer.GetCliques()) == 0) ++ self.assertTrue(eval(original) == json.loads(gatherer.Translate('en'))) + + def testGeneralPolicy(self): + original = ( +@@ -65,9 +65,9 @@ + "}") + gatherer = policy_json.PolicyJson(StringIO(original)) + gatherer.Parse() +- self.failUnless(len(gatherer.GetCliques()) == 4) ++ self.assertTrue(len(gatherer.GetCliques()) == 4) + expected = self.GetExpectedOutput(original) +- self.failUnless(expected == json.loads(gatherer.Translate('en'))) ++ self.assertTrue(expected == json.loads(gatherer.Translate('en'))) + + def testEnum(self): + original = ( +@@ -89,9 +89,9 @@ + "}") + gatherer = policy_json.PolicyJson(StringIO(original)) + gatherer.Parse() +- self.failUnless(len(gatherer.GetCliques()) == 1) ++ self.assertTrue(len(gatherer.GetCliques()) == 1) + expected = self.GetExpectedOutput(original) +- self.failUnless(expected == json.loads(gatherer.Translate('en'))) ++ self.assertTrue(expected == json.loads(gatherer.Translate('en'))) + + def testSchema(self): + original = ("{" +@@ -126,9 +126,9 @@ + "}") + gatherer = policy_json.PolicyJson(StringIO(original)) + gatherer.Parse() +- self.failUnless(len(gatherer.GetCliques()) == 4) ++ self.assertTrue(len(gatherer.GetCliques()) == 4) + expected = self.GetExpectedOutput(original) +- self.failUnless(expected == json.loads(gatherer.Translate('en'))) ++ self.assertTrue(expected == json.loads(gatherer.Translate('en'))) + + def testValidationSchema(self): + original = ("{" +@@ -150,9 +150,9 @@ + "}") + gatherer = policy_json.PolicyJson(StringIO(original)) + gatherer.Parse() +- self.failUnless(len(gatherer.GetCliques()) == 1) ++ self.assertTrue(len(gatherer.GetCliques()) == 1) + expected = self.GetExpectedOutput(original) +- self.failUnless(expected == 
json.loads(gatherer.Translate('en'))) ++ self.assertTrue(expected == json.loads(gatherer.Translate('en'))) + + def testDescriptionSchema(self): + original = ("{" +@@ -174,9 +174,9 @@ + "}") + gatherer = policy_json.PolicyJson(StringIO(original)) + gatherer.Parse() +- self.failUnless(len(gatherer.GetCliques()) == 1) ++ self.assertTrue(len(gatherer.GetCliques()) == 1) + expected = self.GetExpectedOutput(original) +- self.failUnless(expected == json.loads(gatherer.Translate('en'))) ++ self.assertTrue(expected == json.loads(gatherer.Translate('en'))) + + # Keeping for backwards compatibility. + def testSubPolicyOldFormat(self): +@@ -199,9 +199,9 @@ + "}") + gatherer = policy_json.PolicyJson(StringIO(original)) + gatherer.Parse() +- self.failUnless(len(gatherer.GetCliques()) == 1) ++ self.assertTrue(len(gatherer.GetCliques()) == 1) + expected = self.GetExpectedOutput(original) +- self.failUnless(expected == json.loads(gatherer.Translate('en'))) ++ self.assertTrue(expected == json.loads(gatherer.Translate('en'))) + + def testSubPolicyNewFormat(self): + original = ( +@@ -222,9 +222,9 @@ + "}") + gatherer = policy_json.PolicyJson(StringIO(original)) + gatherer.Parse() +- self.failUnless(len(gatherer.GetCliques()) == 1) ++ self.assertTrue(len(gatherer.GetCliques()) == 1) + expected = self.GetExpectedOutput(original) +- self.failUnless(expected == json.loads(gatherer.Translate('en'))) ++ self.assertTrue(expected == json.loads(gatherer.Translate('en'))) + + def testEscapingAndLineBreaks(self): + original = """{ +@@ -266,9 +266,9 @@ + }""" + gatherer = policy_json.PolicyJson(StringIO(original)) + gatherer.Parse() +- self.failUnless(len(gatherer.GetCliques()) == 6) ++ self.assertTrue(len(gatherer.GetCliques()) == 6) + expected = self.GetExpectedOutput(original) +- self.failUnless(expected == json.loads(gatherer.Translate('en'))) ++ self.assertTrue(expected == json.loads(gatherer.Translate('en'))) + + def testPlaceholdersChromium(self): + original = """{ +@@ -285,16 +285,16 @@ + gatherer = policy_json.PolicyJson(StringIO(original)) + gatherer.SetDefines({'_chromium': True}) + gatherer.Parse() +- self.failUnless(len(gatherer.GetCliques()) == 1) ++ self.assertTrue(len(gatherer.GetCliques()) == 1) + expected = json.loads(re.sub('', 'Chromium', original)) +- self.failUnless(expected == json.loads(gatherer.Translate('en'))) +- self.failUnless(gatherer.GetCliques()[0].translateable) ++ self.assertTrue(expected == json.loads(gatherer.Translate('en'))) ++ self.assertTrue(gatherer.GetCliques()[0].translateable) + msg = gatherer.GetCliques()[0].GetMessage() +- self.failUnless(len(msg.GetPlaceholders()) == 1) ++ self.assertTrue(len(msg.GetPlaceholders()) == 1) + ph = msg.GetPlaceholders()[0] +- self.failUnless(ph.GetOriginal() == 'Chromium') +- self.failUnless(ph.GetPresentation() == 'PRODUCT_NAME') +- self.failUnless(ph.GetExample() == 'Google Chrome') ++ self.assertTrue(ph.GetOriginal() == 'Chromium') ++ self.assertTrue(ph.GetPresentation() == 'PRODUCT_NAME') ++ self.assertTrue(ph.GetExample() == 'Google Chrome') + + def testPlaceholdersChrome(self): + original = """{ +@@ -311,33 +311,33 @@ + gatherer = policy_json.PolicyJson(StringIO(original)) + gatherer.SetDefines({'_google_chrome': True}) + gatherer.Parse() +- self.failUnless(len(gatherer.GetCliques()) == 1) ++ self.assertTrue(len(gatherer.GetCliques()) == 1) + expected = json.loads(re.sub('', 'Google Chrome', original)) +- self.failUnless(expected == json.loads(gatherer.Translate('en'))) +- self.failUnless(gatherer.GetCliques()[0].translateable) ++ 
self.assertTrue(expected == json.loads(gatherer.Translate('en'))) ++ self.assertTrue(gatherer.GetCliques()[0].translateable) + msg = gatherer.GetCliques()[0].GetMessage() +- self.failUnless(len(msg.GetPlaceholders()) == 1) ++ self.assertTrue(len(msg.GetPlaceholders()) == 1) + ph = msg.GetPlaceholders()[0] +- self.failUnless(ph.GetOriginal() == 'Google Chrome') +- self.failUnless(ph.GetPresentation() == 'PRODUCT_NAME') +- self.failUnless(ph.GetExample() == 'Google Chrome') ++ self.assertTrue(ph.GetOriginal() == 'Google Chrome') ++ self.assertTrue(ph.GetPresentation() == 'PRODUCT_NAME') ++ self.assertTrue(ph.GetExample() == 'Google Chrome') + + def testGetDescription(self): + gatherer = policy_json.PolicyJson({}) + gatherer.SetDefines({'_google_chrome': True}) +- self.assertEquals( ++ self.assertEqual( + gatherer._GetDescription({'name': 'Policy1', 'owners': ['a@b']}, + 'policy', None, 'desc'), + 'Description of the policy named Policy1 [owner(s): a@b]') +- self.assertEquals( ++ self.assertEqual( + gatherer._GetDescription({'name': 'Plcy2', 'owners': ['a@b', 'c@d']}, + 'policy', None, 'caption'), + 'Caption of the policy named Plcy2 [owner(s): a@b,c@d]') +- self.assertEquals( ++ self.assertEqual( + gatherer._GetDescription({'name': 'Plcy3', 'owners': ['a@b']}, + 'policy', None, 'label'), + 'Label of the policy named Plcy3 [owner(s): a@b]') +- self.assertEquals( ++ self.assertEqual( + gatherer._GetDescription({'name': 'Item'}, 'enum_item', + {'name': 'Plcy', 'owners': ['a@b']}, 'caption'), + 'Caption of the option named Item in policy Plcy [owner(s): a@b]') +--- a/src/3rdparty/chromium/tools/grit/grit/gather/rc.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/gather/rc.py 2025-01-16 02:26:08.634678806 +0800 +@@ -5,7 +5,7 @@ + '''Support for gathering resources from RC files. 
+ ''' + +-from __future__ import print_function ++ + + import re + +@@ -36,7 +36,7 @@ + } + + # How to unescape certain strings +-_UNESCAPE_CHARS = dict([[value, key] for key, value in _ESCAPE_CHARS.items()]) ++_UNESCAPE_CHARS = dict([[value, key] for key, value in list(_ESCAPE_CHARS.items())]) + + + +--- a/src/3rdparty/chromium/tools/grit/grit/gather/rc_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/gather/rc_unittest.py 2025-01-16 02:26:08.634678806 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for grit.gather.rc''' + +-from __future__ import print_function ++ + + import os + import sys +@@ -46,7 +46,7 @@ + + out = rc.Section(f, 'IDC_KLONKACC') + out.ReadSection() +- self.failUnless(out.GetText() == self.part_we_want) ++ self.assertTrue(out.GetText() == self.part_we_want) + + out = rc.Section(util.PathFromRoot(r'grit/testdata/klonk.rc'), + 'IDC_KLONKACC', +@@ -55,7 +55,7 @@ + out_text = out.GetText().replace('\t', '') + out_text = out_text.replace(' ', '') + self.part_we_want = self.part_we_want.replace(' ', '') +- self.failUnless(out_text.strip() == self.part_we_want.strip()) ++ self.assertTrue(out_text.strip() == self.part_we_want.strip()) + + + def testDialog(self): +@@ -77,13 +77,13 @@ + END + '''), 'IDD_ABOUTBOX') + dlg.Parse() +- self.failUnless(len(dlg.GetTextualIds()) == 7) +- self.failUnless(len(dlg.GetCliques()) == 6) +- self.failUnless(dlg.GetCliques()[1].GetMessage().GetRealContent() == ++ self.assertTrue(len(dlg.GetTextualIds()) == 7) ++ self.assertTrue(len(dlg.GetCliques()) == 6) ++ self.assertTrue(dlg.GetCliques()[1].GetMessage().GetRealContent() == + 'klonk Version "yibbee" 1.0') + + transl = dlg.Translate('en') +- self.failUnless(transl.strip() == dlg.GetText().strip()) ++ self.assertTrue(transl.strip() == dlg.GetText().strip()) + + def testAlternateSkeleton(self): + dlg = rc.Dialog(StringIO('''IDD_ABOUTBOX DIALOGEX 22, 17, 230, 75 +@@ -109,9 +109,9 @@ + alt_dlg.Parse() + + transl = dlg.Translate('en', skeleton_gatherer=alt_dlg) +- self.failUnless(transl.count('040704') and ++ self.assertTrue(transl.count('040704') and + transl.count('110978')) +- self.failUnless(transl.count('Yipee skippy')) ++ self.assertTrue(transl.count('Yipee skippy')) + + def testMenu(self): + menu = rc.Menu(StringIO('''IDC_KLONK MENU +@@ -134,13 +134,13 @@ + END'''), 'IDC_KLONK') + + menu.Parse() +- self.failUnless(len(menu.GetTextualIds()) == 6) +- self.failUnless(len(menu.GetCliques()) == 1) +- self.failUnless(len(menu.GetCliques()[0].GetMessage().GetPlaceholders()) == ++ self.assertTrue(len(menu.GetTextualIds()) == 6) ++ self.assertTrue(len(menu.GetCliques()) == 1) ++ self.assertTrue(len(menu.GetCliques()[0].GetMessage().GetPlaceholders()) == + 9) + + transl = menu.Translate('en') +- self.failUnless(transl.strip() == menu.GetText().strip()) ++ self.assertTrue(transl.strip() == menu.GetText().strip()) + + def testVersion(self): + version = rc.Version(StringIO(''' +@@ -178,11 +178,11 @@ + END + '''.strip()), 'VS_VERSION_INFO') + version.Parse() +- self.failUnless(len(version.GetTextualIds()) == 1) +- self.failUnless(len(version.GetCliques()) == 4) ++ self.assertTrue(len(version.GetTextualIds()) == 1) ++ self.assertTrue(len(version.GetCliques()) == 4) + + transl = version.Translate('en') +- self.failUnless(transl.strip() == version.GetText().strip()) ++ self.assertTrue(transl.strip() == version.GetText().strip()) + + + def testRegressionDialogBox(self): +@@ -206,7 +206,7 @@ + BS_AUTORADIOBUTTON,57,144,38,10 + END'''.strip()), 
'IDD_SIDEBAR_WEATHER_PANEL_PROPPAGE') + dialog.Parse() +- self.failUnless(len(dialog.GetTextualIds()) == 10) ++ self.assertTrue(len(dialog.GetTextualIds()) == 10) + + + def testRegressionDialogBox2(self): +@@ -226,7 +226,7 @@ + IDC_STATIC,16,18,234,18 + END'''.strip()), 'IDD_SIDEBAR_EMAIL_PANEL_PROPPAGE') + dialog.Parse() +- self.failUnless('IDC_SIDEBAR_EMAIL_HIDDEN' in dialog.GetTextualIds()) ++ self.assertTrue('IDC_SIDEBAR_EMAIL_HIDDEN' in dialog.GetTextualIds()) + + + def testRegressionMenuId(self): +@@ -239,7 +239,7 @@ + END + END'''.strip()), 'IDR_HYPERMENU_FOLDER') + menu.Parse() +- self.failUnless(len(menu.GetTextualIds()) == 2) ++ self.assertTrue(len(menu.GetTextualIds()) == 2) + + def testRegressionNewlines(self): + menu = rc.Menu(StringIO(''' +@@ -253,7 +253,7 @@ + menu.Parse() + transl = menu.Translate('en') + # Shouldn't find \\n (the \n shouldn't be changed to \\n) +- self.failUnless(transl.find('\\\\n') == -1) ++ self.assertTrue(transl.find('\\\\n') == -1) + + def testRegressionTabs(self): + menu = rc.Menu(StringIO(''' +@@ -267,19 +267,19 @@ + menu.Parse() + transl = menu.Translate('en') + # Shouldn't find \\t (the \t shouldn't be changed to \\t) +- self.failUnless(transl.find('\\\\t') == -1) ++ self.assertTrue(transl.find('\\\\t') == -1) + + def testEscapeUnescape(self): + original = 'Hello "bingo"\n How\\are\\you\\n?' + escaped = rc.Section.Escape(original) +- self.failUnless(escaped == 'Hello ""bingo""\\n How\\\\are\\\\you\\\\n?') ++ self.assertTrue(escaped == 'Hello ""bingo""\\n How\\\\are\\\\you\\\\n?') + unescaped = rc.Section.UnEscape(escaped) +- self.failUnless(unescaped == original) ++ self.assertTrue(unescaped == original) + + def testRegressionPathsWithSlashN(self): + original = '..\\\\..\\\\trs\\\\res\\\\nav_first.gif' + unescaped = rc.Section.UnEscape(original) +- self.failUnless(unescaped == '..\\..\\trs\\res\\nav_first.gif') ++ self.assertTrue(unescaped == '..\\..\\trs\\res\\nav_first.gif') + + def testRegressionDialogItemsTextOnly(self): + dialog = rc.Dialog(StringIO('''IDD_OPTIONS_SEARCH DIALOGEX 0, 0, 280, 292 +@@ -299,8 +299,8 @@ + dialog.Parse() + translateables = [c.GetMessage().GetRealContent() + for c in dialog.GetCliques()] +- self.failUnless('Select search buttons and options' in translateables) +- self.failUnless('Use Google site:' in translateables) ++ self.assertTrue('Select search buttons and options' in translateables) ++ self.assertTrue('Use Google site:' in translateables) + + def testAccelerators(self): + acc = rc.Accelerators(StringIO('''\ +@@ -312,11 +312,11 @@ + END + '''), 'IDR_ACCELERATOR1') + acc.Parse() +- self.failUnless(len(acc.GetTextualIds()) == 4) +- self.failUnless(len(acc.GetCliques()) == 0) ++ self.assertTrue(len(acc.GetTextualIds()) == 4) ++ self.assertTrue(len(acc.GetCliques()) == 0) + + transl = acc.Translate('en') +- self.failUnless(transl.strip() == acc.GetText().strip()) ++ self.assertTrue(transl.strip() == acc.GetText().strip()) + + + def testRegressionEmptyString(self): +@@ -339,8 +339,8 @@ + dlg.Parse() + + def Check(): +- self.failUnless(transl.count('IDC_ENABLE_GD_AUTOSTART')) +- self.failUnless(transl.count('END')) ++ self.assertTrue(transl.count('IDC_ENABLE_GD_AUTOSTART')) ++ self.assertTrue(transl.count('END')) + + transl = dlg.Translate('de', pseudo_if_not_available=True, + fallback_to_english=True) +--- a/src/3rdparty/chromium/tools/grit/grit/gather/regexp.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/gather/regexp.py 2025-01-16 02:26:08.634678806 +0800 +@@ -5,7 +5,7 @@ + 
'''A baseclass for simple gatherers based on regular expressions. + ''' + +-from __future__ import print_function ++ + + from grit.gather import skeleton_gatherer + +--- a/src/3rdparty/chromium/tools/grit/grit/gather/skeleton_gatherer.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/gather/skeleton_gatherer.py 2025-01-16 02:26:08.634678806 +0800 +@@ -6,7 +6,7 @@ + list. + ''' + +-from __future__ import print_function ++ + + import six + +--- a/src/3rdparty/chromium/tools/grit/grit/gather/tr_html.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/gather/tr_html.py 2025-01-16 02:26:08.634678806 +0800 +@@ -49,7 +49,7 @@ + extern.tclib.api.handlers.html.TCHTMLParser. + ''' + +-from __future__ import print_function ++ + + import re + +@@ -680,7 +680,7 @@ + text = self._LoadInputFile() + + # Ignore the BOM character if the document starts with one. +- if text.startswith(u'\ufeff'): ++ if text.startswith('\ufeff'): + text = text[1:] + + self.text_ = text +--- a/src/3rdparty/chromium/tools/grit/grit/gather/tr_html_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/gather/tr_html_unittest.py 2025-01-16 02:26:08.634678806 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for grit.gather.tr_html''' + +-from __future__ import print_function ++ + + import os + import sys +@@ -50,19 +50,19 @@ + p = tr_html.HtmlChunks() + chunks = p.Parse('
<p>Hello dear how areyou?<p>Fine!',
+                      fold_whitespace)
+-    self.failUnlessEqual(chunks, [
++    self.assertEqual(chunks, [
+       (False, '<p>', ''), (True, 'Hello dear how areyou?', ''),
+       (False, '<p>', ''), (True, 'Fine!', '')])
+
+     chunks = p.Parse('<p>Hello dear how areyou?<p>Fine!',
+                      fold_whitespace)
+-    self.failUnlessEqual(chunks, [
++    self.assertEqual(chunks, [
+       (False, '<p>', ''), (True, 'Hello dear how areyou?', ''),
+       (False, '<p>', ''), (True, 'Fine!', '')])
+
+     chunks = p.Parse('<p>Hello dear how are you?<p>Fine!',
+                      fold_whitespace)
+-    self.failUnlessEqual(chunks, [
++    self.assertEqual(chunks, [
+       (False, '<p>', ''), (True, 'Hello dear how are you?', ''),
+       (False, '<p>
', ''), (True, 'Fine!', '')]) + +@@ -70,7 +70,7 @@ + # the starting inline tag. + chunks = p.Parse('Hello! how are you?

I am fine.', + fold_whitespace) +- self.failUnlessEqual(chunks, [ ++ self.assertEqual(chunks, [ + (True, 'Hello! how are you?', ''), (False, '

', ''), + (True, 'I am fine.', '')]) + +@@ -78,7 +78,7 @@ + # the ending inline tag. + chunks = p.Parse("Hello! How are you?

I'm fine!", + fold_whitespace) +- self.failUnlessEqual(chunks, [ ++ self.assertEqual(chunks, [ + (True, 'Hello! How are you?', ''), (False, '

', ''), + (True, "I'm fine!", '')]) + +@@ -87,18 +87,18 @@ + # Check capitals and explicit descriptions + chunks = p.Parse('Hello! how are you?

' + 'I am fine.', fold_whitespace) +- self.failUnlessEqual(chunks, [ ++ self.assertEqual(chunks, [ + (True, 'Hello! how are you?', 'bingo!'), (False, '

', ''), + (True, 'I am fine.', '')]) + chunks = p.Parse('Hello! how are you?

' + 'I am fine.', fold_whitespace) +- self.failUnlessEqual(chunks, [ ++ self.assertEqual(chunks, [ + (True, 'Hello! how are you?', 'bingo!'), (False, '

', ''), + (True, 'I am fine.', '')]) + # Linebreaks get handled by the tclib message. + chunks = p.Parse('Hello! how are you?

' + 'I am fine.', fold_whitespace) +- self.failUnlessEqual(chunks, [ ++ self.assertEqual(chunks, [ + (True, 'Hello! how are you?', 'bi\nngo\n!'), (False, '

', ''), + (True, 'I am fine.', '')]) + +@@ -106,7 +106,7 @@ + # translateable, it will actually apply to the second translateable. + chunks = p.Parse('Hello! how are you?

' + 'I am fine.', fold_whitespace) +- self.failUnlessEqual(chunks, [ ++ self.assertEqual(chunks, [ + (True, 'Hello! how are you?', ''), (False, '

', ''), + (True, 'I am fine.', 'bingo!')]) + +@@ -116,7 +116,7 @@ + p = tr_html.HtmlChunks() + chunks = p.Parse('Hello! how are you?

' + 'I am fine.', fold_whitespace) +- self.failUnlessEqual(chunks, [ ++ self.assertEqual(chunks, [ + (True, 'Hello! how are you?', ''), + (False, '

', ''), + (True, 'I am fine.', '')]) +@@ -126,12 +126,12 @@ + p = tr_html.HtmlChunks() + chunks = p.Parse('', + fold_whitespace) +- self.failUnlessEqual(chunks, [(False, '', '')]) + + # ...and that other tags' line breaks are converted to spaces + chunks = p.Parse('

Hello\nthere\nhow\nare\nyou?

', fold_whitespace) +- self.failUnlessEqual(chunks, [(False, '

', ''), ++ self.assertEqual(chunks, [(False, '

', ''), + (True, 'Hello there how are you?', ''), (False, '

', '')]) + + def VerifyChunkingMessageBreak(self, fold_whitespace): +@@ -139,7 +139,7 @@ + # Make sure that message-break comments work properly. + chunks = p.Parse('Break apart ' + 'messages', fold_whitespace) +- self.failUnlessEqual(chunks, [(True, 'Break', ''), ++ self.assertEqual(chunks, [(True, 'Break', ''), + (False, ' ', ''), + (True, 'apart', ''), + (False, ' ', ''), +@@ -148,7 +148,7 @@ + # Make sure message-break comments work in an inline tag. + chunks = p.Parse('
Google' + '', fold_whitespace) +- self.failUnlessEqual(chunks, [(False, '', ''), ++ self.assertEqual(chunks, [(False, '', ''), + (True, 'Google', ''), + (False, '', '')]) + +@@ -157,12 +157,12 @@ + # Make sure that message-no-break comments work properly. + chunks = p.Parse('Please
don\'t break', + fold_whitespace) +- self.failUnlessEqual(chunks, [(True, 'Please ' ++ self.assertEqual(chunks, [(True, 'Please ' + '
don\'t break', '')]) + + chunks = p.Parse('Please
break.
' + 'But not this time.', fold_whitespace) +- self.failUnlessEqual(chunks, [(True, 'Please', ''), ++ self.assertEqual(chunks, [(True, 'Please', ''), + (False, '
', ''), + (True, 'break. ' + '
But not this time.', '')]) +@@ -176,7 +176,7 @@ + '' + '' + '', False) +- self.failUnlessEqual(chunks, [ ++ self.assertEqual(chunks, [ + (False, '', ''), (True, 'hello there', ''),
+       (False, 'Go!')) + html.Parse() + msg = html.GetCliques()[1].GetMessage() +- self.failUnlessEqual(msg.GetDescription(), 'explicit') +- self.failUnlessEqual(msg.GetRealContent(), 'Go!') ++ self.assertEqual(msg.GetDescription(), 'explicit') ++ self.assertEqual(msg.GetRealContent(), 'Go!') + + html = tr_html.TrHtml( + StringIO('Hello [USER]
' + 'Go!')) + html.Parse() + msg = html.GetCliques()[1].GetMessage() +- self.failUnlessEqual(msg.GetDescription(), 'explicit multiline') +- self.failUnlessEqual(msg.GetRealContent(), 'Go!') ++ self.assertEqual(msg.GetDescription(), 'explicit multiline') ++ self.assertEqual(msg.GetRealContent(), 'Go!') + + + def testRegressionInToolbarAbout(self): +@@ -359,7 +359,7 @@ + for cl in cliques: + content = cl.GetMessage().GetRealContent() + if content.count('De parvis grandis acervus erit'): +- self.failIf(content.count('$/translate')) ++ self.assertFalse(content.count('$/translate')) + + + def HtmlFromFileWithManualCheck(self, f): +@@ -381,10 +381,10 @@ + html = self.HtmlFromFileWithManualCheck( + util.PathFromRoot(r'grit/testdata/privacy.html')) + +- self.failUnless(html.skeleton_[1].GetMessage().GetRealContent() == ++ self.assertTrue(html.skeleton_[1].GetMessage().GetRealContent() == + 'Privacy and Google Desktop Search') +- self.failUnless(html.skeleton_[3].startswith('<')) +- self.failUnless(len(html.skeleton_) > 10) ++ self.assertTrue(html.skeleton_[3].startswith('<')) ++ self.assertTrue(len(html.skeleton_) > 10) + + + def testPreferencesHtml(self): +@@ -401,7 +401,7 @@ + item.GetMessage().GetRealContent() == '[ADDIN-DO] [ADDIN-OPTIONS]'): + self.fail() + +- self.failUnless(len(html.skeleton_) > 100) ++ self.assertTrue(len(html.skeleton_) > 100) + + def AssertNumberOfTranslateables(self, files, num): + '''Fails if any of the files in files don't have exactly +@@ -414,7 +414,7 @@ + for f in files: + f = util.PathFromRoot(r'grit/testdata/%s' % f) + html = self.HtmlFromFileWithManualCheck(f) +- self.failUnless(len(html.GetCliques()) == num) ++ self.assertTrue(len(html.GetCliques()) == num) + + def testFewTranslateables(self): + self.AssertNumberOfTranslateables(['browser.html', 'email_thread.html', +@@ -481,29 +481,29 @@ + msg = tr_html.HtmlToMessage( + 'Hello

Howdiebingo', True) + result = msg.GetPresentableContent() +- self.failUnless( ++ self.assertTrue( + result == 'HelloBEGIN_PARAGRAPHHowdieBEGIN_BLOCKbingoEND_BLOCK') + + msg = tr_html.HtmlToMessage( + 'Hello

Howdie', True) + result = msg.GetPresentableContent() +- self.failUnless( ++ self.assertTrue( + result == 'HelloBEGIN_PARAGRAPHHowdieBEGIN_BLOCKbingoEND_BLOCK') + + + def testHtmlToMessageRegressions(self): + msg = tr_html.HtmlToMessage(' - ', True) + result = msg.GetPresentableContent() +- self.failUnless(result == ' - ') ++ self.assertTrue(result == ' - ') + + + def testEscapeUnescaped(self): + text = '©  & "<hello>"' + unescaped = util.UnescapeHtml(text) +- self.failUnless(unescaped == u'\u00a9\u00a0 & ""') ++ self.assertTrue(unescaped == '\u00a9\u00a0 & ""') + escaped_unescaped = util.EscapeHtml(unescaped, True) +- self.failUnless(escaped_unescaped == +- u'\u00a9\u00a0 & "<hello>"') ++ self.assertTrue(escaped_unescaped == ++ '\u00a9\u00a0 & "<hello>"') + + def testRegressionCjkHtmlFile(self): + # TODO(joi) Fix this problem where unquoted attributes that +@@ -512,7 +512,7 @@ + if False: + html = self.HtmlFromFileWithManualCheck(util.PathFromRoot( + r'grit/testdata/ko_oem_enable_bug.html')) +- self.failUnless(True) ++ self.assertTrue(True) + + def testRegressionCpuHang(self): + # If this regression occurs, the unit test will never return +--- a/src/3rdparty/chromium/tools/grit/grit/gather/txt.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/gather/txt.py 2025-01-16 02:26:08.634678806 +0800 +@@ -5,7 +5,7 @@ + '''Supports making amessage from a text file. + ''' + +-from __future__ import print_function ++ + + from grit.gather import interface + from grit import tclib +--- a/src/3rdparty/chromium/tools/grit/grit/gather/txt_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/gather/txt_unittest.py 2025-01-16 02:26:08.634678806 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for TxtFile gatherer''' + +-from __future__ import print_function ++ + + import os + import sys +@@ -25,9 +25,9 @@ + input = StringIO('Hello there\nHow are you?') + gatherer = txt.TxtFile(input) + gatherer.Parse() +- self.failUnless(gatherer.GetText() == input.getvalue()) +- self.failUnless(len(gatherer.GetCliques()) == 1) +- self.failUnless(gatherer.GetCliques()[0].GetMessage().GetRealContent() == ++ self.assertTrue(gatherer.GetText() == input.getvalue()) ++ self.assertTrue(len(gatherer.GetCliques()) == 1) ++ self.assertTrue(gatherer.GetCliques()[0].GetMessage().GetRealContent() == + input.getvalue()) + + +--- a/src/3rdparty/chromium/tools/grit/grit/node/base.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/node/base.py 2025-01-16 02:26:08.634678806 +0800 +@@ -5,7 +5,7 @@ + '''Base types for nodes in a GRIT resource tree. + ''' + +-from __future__ import print_function ++ + + import ast + import os +@@ -49,7 +49,7 @@ + self.mixed_content = [] # A list of u'' and/or child elements (this + # duplicates 'children' but + # is needed to preserve markup-type content). +- self.name = u'' # The name of this element ++ self.name = '' # The name of this element + self.attrs = {} # The set of attributes (keys to values) + self.parent = None # Our parent unless we are the root element. 
+ self.uberclique = None # Allows overriding uberclique for parts of tree +@@ -63,7 +63,7 @@ + + def __exit__(self, exc_type, exc_value, traceback): + if exc_type is not None: +- print(u'Error processing node %s: %s' % (six.text_type(self), exc_value)) ++ print('Error processing node %s: %s' % (six.text_type(self), exc_value)) + + def __iter__(self): + '''A preorder iteration through the tree that this node is the root of.''' +@@ -253,13 +253,13 @@ + def __str__(self): + '''Returns this node and all nodes below it as an XML document in a Unicode + string.''' +- header = u'\n' ++ header = '\n' + return header + self.FormatXml() + + # Some Python 2 glue. + __unicode__ = __str__ + +- def FormatXml(self, indent = u'', one_line = False): ++ def FormatXml(self, indent = '', one_line = False): + '''Returns this node and all nodes below it as an XML + element in a Unicode string. This differs from __unicode__ in that it does + not include the stuff at the top of the string. If one_line is true, +@@ -273,30 +273,30 @@ + inside_content = self.ContentsAsXml(indent, content_one_line) + + # Then the attributes for this node. +- attribs = u'' ++ attribs = '' + default_attribs = self.DefaultAttributes() + for attrib, value in sorted(self.attrs.items()): + # Only print an attribute if it is other than the default value. + if attrib not in default_attribs or value != default_attribs[attrib]: +- attribs += u' %s=%s' % (attrib, saxutils.quoteattr(value)) ++ attribs += ' %s=%s' % (attrib, saxutils.quoteattr(value)) + + # Finally build the XML for our node and return it + if len(inside_content) > 0: + if one_line: +- return u'<%s%s>%s' % (self.name, attribs, inside_content, ++ return '<%s%s>%s' % (self.name, attribs, inside_content, + self.name) + elif content_one_line: +- return u'%s<%s%s>\n%s %s\n%s' % ( ++ return '%s<%s%s>\n%s %s\n%s' % ( + indent, self.name, attribs, + indent, inside_content, + indent, self.name) + else: +- return u'%s<%s%s>\n%s\n%s' % ( ++ return '%s<%s%s>\n%s\n%s' % ( + indent, self.name, attribs, + inside_content, + indent, self.name) + else: +- return u'%s<%s%s />' % (indent, self.name, attribs) ++ return '%s<%s%s />' % (indent, self.name, attribs) + + def ContentsAsXml(self, indent, one_line): + '''Returns the contents of this node (CDATA and child elements) in XML +@@ -308,15 +308,15 @@ + last_item = None + for mixed_item in self.mixed_content: + if isinstance(mixed_item, Node): +- inside_parts.append(mixed_item.FormatXml(indent + u' ', one_line)) ++ inside_parts.append(mixed_item.FormatXml(indent + ' ', one_line)) + if not one_line: +- inside_parts.append(u'\n') ++ inside_parts.append('\n') + else: + message = mixed_item + # If this is the first item and it starts with whitespace, we add + # the ''' delimiter. + if not last_item and message.lstrip() != message: +- message = u"'''" + message ++ message = "'''" + message + inside_parts.append(util.EncodeCdata(message)) + last_item = mixed_item + +@@ -329,9 +329,9 @@ + # we need to add the ''' delimiter. + if (isinstance(last_item, six.string_types) and + last_item.rstrip() != last_item): +- inside_parts[-1] = inside_parts[-1] + u"'''" ++ inside_parts[-1] = inside_parts[-1] + "'''" + +- return u''.join(inside_parts) ++ return ''.join(inside_parts) + + def SubstituteMessages(self, substituter): + '''Applies substitutions to all messages in the tree. 
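
A second pattern that recurs above (html_inline.py, chrome_scaled_image_unittest.py,
rc.py) is wrapping dictionary traversals in list(...). Python 3's dict.items(),
.keys() and .values() return live view objects rather than lists, so call sites
that mutate the dict mid-iteration, or index the result, need an explicit copy.
A rough standalone sketch of the difference (illustrative, not part of the patch):

    d = {'a': 1, 'b': 2}
    view = d.items()            # Python 3: a dynamic view over the dict
    snapshot = list(d.items())  # materialized copy, frozen at this point
    d['c'] = 3
    assert ('c', 3) in view          # the view reflects the later insertion
    assert ('c', 3) not in snapshot  # the copy does not
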
+--- a/src/3rdparty/chromium/tools/grit/grit/node/base_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/node/base_unittest.py 2025-01-16 02:26:08.634678806 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for base.Node functionality (as used in various subclasses)''' + +-from __future__ import print_function ++ + + import os + import sys +@@ -24,9 +24,9 @@ + + def MakePlaceholder(phname='BINGO'): + ph = message.PhNode() +- ph.StartParsing(u'ph', None) +- ph.HandleAttribute(u'name', phname) +- ph.AppendContent(u'bongo') ++ ph.StartParsing('ph', None) ++ ph.HandleAttribute('name', phname) ++ ph.AppendContent('bongo') + ph.EndParsing() + return ph + +@@ -35,49 +35,49 @@ + def testWhitespaceHandling(self): + # We test using the Message node type. + node = message.MessageNode() +- node.StartParsing(u'hello', None) +- node.HandleAttribute(u'name', u'bla') +- node.AppendContent(u" ''' two spaces ") ++ node.StartParsing('hello', None) ++ node.HandleAttribute('name', 'bla') ++ node.AppendContent(" ''' two spaces ") + node.EndParsing() +- self.failUnless(node.GetCdata() == u' two spaces') ++ self.assertTrue(node.GetCdata() == ' two spaces') + + node = message.MessageNode() +- node.StartParsing(u'message', None) +- node.HandleAttribute(u'name', u'bla') +- node.AppendContent(u" two spaces ''' ") ++ node.StartParsing('message', None) ++ node.HandleAttribute('name', 'bla') ++ node.AppendContent(" two spaces ''' ") + node.EndParsing() +- self.failUnless(node.GetCdata() == u'two spaces ') ++ self.assertTrue(node.GetCdata() == 'two spaces ') + + def testWhitespaceHandlingWithChildren(self): + # We test using the Message node type. + node = message.MessageNode() +- node.StartParsing(u'message', None) +- node.HandleAttribute(u'name', u'bla') +- node.AppendContent(u" ''' two spaces ") ++ node.StartParsing('message', None) ++ node.HandleAttribute('name', 'bla') ++ node.AppendContent(" ''' two spaces ") + node.AddChild(MakePlaceholder()) +- node.AppendContent(u' space before and after ') ++ node.AppendContent(' space before and after ') + node.AddChild(MakePlaceholder('BONGO')) +- node.AppendContent(u" space before two after '''") ++ node.AppendContent(" space before two after '''") + node.EndParsing() +- self.failUnless(node.mixed_content[0] == u' two spaces ') +- self.failUnless(node.mixed_content[2] == u' space before and after ') +- self.failUnless(node.mixed_content[-1] == u' space before two after ') ++ self.assertTrue(node.mixed_content[0] == ' two spaces ') ++ self.assertTrue(node.mixed_content[2] == ' space before and after ') ++ self.assertTrue(node.mixed_content[-1] == ' space before two after ') + + def testXmlFormatMixedContent(self): + # Again test using the Message node type, because it is the only mixed + # content node. 
+ node = message.MessageNode() +- node.StartParsing(u'message', None) +- node.HandleAttribute(u'name', u'name') +- node.AppendContent(u'Hello ') ++ node.StartParsing('message', None) ++ node.HandleAttribute('name', 'name') ++ node.AppendContent('Hello ') + + ph = message.PhNode() +- ph.StartParsing(u'ph', None) +- ph.HandleAttribute(u'name', u'USERNAME') +- ph.AppendContent(u'$1') ++ ph.StartParsing('ph', None) ++ ph.HandleAttribute('name', 'USERNAME') ++ ph.AppendContent('$1') + ex = message.ExNode() +- ex.StartParsing(u'ex', None) +- ex.AppendContent(u'Joi') ++ ex.StartParsing('ex', None) ++ ex.AppendContent('Joi') + ex.EndParsing() + ph.AddChild(ex) + ph.EndParsing() +@@ -86,51 +86,51 @@ + node.EndParsing() + + non_indented_xml = node.FormatXml() +- self.failUnless(non_indented_xml == u'\n Hello ' +- u'<young> $1Joi' +- u'\n') +- +- indented_xml = node.FormatXml(u' ') +- self.failUnless(indented_xml == u' \n Hello ' +- u'<young> $1Joi' +- u'\n ') ++ self.assertTrue(non_indented_xml == '\n Hello ' ++ '<young> $1Joi' ++ '\n') ++ ++ indented_xml = node.FormatXml(' ') ++ self.assertTrue(indented_xml == ' \n Hello ' ++ '<young> $1Joi' ++ '\n ') + + def testXmlFormatMixedContentWithLeadingWhitespace(self): + # Again test using the Message node type, because it is the only mixed + # content node. + node = message.MessageNode() +- node.StartParsing(u'message', None) +- node.HandleAttribute(u'name', u'name') +- node.AppendContent(u"''' Hello ") ++ node.StartParsing('message', None) ++ node.HandleAttribute('name', 'name') ++ node.AppendContent("''' Hello ") + + ph = message.PhNode() +- ph.StartParsing(u'ph', None) +- ph.HandleAttribute(u'name', u'USERNAME') +- ph.AppendContent(u'$1') ++ ph.StartParsing('ph', None) ++ ph.HandleAttribute('name', 'USERNAME') ++ ph.AppendContent('$1') + ex = message.ExNode() +- ex.StartParsing(u'ex', None) +- ex.AppendContent(u'Joi') ++ ex.StartParsing('ex', None) ++ ex.AppendContent('Joi') + ex.EndParsing() + ph.AddChild(ex) + ph.EndParsing() + + node.AddChild(ph) +- node.AppendContent(u" yessiree '''") ++ node.AppendContent(" yessiree '''") + node.EndParsing() + + non_indented_xml = node.FormatXml() +- self.failUnless(non_indented_xml == +- u"\n ''' Hello" +- u' <young> $1Joi' +- u" yessiree '''\n") +- +- indented_xml = node.FormatXml(u' ') +- self.failUnless(indented_xml == +- u" \n ''' Hello" +- u' <young> $1Joi' +- u" yessiree '''\n ") ++ self.assertTrue(non_indented_xml == ++ "\n ''' Hello" ++ ' <young> $1Joi' ++ " yessiree '''\n") ++ ++ indented_xml = node.FormatXml(' ') ++ self.assertTrue(indented_xml == ++ " \n ''' Hello" ++ ' <young> $1Joi' ++ " yessiree '''\n ") + +- self.failUnless(node.GetNodeById('name')) ++ self.assertTrue(node.GetNodeById('name')) + + def testXmlFormatContentWithEntities(self): + '''Tests a bug where   would not be escaped correctly.''' +@@ -143,29 +143,29 @@ + tclib.Placeholder('END_BOLD', '', 'bla')]), + 'BINGOBONGO') + xml = msg_node.FormatXml() +- self.failUnless(xml.find(' ') == -1, 'should have no entities') ++ self.assertTrue(xml.find(' ') == -1, 'should have no entities') + + def testIter(self): + # First build a little tree of message and ph nodes. 
+ node = message.MessageNode() +- node.StartParsing(u'message', None) +- node.HandleAttribute(u'name', u'bla') +- node.AppendContent(u" ''' two spaces ") +- node.AppendContent(u' space before and after ') ++ node.StartParsing('message', None) ++ node.HandleAttribute('name', 'bla') ++ node.AppendContent(" ''' two spaces ") ++ node.AppendContent(' space before and after ') + ph = message.PhNode() +- ph.StartParsing(u'ph', None) ++ ph.StartParsing('ph', None) + ph.AddChild(message.ExNode()) +- ph.HandleAttribute(u'name', u'BINGO') +- ph.AppendContent(u'bongo') ++ ph.HandleAttribute('name', 'BINGO') ++ ph.AppendContent('bongo') + node.AddChild(ph) + node.AddChild(message.PhNode()) +- node.AppendContent(u" space before two after '''") ++ node.AppendContent(" space before two after '''") + + order = [message.MessageNode, message.PhNode, message.ExNode, message.PhNode] + for n in node: +- self.failUnless(type(n) == order[0]) ++ self.assertTrue(type(n) == order[0]) + order = order[1:] +- self.failUnless(len(order) == 0) ++ self.assertTrue(len(order) == 0) + + def testGetChildrenOfType(self): + xml = ''' +@@ -190,14 +190,14 @@ + util.PathFromRoot('grit/test/data')) + from grit.node import node_io + output_nodes = grd.GetChildrenOfType(node_io.OutputNode) +- self.failUnlessEqual(len(output_nodes), 3) +- self.failUnlessEqual(output_nodes[2].attrs['filename'], ++ self.assertEqual(len(output_nodes), 3) ++ self.assertEqual(output_nodes[2].attrs['filename'], + 'de/generated_resources.rc') + + def testEvaluateExpression(self): + def AssertExpr(expected_value, expr, defs, target_platform, + extra_variables): +- self.failUnlessEqual(expected_value, base.Node.EvaluateExpression( ++ self.assertEqual(expected_value, base.Node.EvaluateExpression( + expr, defs, target_platform, extra_variables)) + + AssertExpr(True, "True", {}, 'linux', {}) +--- a/src/3rdparty/chromium/tools/grit/grit/node/empty.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/node/empty.py 2025-01-16 02:26:08.634678806 +0800 +@@ -5,7 +5,7 @@ + '''Container nodes that don't have any logic. + ''' + +-from __future__ import print_function ++ + + from grit.node import base + from grit.node import include +--- a/src/3rdparty/chromium/tools/grit/grit/node/include.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/node/include.py 2025-01-16 02:26:08.634678806 +0800 +@@ -5,7 +5,7 @@ + """Handling of the element. 
+ """ + +-from __future__ import print_function ++ + + import os + +--- a/src/3rdparty/chromium/tools/grit/grit/node/include_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/node/include_unittest.py 2025-01-16 02:26:08.635762121 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for include.IncludeNode''' + +-from __future__ import print_function ++ + + import os + import sys +@@ -40,20 +40,20 @@ + class IncludeNodeUnittest(unittest.TestCase): + def testGetPath(self): + root = misc.GritNode() +- root.StartParsing(u'grit', None) +- root.HandleAttribute(u'latest_public_release', u'0') +- root.HandleAttribute(u'current_release', u'1') +- root.HandleAttribute(u'base_dir', r'..\resource') ++ root.StartParsing('grit', None) ++ root.HandleAttribute('latest_public_release', '0') ++ root.HandleAttribute('current_release', '1') ++ root.HandleAttribute('base_dir', r'..\resource') + release = misc.ReleaseNode() +- release.StartParsing(u'release', root) +- release.HandleAttribute(u'seq', u'1') ++ release.StartParsing('release', root) ++ release.HandleAttribute('seq', '1') + root.AddChild(release) + includes = empty.IncludesNode() +- includes.StartParsing(u'includes', release) ++ includes.StartParsing('includes', release) + release.AddChild(includes) + include_node = include.IncludeNode() +- include_node.StartParsing(u'include', includes) +- include_node.HandleAttribute(u'file', r'flugel\kugel.pdf') ++ include_node.StartParsing('include', includes) ++ include_node.HandleAttribute('file', r'flugel\kugel.pdf') + includes.AddChild(include_node) + root.EndParsing() + +@@ -63,27 +63,27 @@ + + def testGetPathNoBasedir(self): + root = misc.GritNode() +- root.StartParsing(u'grit', None) +- root.HandleAttribute(u'latest_public_release', u'0') +- root.HandleAttribute(u'current_release', u'1') +- root.HandleAttribute(u'base_dir', r'..\resource') ++ root.StartParsing('grit', None) ++ root.HandleAttribute('latest_public_release', '0') ++ root.HandleAttribute('current_release', '1') ++ root.HandleAttribute('base_dir', r'..\resource') + release = misc.ReleaseNode() +- release.StartParsing(u'release', root) +- release.HandleAttribute(u'seq', u'1') ++ release.StartParsing('release', root) ++ release.HandleAttribute('seq', '1') + root.AddChild(release) + includes = empty.IncludesNode() +- includes.StartParsing(u'includes', release) ++ includes.StartParsing('includes', release) + release.AddChild(includes) + include_node = include.IncludeNode() +- include_node.StartParsing(u'include', includes) +- include_node.HandleAttribute(u'file', r'flugel\kugel.pdf') +- include_node.HandleAttribute(u'use_base_dir', u'false') ++ include_node.StartParsing('include', includes) ++ include_node.HandleAttribute('file', r'flugel\kugel.pdf') ++ include_node.HandleAttribute('use_base_dir', 'false') + includes.AddChild(include_node) + root.EndParsing() + + last_dir = os.path.basename(os.getcwd()) + expected_path = util.normpath(os.path.join( +- u'..', last_dir, u'flugel/kugel.pdf')) ++ '..', last_dir, 'flugel/kugel.pdf')) + self.assertEqual(root.ToRealPath(include_node.GetInputPath()), + expected_path) + +--- a/src/3rdparty/chromium/tools/grit/grit/node/mapping.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/node/mapping.py 2025-01-16 02:26:08.635762121 +0800 +@@ -6,7 +6,7 @@ + When adding a new node type, you add to this mapping. 
+ ''' + +-from __future__ import print_function ++ + + from grit import exception + +--- a/src/3rdparty/chromium/tools/grit/grit/node/message.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/node/message.py 2025-01-16 02:26:08.635762121 +0800 +@@ -5,7 +5,7 @@ + '''Handling of the element. + ''' + +-from __future__ import print_function ++ + + import re + +@@ -22,7 +22,7 @@ + + # Matches exactly three dots ending a line or followed by whitespace. + _ELLIPSIS_PATTERN = lazy_re.compile(r'(?\s*)(?P.+?)(?P\s*)\Z', +@@ -272,7 +272,7 @@ + if self._replace_ellipsis: + msg = _ELLIPSIS_PATTERN.sub(_ELLIPSIS_SYMBOL, msg) + # Always remove all byte order marks (\uFEFF) https://crbug.com/1033305 +- msg = msg.replace(u'\uFEFF','') ++ msg = msg.replace('\uFEFF','') + return msg.replace('[GRITLANGCODE]', lang) + + def NameOrOffset(self): +--- a/src/3rdparty/chromium/tools/grit/grit/node/message_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/node/message_unittest.py 2025-01-16 02:26:08.635762121 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for grit.node.message''' + +-from __future__ import print_function ++ + + import os + import sys +@@ -31,7 +31,7 @@ + msg, = root.GetChildrenOfType(message.MessageNode) + cliques = msg.GetCliques() + content = cliques[0].GetMessage().GetPresentableContent() +- self.failUnless(content == 'Hello USERNAME, how are you doing today?') ++ self.assertTrue(content == 'Hello USERNAME, how are you doing today?') + + def testMessageWithWhitespace(self): + root = util.ParseGrdForUnittest("""\ +@@ -42,28 +42,28 @@ + """) + msg, = root.GetChildrenOfType(message.MessageNode) + content = msg.GetCliques()[0].GetMessage().GetPresentableContent() +- self.failUnless(content == 'Hello there USERNAME') +- self.failUnless(msg.ws_at_start == ' ') +- self.failUnless(msg.ws_at_end == ' ') ++ self.assertTrue(content == 'Hello there USERNAME') ++ self.assertTrue(msg.ws_at_start == ' ') ++ self.assertTrue(msg.ws_at_end == ' ') + + def testConstruct(self): + msg = tclib.Message(text=" Hello USERNAME, how are you? BINGO\t\t", + placeholders=[tclib.Placeholder('USERNAME', '%s', 'Joi'), + tclib.Placeholder('BINGO', '%d', '11')]) + msg_node = message.MessageNode.Construct(None, msg, 'BINGOBONGO') +- self.failUnless(msg_node.children[0].name == 'ph') +- self.failUnless(msg_node.children[0].children[0].name == 'ex') +- self.failUnless(msg_node.children[0].children[0].GetCdata() == 'Joi') +- self.failUnless(msg_node.children[1].children[0].GetCdata() == '11') +- self.failUnless(msg_node.ws_at_start == ' ') +- self.failUnless(msg_node.ws_at_end == '\t\t') ++ self.assertTrue(msg_node.children[0].name == 'ph') ++ self.assertTrue(msg_node.children[0].children[0].name == 'ex') ++ self.assertTrue(msg_node.children[0].children[0].GetCdata() == 'Joi') ++ self.assertTrue(msg_node.children[1].children[0].GetCdata() == '11') ++ self.assertTrue(msg_node.ws_at_start == ' ') ++ self.assertTrue(msg_node.ws_at_end == '\t\t') + + def testUnicodeConstruct(self): +- text = u'Howdie \u00fe' ++ text = 'Howdie \u00fe' + msg = tclib.Message(text=text) + msg_node = message.MessageNode.Construct(None, msg, 'BINGOBONGO') + msg_from_node = msg_node.GetCdata() +- self.failUnless(msg_from_node == text) ++ self.assertTrue(msg_from_node == text) + + def testFormatterData(self): + root = util.ParseGrdForUnittest("""\ +@@ -80,10 +80,10 @@ + + # Can't use assertDictEqual, not available in Python 2.6, so do it + # by hand. 
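The "by hand" comment above is a Python-2.6-era workaround. With the codebase now Python 3 only, the key-by-key loop could collapse into a single assertion; a hypothetical simplification (dict contents invented), shown as a sketch rather than something this patch changes:

import unittest

class FormatterDataDemo(unittest.TestCase):
    def test_dict_equality(self):
        expected = {'foo': '123', 'hello': '', 'boo': ''}
        actual = {'hello': '', 'boo': '', 'foo': '123'}
        # assertEqual dispatches to assertDictEqual when both operands
        # are dicts (Python 2.7+/3.1+) and prints a readable diff on
        # failure.
        self.assertEqual(expected, actual)

if __name__ == '__main__':
    unittest.main()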
+- self.failUnlessEqual(len(expected_formatter_data), ++ self.assertEqual(len(expected_formatter_data), + len(msg.formatter_data)) + for key in expected_formatter_data: +- self.failUnlessEqual(expected_formatter_data[key], ++ self.assertEqual(expected_formatter_data[key], + msg.formatter_data[key]) + + def testReplaceEllipsis(self): +@@ -96,10 +96,10 @@ + msg, = root.GetChildrenOfType(message.MessageNode) + msg.SetReplaceEllipsis(True) + content = msg.Translate('en') +- self.failUnlessEqual(u'A...B.... %s\u2026 B\u2026 C\u2026', content) ++ self.assertEqual('A...B.... %s\u2026 B\u2026 C\u2026', content) + + def testRemoveByteOrderMark(self): +- root = util.ParseGrdForUnittest(u''' ++ root = util.ParseGrdForUnittest(''' + + + \uFEFFThis\uFEFF i\uFEFFs OK\uFEFF +@@ -107,7 +107,7 @@ + ''') + msg, = root.GetChildrenOfType(message.MessageNode) + content = msg.Translate('en') +- self.failUnlessEqual(u'This is OK', content) ++ self.assertEqual('This is OK', content) + + def testPlaceholderHasTooManyExamples(self): + try: +@@ -250,7 +250,7 @@ + msg, = root.GetChildrenOfType(message.MessageNode) + cliques = msg.GetCliques() + content = cliques[0].GetMessage().GetPresentableContent() +- self.failUnless(content == 'ERROR_COUNT error, WARNING_COUNT warning') ++ self.assertTrue(content == 'ERROR_COUNT error, WARNING_COUNT warning') + + def testMultipleFormattersAreInsidePhNodes(self): + failed = True +--- a/src/3rdparty/chromium/tools/grit/grit/node/misc.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/node/misc.py 2025-01-16 02:26:08.635762121 +0800 +@@ -5,7 +5,7 @@ + """Miscellaneous node types. + """ + +-from __future__ import print_function ++ + + import os.path + import re +@@ -25,7 +25,7 @@ + # Python 3 doesn't have long() as int() works everywhere. But we really do need + # the long() behavior on Python 2 as our ids are much too large for int(). + try: +- long ++ int + except NameError: + long = int + +@@ -58,7 +58,7 @@ + first_ids_dict['SRCDIR'])) + + def ReplaceVariable(matchobj): +- for key, value in defines.items(): ++ for key, value in list(defines.items()): + if matchobj.group(1) == key: + value = os.path.abspath(value) + return value +@@ -105,7 +105,7 @@ + group = None + last_id = None + predetermined_ids = {value: key +- for key, value in predetermined_tids.items()} ++ for key, value in list(predetermined_tids.items())} + + for item in root: + if isinstance(item, empty.GroupingNode): +@@ -141,7 +141,7 @@ + # Some identifier nodes can provide their own id, + # and we use that id in the generated header in that case. + elif hasattr(item, 'GetId') and item.GetId(): +- id = long(item.GetId()) ++ id = int(item.GetId()) + reason = 'returned by GetId() method' + + elif ('offset' in item.attrs and group and +@@ -150,12 +150,12 @@ + parent_text = group.attrs['first_id'] + + try: +- offset_id = long(offset_text) ++ offset_id = int(offset_text) + except ValueError: + offset_id = tids[offset_text] + + try: +- parent_id = long(parent_text) ++ parent_id = int(parent_text) + except ValueError: + parent_id = tids[parent_text] + +@@ -174,7 +174,7 @@ + elif last_id is None: + # First check if the starting ID is explicitly specified by the parent. 
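A note on the long()-to-int() rewrites in the misc.py id-computation hunks above: Python 3 merged long into int, which is arbitrary precision, so resource ids well past 2**31 need no separate type. A small illustrative sketch (the id values and tids table are invented):

# Ids that overflowed a 32-bit int on Python 2 (the reason long() was
# used) are ordinary ints on Python 3:
first_id = 1 << 40
print(first_id + 25)              # 1099511627801
print(isinstance(first_id, int))  # True

# The same parse-or-lookup pattern as the offset/parent resolution above:
tids = {'IDS_BASE': 5000}
for text in ('120', 'IDS_BASE'):
    try:
        value = int(text)
    except ValueError:
        value = tids[text]
    print(text, '->', value)

One side effect of spelling the compatibility shim as "try: int / except NameError" is that the except branch can never fire on Python 3, so the "long = int" assignment survives only as dead code; that is harmless here because every call site now uses int() directly.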
+ if group and group.attrs.get('first_id', '') != '': +- id = long(group.attrs['first_id']) ++ id = int(group.attrs['first_id']) + reason = "from parent's first_id attribute" + else: + # Automatically generate the ID based on the first clique from the +--- a/src/3rdparty/chromium/tools/grit/grit/node/misc_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/node/misc_unittest.py 2025-01-16 02:26:08.635762121 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for misc.GritNode''' + +-from __future__ import print_function ++ + + import contextlib + import os +@@ -110,7 +110,7 @@ + path in grd.GetInputFiles()] + # Convert path separator for Windows paths. + actual = [path.replace('\\', '/') for path in actual] +- self.assertEquals(expected, actual) ++ self.assertEqual(expected, actual) + + # Verifies that GetInputFiles() returns the correct list of files + # when files include other files. +@@ -136,7 +136,7 @@ + path in grd.GetInputFiles()] + # Convert path separator for Windows paths. + actual = [path.replace('\\', '/') for path in actual] +- self.assertEquals(expected, actual) ++ self.assertEqual(expected, actual) + + def testNonDefaultEntry(self): + grd = util.ParseGrdForUnittest(''' +@@ -259,33 +259,33 @@ + grd.SetOutputLanguage('fr') + grd.SetDefines({'hello': '1'}) + active = set(grd.ActiveDescendants()) +- self.failUnless(bingo_message not in active) +- self.failUnless(hello_message in active) +- self.failUnless(french_message in active) ++ self.assertTrue(bingo_message not in active) ++ self.assertTrue(hello_message in active) ++ self.assertTrue(french_message in active) + + grd.SetOutputLanguage('en') + grd.SetDefines({'bingo': 1}) + active = set(grd.ActiveDescendants()) +- self.failUnless(bingo_message in active) +- self.failUnless(hello_message not in active) +- self.failUnless(french_message not in active) ++ self.assertTrue(bingo_message in active) ++ self.assertTrue(hello_message not in active) ++ self.assertTrue(french_message not in active) + + grd.SetOutputLanguage('en') + grd.SetDefines({'FORCE_FRENCH': '1', 'bingo': '1'}) + active = set(grd.ActiveDescendants()) +- self.failUnless(bingo_message in active) +- self.failUnless(hello_message not in active) +- self.failUnless(french_message in active) ++ self.assertTrue(bingo_message in active) ++ self.assertTrue(hello_message not in active) ++ self.assertTrue(french_message in active) + + grd.SetOutputLanguage('en') + grd.SetDefines({}) +- self.failUnless(grd.target_platform == sys.platform) ++ self.assertTrue(grd.target_platform == sys.platform) + grd.SetTargetPlatform('darwin') + active = set(grd.ActiveDescendants()) +- self.failUnless(is_win_message not in active) ++ self.assertTrue(is_win_message not in active) + grd.SetTargetPlatform('win32') + active = set(grd.ActiveDescendants()) +- self.failUnless(is_win_message in active) ++ self.assertTrue(is_win_message in active) + + def testElsiness(self): + grd = util.ParseGrdForUnittest(''' +@@ -361,7 +361,7 @@ + grd.SetOutputLanguage('ru') + grd.SetDefines({'hello': '1'}) + outputs = [output.GetFilename() for output in grd.GetOutputFiles()] +- self.assertEquals( ++ self.assertEqual( + outputs, + ['uncond1.rc', 'only_fr.adm', 'only_fr.plist', 'doc.html', + 'uncond2.adm', 'iftest.h']) +@@ -369,14 +369,14 @@ + grd.SetOutputLanguage('ru') + grd.SetDefines({'bingo': '2'}) + outputs = [output.GetFilename() for output in grd.GetOutputFiles()] +- self.assertEquals( ++ self.assertEqual( + outputs, + ['uncond1.rc', 'doc.html', 'uncond2.adm', 'iftest.h']) + + 
grd.SetOutputLanguage('fr') + grd.SetDefines({'hello': '1'}) + outputs = [output.GetFilename() for output in grd.GetOutputFiles()] +- self.assertEquals( ++ self.assertEqual( + outputs, + ['uncond1.rc', 'only_fr.adm', 'only_fr.plist', 'uncond2.adm', + 'iftest.h']) +@@ -384,12 +384,12 @@ + grd.SetOutputLanguage('en') + grd.SetDefines({'bingo': '1'}) + outputs = [output.GetFilename() for output in grd.GetOutputFiles()] +- self.assertEquals(outputs, ['uncond1.rc', 'uncond2.adm', 'iftest.h']) ++ self.assertEqual(outputs, ['uncond1.rc', 'uncond2.adm', 'iftest.h']) + + grd.SetOutputLanguage('fr') + grd.SetDefines({'bingo': '1'}) + outputs = [output.GetFilename() for output in grd.GetOutputFiles()] +- self.assertNotEquals(outputs, ['uncond1.rc', 'uncond2.adm', 'iftest.h']) ++ self.assertNotEqual(outputs, ['uncond1.rc', 'uncond2.adm', 'iftest.h']) + + def testChildrenAccepted(self): + grd_reader.Parse(StringIO(r''' +@@ -572,10 +572,10 @@ + menu = grd.GetNodeById('IDC_KLONKMENU') + + for node in [hello, aboutbox]: +- self.failUnless(not node.PseudoIsAllowed()) ++ self.assertTrue(not node.PseudoIsAllowed()) + + for node in [bingo, menu]: +- self.failUnless(node.PseudoIsAllowed()) ++ self.assertTrue(node.PseudoIsAllowed()) + + # TODO(benrg): There was a test here that formatting hello and aboutbox with + # a pseudo language should fail, but they do not fail and the test was +--- a/src/3rdparty/chromium/tools/grit/grit/node/node_io.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/node/node_io.py 2025-01-16 02:26:08.635762121 +0800 +@@ -5,7 +5,7 @@ + '''The and elements. + ''' + +-from __future__ import print_function ++ + + import os + +--- a/src/3rdparty/chromium/tools/grit/grit/node/node_io_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/node/node_io_unittest.py 2025-01-16 02:26:08.635762121 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for node_io.FileNode''' + +-from __future__ import print_function ++ + + import os + import sys +@@ -34,20 +34,20 @@ + class FileNodeUnittest(unittest.TestCase): + def testGetPath(self): + root = misc.GritNode() +- root.StartParsing(u'grit', None) +- root.HandleAttribute(u'latest_public_release', u'0') +- root.HandleAttribute(u'current_release', u'1') +- root.HandleAttribute(u'base_dir', r'..\resource') ++ root.StartParsing('grit', None) ++ root.HandleAttribute('latest_public_release', '0') ++ root.HandleAttribute('current_release', '1') ++ root.HandleAttribute('base_dir', r'..\resource') + translations = empty.TranslationsNode() +- translations.StartParsing(u'translations', root) ++ translations.StartParsing('translations', root) + root.AddChild(translations) + file_node = node_io.FileNode() +- file_node.StartParsing(u'file', translations) +- file_node.HandleAttribute(u'path', r'flugel\kugel.pdf') ++ file_node.StartParsing('file', translations) ++ file_node.HandleAttribute('path', r'flugel\kugel.pdf') + translations.AddChild(file_node) + root.EndParsing() + +- self.failUnless(root.ToRealPath(file_node.GetInputPath()) == ++ self.assertTrue(root.ToRealPath(file_node.GetInputPath()) == + util.normpath( + os.path.join(r'../resource', r'flugel/kugel.pdf'))) + +@@ -153,12 +153,12 @@ + grd.RunGatherers() + outputs = grd.GetChildrenOfType(node_io.OutputNode) + active = set(grd.ActiveDescendants()) +- self.failUnless(outputs[0] in active) +- self.failUnless(outputs[0].GetType() == 'rc_header') +- self.failUnless(outputs[1] in active) +- self.failUnless(outputs[1].GetType() == 'rc_all') +- 
self.failUnless(outputs[2] not in active) +- self.failUnless(outputs[2].GetType() == 'rc_all') ++ self.assertTrue(outputs[0] in active) ++ self.assertTrue(outputs[0].GetType() == 'rc_header') ++ self.assertTrue(outputs[1] in active) ++ self.assertTrue(outputs[1].GetType() == 'rc_all') ++ self.assertTrue(outputs[2] not in active) ++ self.assertTrue(outputs[2].GetType() == 'rc_all') + + # Verify that 'iw' and 'no' language codes in xtb files are mapped to 'he' and + # 'nb'. +--- a/src/3rdparty/chromium/tools/grit/grit/node/structure.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/node/structure.py 2025-01-16 02:26:08.635762121 +0800 +@@ -5,7 +5,7 @@ + '''The element. + ''' + +-from __future__ import print_function ++ + + import os + import platform +@@ -239,7 +239,7 @@ + + # Note: Parse() is idempotent, therefore this method is also. + self.gatherer.Parse() +- for skel in self.skeletons.values(): ++ for skel in list(self.skeletons.values()): + skel.Parse() + + def GetSkeletonGatherer(self): +--- a/src/3rdparty/chromium/tools/grit/grit/node/structure_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/node/structure_unittest.py 2025-01-16 02:26:08.635762121 +0800 +@@ -6,7 +6,7 @@ + '''Unit tests for nodes. + ''' + +-from __future__ import print_function ++ + + import os + import os.path +@@ -55,18 +55,18 @@ + grd.SetOutputLanguage('fr') + grd.RunGatherers() + transl = ''.join(rc.Format(grd, 'fr', '.')) +- self.failUnless(transl.count('040704') and transl.count('110978')) +- self.failUnless(transl.count('2005",IDC_STATIC')) ++ self.assertTrue(transl.count('040704') and transl.count('110978')) ++ self.assertTrue(transl.count('2005",IDC_STATIC')) + + def testRunCommandOnCurrentPlatform(self): + node = structure.StructureNode() + node.attrs = node.DefaultAttributes() +- self.failUnless(node.RunCommandOnCurrentPlatform()) ++ self.assertTrue(node.RunCommandOnCurrentPlatform()) + node.attrs['run_command_on_platforms'] = 'Nosuch' +- self.failIf(node.RunCommandOnCurrentPlatform()) ++ self.assertFalse(node.RunCommandOnCurrentPlatform()) + node.attrs['run_command_on_platforms'] = ( + 'Nosuch,%s,Othernot' % platform.system()) +- self.failUnless(node.RunCommandOnCurrentPlatform()) ++ self.assertTrue(node.RunCommandOnCurrentPlatform()) + + def testVariables(self): + grd = util.ParseGrdForUnittest(''' +@@ -80,7 +80,7 @@ + filepath = os.path.join(tempfile.gettempdir(), filename) + with open(filepath) as f: + result = f.read() +- self.failUnlessEqual(('
<h1>Hello!</h1>\n'
++ self.assertEqual(('<h1>Hello!</h1>\n'
+ 'Some cool things are foo, bar, baz.\n'
+ 'Did you know that 2+2==4?\n'
+ '<p>
\n' +--- a/src/3rdparty/chromium/tools/grit/grit/node/variant.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/node/variant.py 2025-01-16 02:26:08.635762121 +0800 +@@ -5,7 +5,7 @@ + '''The element. + ''' + +-from __future__ import print_function ++ + + from grit.node import base + +--- a/src/3rdparty/chromium/tools/grit/grit/node/custom/filename.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/node/custom/filename.py 2025-01-16 02:26:08.635762121 +0800 +@@ -4,7 +4,7 @@ + + '''A CustomType for filenames.''' + +-from __future__ import print_function ++ + + from grit import clique + from grit import lazy_re +--- a/src/3rdparty/chromium/tools/grit/grit/node/custom/filename_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/node/custom/filename_unittest.py 2025-01-16 02:26:08.635762121 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for grit.node.custom.filename''' + +-from __future__ import print_function ++ + + import os + import sys +@@ -27,7 +27,7 @@ + c.SetCustomType(filename.WindowsFilename()) + translation = tclib.Translation(id=msg.GetId(), text='Bilingo bolongo:') + c.AddTranslation(translation, 'fr') +- self.failUnless(c.MessageForLanguage('fr').GetRealContent() == 'Bilingo bolongo ') ++ self.assertTrue(c.MessageForLanguage('fr').GetRealContent() == 'Bilingo bolongo ') + + + if __name__ == '__main__': +--- a/src/3rdparty/chromium/tools/grit/grit/tool/android2grd.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/tool/android2grd.py 2025-01-16 02:26:08.635762121 +0800 +@@ -4,7 +4,7 @@ + + """The 'grit android2grd' tool.""" + +-from __future__ import print_function ++ + + import getopt + import os.path +@@ -406,7 +406,7 @@ + xtb_file = os.path.normpath(os.path.join( + self.xtb_dir, '%s_%s.xtb' % (self.name, lang))) + fnode = node_io.FileNode() +- fnode.StartParsing(u'file', translations_node) ++ fnode.StartParsing('file', translations_node) + fnode.HandleAttribute('path', xtb_file) + fnode.HandleAttribute('lang', lang) + fnode.EndParsing() +@@ -417,11 +417,11 @@ + """Creates the element corresponding to the generated c header.""" + header_file_name = os.path.join(header_dir, self.name + '.h') + header_node = node_io.OutputNode() +- header_node.StartParsing(u'output', outputs_node) ++ header_node.StartParsing('output', outputs_node) + header_node.HandleAttribute('filename', header_file_name) + header_node.HandleAttribute('type', 'rc_header') + emit_node = node_io.EmitNode() +- emit_node.StartParsing(u'emit', header_node) ++ emit_node.StartParsing('emit', header_node) + emit_node.HandleAttribute('emit_type', 'prepend') + emit_node.EndParsing() + header_node.AddChild(emit_node) +@@ -434,7 +434,7 @@ + rc_file_name = self.name + '_' + lang + ".rc" + rc_path = os.path.join(rc_dir, rc_file_name) + node = node_io.OutputNode() +- node.StartParsing(u'output', outputs_node) ++ node.StartParsing('output', outputs_node) + node.HandleAttribute('filename', rc_path) + node.HandleAttribute('lang', lang) + node.HandleAttribute('type', 'rc_all') +@@ -462,7 +462,7 @@ + xml_res_dir, values, 'strings.xml')) + + node = node_io.OutputNode() +- node.StartParsing(u'output', outputs_node) ++ node.StartParsing('output', outputs_node) + node.HandleAttribute('filename', xml_path) + node.HandleAttribute('lang', locale) + node.HandleAttribute('type', 'android') +--- a/src/3rdparty/chromium/tools/grit/grit/tool/android2grd_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ 
b/src/3rdparty/chromium/tools/grit/grit/tool/android2grd_unittest.py 2025-01-16 02:26:08.635762121 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for grit.tool.android2grd''' + +-from __future__ import print_function ++ + + import os + import sys +--- a/src/3rdparty/chromium/tools/grit/grit/tool/build.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/tool/build.py 2025-01-16 02:26:08.635762121 +0800 +@@ -5,7 +5,7 @@ + '''The 'grit build' tool. + ''' + +-from __future__ import print_function ++ + + import codecs + import filecmp +--- a/src/3rdparty/chromium/tools/grit/grit/tool/build_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/tool/build_unittest.py 2025-01-16 02:26:08.635762121 +0800 +@@ -6,7 +6,7 @@ + '''Unit tests for the 'grit build' tool. + ''' + +-from __future__ import print_function ++ + + import codecs + import os +@@ -58,14 +58,14 @@ + '--depdir', output_dir.GetPath(), + '--depfile', expected_dep_file]) + +- self.failUnless(os.path.isfile(expected_dep_file)) ++ self.assertTrue(os.path.isfile(expected_dep_file)) + with open(expected_dep_file) as f: + line = f.readline() + (dep_output_file, deps_string) = line.split(': ') + deps = deps_string.split(' ') + +- self.failUnlessEqual("default_100_percent.pak", dep_output_file) +- self.failUnlessEqual(deps, [ ++ self.assertEqual("default_100_percent.pak", dep_output_file) ++ self.assertEqual(deps, [ + util.PathFromRoot('grit/testdata/default_100_percent/a.png'), + util.PathFromRoot('grit/testdata/grit_part.grdp'), + util.PathFromRoot('grit/testdata/special_100_percent/a.png'), +@@ -87,17 +87,17 @@ + '--depdir', output_dir.GetPath(), + '--depfile', expected_dep_file]) + +- self.failUnless(os.path.isfile(expected_dep_file)) ++ self.assertTrue(os.path.isfile(expected_dep_file)) + with open(expected_dep_file) as f: + line = f.readline() + (dep_output_file, deps_string) = line.split(': ') + deps = deps_string.split(' ') + +- self.failUnlessEqual("resource.h", dep_output_file) +- self.failUnlessEqual(2, len(deps)) +- self.failUnlessEqual(deps[0], ++ self.assertEqual("resource.h", dep_output_file) ++ self.assertEqual(2, len(deps)) ++ self.assertEqual(deps[0], + util.PathFromRoot('grit/testdata/substitute.xmb')) +- self.failUnlessEqual(deps[1], ++ self.assertEqual(deps[1], + util.PathFromRoot('grit/testdata/resource_ids')) + output_dir.CleanUp() + +@@ -111,7 +111,7 @@ + + # Incomplete output file list should fail. + builder_fail = build.RcBuilder() +- self.failUnlessEqual(2, ++ self.assertEqual(2, + builder_fail.Run(DummyOpts(), [ + '-o', output_dir.GetPath(), + '-a', os.path.abspath( +@@ -119,7 +119,7 @@ + + # Complete output file list should succeed. + builder_ok = build.RcBuilder() +- self.failUnlessEqual(0, ++ self.assertEqual(0, + builder_ok.Run(DummyOpts(), [ + '-o', output_dir.GetPath(), + '-a', os.path.abspath( +@@ -139,7 +139,7 @@ + + # Incomplete output file list should fail. + builder_fail = build.RcBuilder() +- self.failUnlessEqual(2, ++ self.assertEqual(2, + builder_fail.Run(DummyOpts(), [ + '-o', output_dir.GetPath(), + '-E', 'name=foo', +@@ -147,7 +147,7 @@ + + # Complete output file list should succeed. 
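testGenerateDepFile above round-trips the Make-style dependency line that GRIT writes: a single "output: dep1 dep2 ..." rule. A tiny standalone sketch of that format (the filenames below are placeholders, not real test data):

# GRIT's depfile holds one Make-style rule per build.
line = 'default_100_percent.pak: testdata/a.png testdata/grit_part.grdp\n'

dep_output_file, deps_string = line.rstrip('\n').split(': ')
deps = deps_string.split(' ')
print(dep_output_file)  # default_100_percent.pak
print(deps)             # ['testdata/a.png', 'testdata/grit_part.grdp']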
+ builder_ok = build.RcBuilder() +- self.failUnlessEqual(0, ++ self.assertEqual(0, + builder_ok.Run(DummyOpts(), [ + '-o', output_dir.GetPath(), + '-E', 'name=foo', +@@ -161,7 +161,7 @@ + allowlisted_ids, + non_allowlisted_ids, + encoding='utf8'): +- self.failUnless(os.path.exists(filename)) ++ self.assertTrue(os.path.exists(filename)) + allowlisted_ids_found = [] + non_allowlisted_ids_found = [] + with codecs.open(filename, encoding=encoding) as f: +@@ -231,8 +231,8 @@ + + # Ensure the resource map header and .pak files exist, but don't verify + # their content. +- self.failUnless(os.path.exists(map_h)) +- self.failUnless(os.path.exists(pak)) ++ self.assertTrue(os.path.exists(map_h)) ++ self.assertTrue(os.path.exists(pak)) + + allowlisted_ids = [ + 'IDR_STRUCTURE_ALLOWLISTED', +@@ -266,19 +266,19 @@ + header = output_dir.GetPath('resource.h') + + builder.Run(DummyOpts(), ['-o', output_dir.GetPath()]) +- self.failUnless(os.path.exists(header)) ++ self.assertTrue(os.path.exists(header)) + first_mtime = os.stat(header).st_mtime + + os.utime(header, (UNCHANGED, UNCHANGED)) + builder.Run(DummyOpts(), + ['-o', output_dir.GetPath(), '--write-only-new', '0']) +- self.failUnless(os.path.exists(header)) ++ self.assertTrue(os.path.exists(header)) + second_mtime = os.stat(header).st_mtime + + os.utime(header, (UNCHANGED, UNCHANGED)) + builder.Run(DummyOpts(), + ['-o', output_dir.GetPath(), '--write-only-new', '1']) +- self.failUnless(os.path.exists(header)) ++ self.assertTrue(os.path.exists(header)) + third_mtime = os.stat(header).st_mtime + + self.assertTrue(abs(second_mtime - UNCHANGED) > 5) +@@ -303,7 +303,7 @@ + '--depdir', output_dir.GetPath(), + '--depfile', expected_dep_file, + '--depend-on-stamp']) +- self.failUnless(os.path.isfile(expected_stamp_file)) ++ self.assertTrue(os.path.isfile(expected_stamp_file)) + first_mtime = os.stat(expected_stamp_file).st_mtime + + # Reset mtime to very old. +@@ -314,21 +314,21 @@ + '--depdir', output_dir.GetPath(), + '--depfile', expected_dep_file, + '--depend-on-stamp']) +- self.failUnless(os.path.isfile(expected_stamp_file)) ++ self.assertTrue(os.path.isfile(expected_stamp_file)) + second_mtime = os.stat(expected_stamp_file).st_mtime + + # Some OS have a 2s stat resolution window, so can't do a direct comparison. + self.assertTrue((second_mtime - OLDTIME) > 5) + self.assertTrue(abs(second_mtime - first_mtime) < 5) + +- self.failUnless(os.path.isfile(expected_dep_file)) ++ self.assertTrue(os.path.isfile(expected_dep_file)) + with open(expected_dep_file) as f: + line = f.readline() + (dep_output_file, deps_string) = line.split(': ') + deps = deps_string.split(' ') + +- self.failUnlessEqual(expected_stamp_file_name, dep_output_file) +- self.failUnlessEqual(deps, [ ++ self.assertEqual(expected_stamp_file_name, dep_output_file) ++ self.assertEqual(deps, [ + util.PathFromRoot('grit/testdata/substitute.xmb'), + ]) + output_dir.CleanUp() +--- a/src/3rdparty/chromium/tools/grit/grit/tool/buildinfo.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/tool/buildinfo.py 2025-01-16 02:26:08.635762121 +0800 +@@ -5,7 +5,7 @@ + """Output the list of files to be generated by GRIT from an input. 
+ """ + +-from __future__ import print_function ++ + + import getopt + import os +@@ -54,7 +54,7 @@ + if output.attrs['lang']: + langs[output.attrs['lang']] = os.path.dirname(output.GetFilename()) + +- for lang, dirname in langs.items(): ++ for lang, dirname in list(langs.items()): + old_output_language = res_tree.output_language + res_tree.SetOutputLanguage(lang) + for node in res_tree.ActiveDescendants(): +--- a/src/3rdparty/chromium/tools/grit/grit/tool/buildinfo_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/tool/buildinfo_unittest.py 2025-01-16 02:26:08.635762121 +0800 +@@ -6,7 +6,7 @@ + """Unit tests for the 'grit buildinfo' tool. + """ + +-from __future__ import print_function ++ + + import os + import sys +@@ -48,17 +48,17 @@ + self.extra_verbose = False + info_object.Run(DummyOpts(), []) + output = self.buf.getvalue().replace('\\', '/') +- self.failUnless(output.count(r'rc_all|sv_sidebar_loading.html')) +- self.failUnless(output.count(r'rc_header|resource.h')) +- self.failUnless(output.count(r'rc_all|en_generated_resources.rc')) +- self.failUnless(output.count(r'rc_all|sv_generated_resources.rc')) +- self.failUnless(output.count(r'input|../grit/testdata/substitute.xmb')) +- self.failUnless(output.count(r'input|../grit/testdata/pr.bmp')) +- self.failUnless(output.count(r'input|../grit/testdata/pr2.bmp')) +- self.failUnless( ++ self.assertTrue(output.count(r'rc_all|sv_sidebar_loading.html')) ++ self.assertTrue(output.count(r'rc_header|resource.h')) ++ self.assertTrue(output.count(r'rc_all|en_generated_resources.rc')) ++ self.assertTrue(output.count(r'rc_all|sv_generated_resources.rc')) ++ self.assertTrue(output.count(r'input|../grit/testdata/substitute.xmb')) ++ self.assertTrue(output.count(r'input|../grit/testdata/pr.bmp')) ++ self.assertTrue(output.count(r'input|../grit/testdata/pr2.bmp')) ++ self.assertTrue( + output.count(r'input|../grit/testdata/sidebar_loading.html')) +- self.failUnless(output.count(r'input|../grit/testdata/transl.rc')) +- self.failUnless(output.count(r'input|../grit/testdata/transl1.rc')) ++ self.assertTrue(output.count(r'input|../grit/testdata/transl.rc')) ++ self.assertTrue(output.count(r'input|../grit/testdata/transl1.rc')) + + def testBuildOutputWithDir(self): + """Find all the inputs and outputs for a GRD file with an output dir.""" +@@ -72,17 +72,17 @@ + self.extra_verbose = False + info_object.Run(DummyOpts(), ['-o', '../grit/testdata']) + output = self.buf.getvalue().replace('\\', '/') +- self.failUnless( ++ self.assertTrue( + output.count(r'rc_all|../grit/testdata/sv_sidebar_loading.html')) +- self.failUnless(output.count(r'rc_header|../grit/testdata/resource.h')) +- self.failUnless( ++ self.assertTrue(output.count(r'rc_header|../grit/testdata/resource.h')) ++ self.assertTrue( + output.count(r'rc_all|../grit/testdata/en_generated_resources.rc')) +- self.failUnless( ++ self.assertTrue( + output.count(r'rc_all|../grit/testdata/sv_generated_resources.rc')) +- self.failUnless(output.count(r'input|../grit/testdata/substitute.xmb')) +- self.failUnlessEqual(0, ++ self.assertTrue(output.count(r'input|../grit/testdata/substitute.xmb')) ++ self.assertEqual(0, + output.count(r'rc_all|../grit/testdata/sv_welcome_toast.html')) +- self.failUnless( ++ self.assertTrue( + output.count(r'rc_all|../grit/testdata/en_welcome_toast.html')) + + +--- a/src/3rdparty/chromium/tools/grit/grit/tool/count.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/tool/count.py 2025-01-16 02:26:08.635762121 
+0800 +@@ -4,7 +4,7 @@ + + '''Count number of occurrences of a given message ID.''' + +-from __future__ import print_function ++ + + import getopt + import sys +--- a/src/3rdparty/chromium/tools/grit/grit/tool/diff_structures.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/tool/diff_structures.py 2025-01-16 02:26:08.635762121 +0800 +@@ -5,7 +5,7 @@ + '''The 'grit sdiff' tool. + ''' + +-from __future__ import print_function ++ + + import os + import getopt +--- a/src/3rdparty/chromium/tools/grit/grit/tool/diff_structures_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/tool/diff_structures_unittest.py 2025-01-16 02:26:08.635762121 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for the 'grit newgrd' tool.''' + +-from __future__ import print_function ++ + + import os + import sys +--- a/src/3rdparty/chromium/tools/grit/grit/tool/interface.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/tool/interface.py 2025-01-16 02:26:08.635762121 +0800 +@@ -5,7 +5,7 @@ + '''Base class and interface for tools. + ''' + +-from __future__ import print_function ++ + + class Tool(object): + '''Base class for all tools. Tools should use their docstring (i.e. the +--- a/src/3rdparty/chromium/tools/grit/grit/tool/menu_from_parts.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/tool/menu_from_parts.py 2025-01-16 02:26:08.635762121 +0800 +@@ -4,7 +4,7 @@ + + '''The 'grit menufromparts' tool.''' + +-from __future__ import print_function ++ + + import six + +--- a/src/3rdparty/chromium/tools/grit/grit/tool/newgrd.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/tool/newgrd.py 2025-01-16 02:26:08.636845436 +0800 +@@ -5,7 +5,7 @@ + '''Tool to create a new, empty .grd file with all the basic sections. + ''' + +-from __future__ import print_function ++ + + import getopt + import sys +--- a/src/3rdparty/chromium/tools/grit/grit/tool/newgrd_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/tool/newgrd_unittest.py 2025-01-16 02:26:08.636845436 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for the 'grit newgrd' tool.''' + +-from __future__ import print_function ++ + + import os + import sys +--- a/src/3rdparty/chromium/tools/grit/grit/tool/postprocess_interface.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/tool/postprocess_interface.py 2025-01-16 02:26:08.636845436 +0800 +@@ -5,7 +5,7 @@ + ''' Base class for postprocessing of RC files. + ''' + +-from __future__ import print_function ++ + + class PostProcessor(object): + ''' Base class for postprocessing of the RC file data before being +--- a/src/3rdparty/chromium/tools/grit/grit/tool/postprocess_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/tool/postprocess_unittest.py 2025-01-16 02:26:08.636845436 +0800 +@@ -8,7 +8,7 @@ + modify the grd data tree, changing the message name attributes. 
+ ''' + +-from __future__ import print_function ++ + + import os + import re +@@ -40,9 +40,9 @@ + tool.post_process = 'grit.tool.postprocess_unittest.DummyPostProcessor' + result = tool.Process(rctext, '.\resource.rc') + +- self.failUnless( ++ self.assertTrue( + result.children[2].children[2].children[0].attrs['name'] == 'SMART_STRING_1') +- self.failUnless( ++ self.assertTrue( + result.children[2].children[2].children[1].attrs['name'] == 'SMART_STRING_2') + + class DummyPostProcessor(grit.tool.postprocess_interface.PostProcessor): +--- a/src/3rdparty/chromium/tools/grit/grit/tool/preprocess_interface.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/tool/preprocess_interface.py 2025-01-16 02:26:08.636845436 +0800 +@@ -5,7 +5,7 @@ + ''' Base class for preprocessing of RC files. + ''' + +-from __future__ import print_function ++ + + class PreProcessor(object): + ''' Base class for preprocessing of the RC file data before being +--- a/src/3rdparty/chromium/tools/grit/grit/tool/preprocess_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/tool/preprocess_unittest.py 2025-01-16 02:26:08.636845436 +0800 +@@ -8,7 +8,7 @@ + provide the actual rctext data. + ''' + +-from __future__ import print_function ++ + + import os + import sys +@@ -32,7 +32,7 @@ + tool.pre_process = 'grit.tool.preprocess_unittest.DummyPreProcessor' + result = tool.Process('', '.\resource.rc') + +- self.failUnless( ++ self.assertTrue( + result.children[2].children[2].children[0].attrs['name'] == 'DUMMY_STRING_1') + + class DummyPreProcessor(grit.tool.preprocess_interface.PreProcessor): +--- a/src/3rdparty/chromium/tools/grit/grit/tool/rc2grd.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/tool/rc2grd.py 2025-01-16 02:26:08.636845436 +0800 +@@ -4,7 +4,7 @@ + + '''The 'grit rc2grd' tool.''' + +-from __future__ import print_function ++ + + import os.path + import getopt +--- a/src/3rdparty/chromium/tools/grit/grit/tool/rc2grd_unittest.py 2023-07-18 22:12:18.000000000 +0800 ++++ b/src/3rdparty/chromium/tools/grit/grit/tool/rc2grd_unittest.py 2025-01-16 02:26:08.636845436 +0800 +@@ -5,7 +5,7 @@ + + '''Unit tests for grit.tool.rc2grd''' + +-from __future__ import print_function ++ + + import os + import sys +@@ -28,16 +28,16 @@ + tool = rc2grd.Rc2Grd() + original = "Hello %s, how are you? I'm $1 years old!" + msg = tool.Placeholderize(original) +- self.failUnless(msg.GetPresentableContent() == "Hello TODO_0001, how are you? I'm TODO_0002 years old!") +- self.failUnless(msg.GetRealContent() == original) ++ self.assertTrue(msg.GetPresentableContent() == "Hello TODO_0001, how are you? I'm TODO_0002 years old!") ++ self.assertTrue(msg.GetRealContent() == original) + + def testHtmlPlaceholderize(self): + tool = rc2grd.Rc2Grd() + original = "Hello [USERNAME], how are you? I'm [AGE] years old!" + msg = tool.Placeholderize(original) +- self.failUnless(msg.GetPresentableContent() == ++ self.assertTrue(msg.GetPresentableContent() == + "Hello BEGIN_BOLDX_USERNAME_XEND_BOLD, how are you? 
I'm X_AGE_X years old!") +- self.failUnless(msg.GetRealContent() == original) ++ self.assertTrue(msg.GetRealContent() == original) + + def testMenuWithoutWhitespaceRegression(self): + # There was a problem in the original regular expression for parsing out +@@ -62,7 +62,7 @@ + END + + ''' +- self.failUnless(len(rc2grd._MENU.findall(two_menus)) == 2) ++ self.assertTrue(len(rc2grd._MENU.findall(two_menus)) == 2) + + def testRegressionScriptWithTranslateable(self): + tool = rc2grd.Rc2Grd() +@@ -78,11 +78,11 @@ + + rc_text = '''STRINGTABLE\nBEGIN\nID_BINGO ""\nEND\n''' + tool.AddMessages(rc_text, tool.o) +- self.failUnless(tool.o.node.GetCdata().find('Set As Homepage') != -1) ++ self.assertTrue(tool.o.node.GetCdata().find('Set As Homepage') != -1) + + # TODO(joi) Improve the HTML parser to support translateables inside + #
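A closing note on the list(d.items()) and list(d.values()) rewrites applied throughout this patch (buildinfo.py, misc.py, structure.py and others above): Python 3's items()/values()/keys() return live views, and the automated conversion wraps them in list() unconditionally. The snapshot only matters when the dict is mutated during iteration; a short sketch with an invented dict:

langs = {'fr': 'out/fr', 'sv': 'out/sv', 'nb': 'out/nb'}

# Read-only iteration works directly on the view; no copy is needed:
for lang, dirname in langs.items():
    print(lang, dirname)

# Mutating while iterating does need the list() snapshot, otherwise
# Python 3 raises "RuntimeError: dictionary changed size during iteration":
for lang in list(langs):
    if lang != 'fr':
        del langs[lang]
print(langs)  # {'fr': 'out/fr'}

Keeping the defensive list() wrappers is harmless; dropping them where the loop body clearly does not mutate the dict would be a follow-up cleanup, not something this patch attempts.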