From ef22df9867ba91fbf3f4927d59c165715556ace2 Mon Sep 17 00:00:00 2001 From: U2FsdGVkX1 Date: Thu, 2 Nov 2023 08:04:24 +0800 Subject: [PATCH] Add RISC-V 64 support --- ...ind-3.21.0-Add-riscv64-Linux-support.patch | 2882216 ++++++++++++++ valgrind.spec | 24 +- 2 files changed, 2882237 insertions(+), 3 deletions(-) create mode 100644 valgrind-3.21.0-Add-riscv64-Linux-support.patch diff --git a/valgrind-3.21.0-Add-riscv64-Linux-support.patch b/valgrind-3.21.0-Add-riscv64-Linux-support.patch new file mode 100644 index 0000000..666c3a9 --- /dev/null +++ b/valgrind-3.21.0-Add-riscv64-Linux-support.patch @@ -0,0 +1,2882216 @@ +diff '--color=auto' -ru --new-file valgrind-3.21.0/aclocal.m4 valgrind-riscv64/aclocal.m4 +--- valgrind-3.21.0/aclocal.m4 2023-04-28 23:38:33.000000000 +0800 ++++ valgrind-riscv64/aclocal.m4 1970-01-01 08:00:00.000000000 +0800 +@@ -1,1206 +0,0 @@ +-# generated automatically by aclocal 1.16.5 -*- Autoconf -*- +- +-# Copyright (C) 1996-2021 Free Software Foundation, Inc. +- +-# This file is free software; the Free Software Foundation +-# gives unlimited permission to copy and/or distribute it, +-# with or without modifications, as long as this notice is preserved. +- +-# This program is distributed in the hope that it will be useful, +-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +-# PARTICULAR PURPOSE. +- +-m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])]) +-m4_ifndef([AC_AUTOCONF_VERSION], +- [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl +-m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.71],, +-[m4_warning([this file was generated for autoconf 2.71. +-You have another version of autoconf. It may work, but is not guaranteed to. +-If you have problems, you may need to regenerate the build system entirely. +-To do so, use the procedure documented by the package, typically 'autoreconf'.])]) +- +-# Copyright (C) 2002-2021 Free Software Foundation, Inc. +-# +-# This file is free software; the Free Software Foundation +-# gives unlimited permission to copy and/or distribute it, +-# with or without modifications, as long as this notice is preserved. +- +-# AM_AUTOMAKE_VERSION(VERSION) +-# ---------------------------- +-# Automake X.Y traces this macro to ensure aclocal.m4 has been +-# generated from the m4 files accompanying Automake X.Y. +-# (This private macro should not be called outside this file.) +-AC_DEFUN([AM_AUTOMAKE_VERSION], +-[am__api_version='1.16' +-dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to +-dnl require some minimum version. Point them to the right macro. +-m4_if([$1], [1.16.5], [], +- [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl +-]) +- +-# _AM_AUTOCONF_VERSION(VERSION) +-# ----------------------------- +-# aclocal traces this macro to find the Autoconf version. +-# This is a private macro too. Using m4_define simplifies +-# the logic in aclocal, which can simply ignore this definition. +-m4_define([_AM_AUTOCONF_VERSION], []) +- +-# AM_SET_CURRENT_AUTOMAKE_VERSION +-# ------------------------------- +-# Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. +-# This function is AC_REQUIREd by AM_INIT_AUTOMAKE. 
+-AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], +-[AM_AUTOMAKE_VERSION([1.16.5])dnl +-m4_ifndef([AC_AUTOCONF_VERSION], +- [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl +-_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))]) +- +-# Figure out how to run the assembler. -*- Autoconf -*- +- +-# Copyright (C) 2001-2021 Free Software Foundation, Inc. +-# +-# This file is free software; the Free Software Foundation +-# gives unlimited permission to copy and/or distribute it, +-# with or without modifications, as long as this notice is preserved. +- +-# AM_PROG_AS +-# ---------- +-AC_DEFUN([AM_PROG_AS], +-[# By default we simply use the C compiler to build assembly code. +-AC_REQUIRE([AC_PROG_CC]) +-test "${CCAS+set}" = set || CCAS=$CC +-test "${CCASFLAGS+set}" = set || CCASFLAGS=$CFLAGS +-AC_ARG_VAR([CCAS], [assembler compiler command (defaults to CC)]) +-AC_ARG_VAR([CCASFLAGS], [assembler compiler flags (defaults to CFLAGS)]) +-_AM_IF_OPTION([no-dependencies],, [_AM_DEPENDENCIES([CCAS])])dnl +-]) +- +-# AM_AUX_DIR_EXPAND -*- Autoconf -*- +- +-# Copyright (C) 2001-2021 Free Software Foundation, Inc. +-# +-# This file is free software; the Free Software Foundation +-# gives unlimited permission to copy and/or distribute it, +-# with or without modifications, as long as this notice is preserved. +- +-# For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets +-# $ac_aux_dir to '$srcdir/foo'. In other projects, it is set to +-# '$srcdir', '$srcdir/..', or '$srcdir/../..'. +-# +-# Of course, Automake must honor this variable whenever it calls a +-# tool from the auxiliary directory. The problem is that $srcdir (and +-# therefore $ac_aux_dir as well) can be either absolute or relative, +-# depending on how configure is run. This is pretty annoying, since +-# it makes $ac_aux_dir quite unusable in subdirectories: in the top +-# source directory, any form will work fine, but in subdirectories a +-# relative path needs to be adjusted first. +-# +-# $ac_aux_dir/missing +-# fails when called from a subdirectory if $ac_aux_dir is relative +-# $top_srcdir/$ac_aux_dir/missing +-# fails if $ac_aux_dir is absolute, +-# fails when called from a subdirectory in a VPATH build with +-# a relative $ac_aux_dir +-# +-# The reason of the latter failure is that $top_srcdir and $ac_aux_dir +-# are both prefixed by $srcdir. In an in-source build this is usually +-# harmless because $srcdir is '.', but things will broke when you +-# start a VPATH build or use an absolute $srcdir. +-# +-# So we could use something similar to $top_srcdir/$ac_aux_dir/missing, +-# iff we strip the leading $srcdir from $ac_aux_dir. That would be: +-# am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"` +-# and then we would define $MISSING as +-# MISSING="\${SHELL} $am_aux_dir/missing" +-# This will work as long as MISSING is not called from configure, because +-# unfortunately $(top_srcdir) has no meaning in configure. +-# However there are other variables, like CC, which are often used in +-# configure, and could therefore not use this "fixed" $ac_aux_dir. +-# +-# Another solution, used here, is to always expand $ac_aux_dir to an +-# absolute PATH. The drawback is that using absolute paths prevent a +-# configured tree to be moved without reconfiguration. +- +-AC_DEFUN([AM_AUX_DIR_EXPAND], +-[AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl +-# Expand $ac_aux_dir to an absolute path. 
+-am_aux_dir=`cd "$ac_aux_dir" && pwd` +-]) +- +-# AM_CONDITIONAL -*- Autoconf -*- +- +-# Copyright (C) 1997-2021 Free Software Foundation, Inc. +-# +-# This file is free software; the Free Software Foundation +-# gives unlimited permission to copy and/or distribute it, +-# with or without modifications, as long as this notice is preserved. +- +-# AM_CONDITIONAL(NAME, SHELL-CONDITION) +-# ------------------------------------- +-# Define a conditional. +-AC_DEFUN([AM_CONDITIONAL], +-[AC_PREREQ([2.52])dnl +- m4_if([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])], +- [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl +-AC_SUBST([$1_TRUE])dnl +-AC_SUBST([$1_FALSE])dnl +-_AM_SUBST_NOTMAKE([$1_TRUE])dnl +-_AM_SUBST_NOTMAKE([$1_FALSE])dnl +-m4_define([_AM_COND_VALUE_$1], [$2])dnl +-if $2; then +- $1_TRUE= +- $1_FALSE='#' +-else +- $1_TRUE='#' +- $1_FALSE= +-fi +-AC_CONFIG_COMMANDS_PRE( +-[if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then +- AC_MSG_ERROR([[conditional "$1" was never defined. +-Usually this means the macro was only invoked conditionally.]]) +-fi])]) +- +-# Copyright (C) 1999-2021 Free Software Foundation, Inc. +-# +-# This file is free software; the Free Software Foundation +-# gives unlimited permission to copy and/or distribute it, +-# with or without modifications, as long as this notice is preserved. +- +- +-# There are a few dirty hacks below to avoid letting 'AC_PROG_CC' be +-# written in clear, in which case automake, when reading aclocal.m4, +-# will think it sees a *use*, and therefore will trigger all it's +-# C support machinery. Also note that it means that autoscan, seeing +-# CC etc. in the Makefile, will ask for an AC_PROG_CC use... +- +- +-# _AM_DEPENDENCIES(NAME) +-# ---------------------- +-# See how the compiler implements dependency checking. +-# NAME is "CC", "CXX", "OBJC", "OBJCXX", "UPC", or "GJC". +-# We try a few techniques and use that to set a single cache variable. +-# +-# We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was +-# modified to invoke _AM_DEPENDENCIES(CC); we would have a circular +-# dependency, and given that the user is not expected to run this macro, +-# just rely on AC_PROG_CC. +-AC_DEFUN([_AM_DEPENDENCIES], +-[AC_REQUIRE([AM_SET_DEPDIR])dnl +-AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl +-AC_REQUIRE([AM_MAKE_INCLUDE])dnl +-AC_REQUIRE([AM_DEP_TRACK])dnl +- +-m4_if([$1], [CC], [depcc="$CC" am_compiler_list=], +- [$1], [CXX], [depcc="$CXX" am_compiler_list=], +- [$1], [OBJC], [depcc="$OBJC" am_compiler_list='gcc3 gcc'], +- [$1], [OBJCXX], [depcc="$OBJCXX" am_compiler_list='gcc3 gcc'], +- [$1], [UPC], [depcc="$UPC" am_compiler_list=], +- [$1], [GCJ], [depcc="$GCJ" am_compiler_list='gcc3 gcc'], +- [depcc="$$1" am_compiler_list=]) +- +-AC_CACHE_CHECK([dependency style of $depcc], +- [am_cv_$1_dependencies_compiler_type], +-[if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then +- # We make a subdir and do the tests there. Otherwise we can end up +- # making bogus files that we don't know about and never remove. For +- # instance it was reported that on HP-UX the gcc test will end up +- # making a dummy file named 'D' -- because '-MD' means "put the output +- # in D". +- rm -rf conftest.dir +- mkdir conftest.dir +- # Copy depcomp to subdir because otherwise we won't find it if we're +- # using a relative directory. +- cp "$am_depcomp" conftest.dir +- cd conftest.dir +- # We will build objects and dependencies in a subdirectory because +- # it helps to detect inapplicable dependency modes. 
For instance +- # both Tru64's cc and ICC support -MD to output dependencies as a +- # side effect of compilation, but ICC will put the dependencies in +- # the current directory while Tru64 will put them in the object +- # directory. +- mkdir sub +- +- am_cv_$1_dependencies_compiler_type=none +- if test "$am_compiler_list" = ""; then +- am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp` +- fi +- am__universal=false +- m4_case([$1], [CC], +- [case " $depcc " in #( +- *\ -arch\ *\ -arch\ *) am__universal=true ;; +- esac], +- [CXX], +- [case " $depcc " in #( +- *\ -arch\ *\ -arch\ *) am__universal=true ;; +- esac]) +- +- for depmode in $am_compiler_list; do +- # Setup a source with many dependencies, because some compilers +- # like to wrap large dependency lists on column 80 (with \), and +- # we should not choose a depcomp mode which is confused by this. +- # +- # We need to recreate these files for each test, as the compiler may +- # overwrite some of them when testing with obscure command lines. +- # This happens at least with the AIX C compiler. +- : > sub/conftest.c +- for i in 1 2 3 4 5 6; do +- echo '#include "conftst'$i'.h"' >> sub/conftest.c +- # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with +- # Solaris 10 /bin/sh. +- echo '/* dummy */' > sub/conftst$i.h +- done +- echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf +- +- # We check with '-c' and '-o' for the sake of the "dashmstdout" +- # mode. It turns out that the SunPro C++ compiler does not properly +- # handle '-M -o', and we need to detect this. Also, some Intel +- # versions had trouble with output in subdirs. +- am__obj=sub/conftest.${OBJEXT-o} +- am__minus_obj="-o $am__obj" +- case $depmode in +- gcc) +- # This depmode causes a compiler race in universal mode. +- test "$am__universal" = false || continue +- ;; +- nosideeffect) +- # After this tag, mechanisms are not by side-effect, so they'll +- # only be used when explicitly requested. +- if test "x$enable_dependency_tracking" = xyes; then +- continue +- else +- break +- fi +- ;; +- msvc7 | msvc7msys | msvisualcpp | msvcmsys) +- # This compiler won't grok '-c -o', but also, the minuso test has +- # not run yet. These depmodes are late enough in the game, and +- # so weak that their functioning should not be impacted. +- am__obj=conftest.${OBJEXT-o} +- am__minus_obj= +- ;; +- none) break ;; +- esac +- if depmode=$depmode \ +- source=sub/conftest.c object=$am__obj \ +- depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ +- $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ +- >/dev/null 2>conftest.err && +- grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && +- grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && +- grep $am__obj sub/conftest.Po > /dev/null 2>&1 && +- ${MAKE-make} -s -f confmf > /dev/null 2>&1; then +- # icc doesn't choke on unknown options, it will just issue warnings +- # or remarks (even with -Werror). So we grep stderr for any message +- # that says an option was ignored or not supported. +- # When given -MP, icc 7.0 and 7.1 complain thusly: +- # icc: Command line warning: ignoring option '-M'; no argument required +- # The diagnosis changed in icc 8.0: +- # icc: Command line remark: option '-MP' not supported +- if (grep 'ignoring option' conftest.err || +- grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else +- am_cv_$1_dependencies_compiler_type=$depmode +- break +- fi +- fi +- done +- +- cd .. 
+- rm -rf conftest.dir +-else +- am_cv_$1_dependencies_compiler_type=none +-fi +-]) +-AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type]) +-AM_CONDITIONAL([am__fastdep$1], [ +- test "x$enable_dependency_tracking" != xno \ +- && test "$am_cv_$1_dependencies_compiler_type" = gcc3]) +-]) +- +- +-# AM_SET_DEPDIR +-# ------------- +-# Choose a directory name for dependency files. +-# This macro is AC_REQUIREd in _AM_DEPENDENCIES. +-AC_DEFUN([AM_SET_DEPDIR], +-[AC_REQUIRE([AM_SET_LEADING_DOT])dnl +-AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl +-]) +- +- +-# AM_DEP_TRACK +-# ------------ +-AC_DEFUN([AM_DEP_TRACK], +-[AC_ARG_ENABLE([dependency-tracking], [dnl +-AS_HELP_STRING( +- [--enable-dependency-tracking], +- [do not reject slow dependency extractors]) +-AS_HELP_STRING( +- [--disable-dependency-tracking], +- [speeds up one-time build])]) +-if test "x$enable_dependency_tracking" != xno; then +- am_depcomp="$ac_aux_dir/depcomp" +- AMDEPBACKSLASH='\' +- am__nodep='_no' +-fi +-AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno]) +-AC_SUBST([AMDEPBACKSLASH])dnl +-_AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl +-AC_SUBST([am__nodep])dnl +-_AM_SUBST_NOTMAKE([am__nodep])dnl +-]) +- +-# Generate code to set up dependency tracking. -*- Autoconf -*- +- +-# Copyright (C) 1999-2021 Free Software Foundation, Inc. +-# +-# This file is free software; the Free Software Foundation +-# gives unlimited permission to copy and/or distribute it, +-# with or without modifications, as long as this notice is preserved. +- +-# _AM_OUTPUT_DEPENDENCY_COMMANDS +-# ------------------------------ +-AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], +-[{ +- # Older Autoconf quotes --file arguments for eval, but not when files +- # are listed without --file. Let's play safe and only enable the eval +- # if we detect the quoting. +- # TODO: see whether this extra hack can be removed once we start +- # requiring Autoconf 2.70 or later. +- AS_CASE([$CONFIG_FILES], +- [*\'*], [eval set x "$CONFIG_FILES"], +- [*], [set x $CONFIG_FILES]) +- shift +- # Used to flag and report bootstrapping failures. +- am_rc=0 +- for am_mf +- do +- # Strip MF so we end up with the name of the file. +- am_mf=`AS_ECHO(["$am_mf"]) | sed -e 's/:.*$//'` +- # Check whether this is an Automake generated Makefile which includes +- # dependency-tracking related rules and includes. +- # Grep'ing the whole file directly is not great: AIX grep has a line +- # limit of 2048, but all sed's we know have understand at least 4000. +- sed -n 's,^am--depfiles:.*,X,p' "$am_mf" | grep X >/dev/null 2>&1 \ +- || continue +- am_dirpart=`AS_DIRNAME(["$am_mf"])` +- am_filepart=`AS_BASENAME(["$am_mf"])` +- AM_RUN_LOG([cd "$am_dirpart" \ +- && sed -e '/# am--include-marker/d' "$am_filepart" \ +- | $MAKE -f - am--depfiles]) || am_rc=$? +- done +- if test $am_rc -ne 0; then +- AC_MSG_FAILURE([Something went wrong bootstrapping makefile fragments +- for automatic dependency tracking. If GNU make was not used, consider +- re-running the configure script with MAKE="gmake" (or whatever is +- necessary). 
You can also try re-running configure with the +- '--disable-dependency-tracking' option to at least be able to build +- the package (albeit without support for automatic dependency tracking).]) +- fi +- AS_UNSET([am_dirpart]) +- AS_UNSET([am_filepart]) +- AS_UNSET([am_mf]) +- AS_UNSET([am_rc]) +- rm -f conftest-deps.mk +-} +-])# _AM_OUTPUT_DEPENDENCY_COMMANDS +- +- +-# AM_OUTPUT_DEPENDENCY_COMMANDS +-# ----------------------------- +-# This macro should only be invoked once -- use via AC_REQUIRE. +-# +-# This code is only required when automatic dependency tracking is enabled. +-# This creates each '.Po' and '.Plo' makefile fragment that we'll need in +-# order to bootstrap the dependency handling code. +-AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], +-[AC_CONFIG_COMMANDS([depfiles], +- [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS], +- [AMDEP_TRUE="$AMDEP_TRUE" MAKE="${MAKE-make}"])]) +- +-# Do all the work for Automake. -*- Autoconf -*- +- +-# Copyright (C) 1996-2021 Free Software Foundation, Inc. +-# +-# This file is free software; the Free Software Foundation +-# gives unlimited permission to copy and/or distribute it, +-# with or without modifications, as long as this notice is preserved. +- +-# This macro actually does too much. Some checks are only needed if +-# your package does certain things. But this isn't really a big deal. +- +-dnl Redefine AC_PROG_CC to automatically invoke _AM_PROG_CC_C_O. +-m4_define([AC_PROG_CC], +-m4_defn([AC_PROG_CC]) +-[_AM_PROG_CC_C_O +-]) +- +-# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE]) +-# AM_INIT_AUTOMAKE([OPTIONS]) +-# ----------------------------------------------- +-# The call with PACKAGE and VERSION arguments is the old style +-# call (pre autoconf-2.50), which is being phased out. PACKAGE +-# and VERSION should now be passed to AC_INIT and removed from +-# the call to AM_INIT_AUTOMAKE. +-# We support both call styles for the transition. After +-# the next Automake release, Autoconf can make the AC_INIT +-# arguments mandatory, and then we can depend on a new Autoconf +-# release and drop the old call support. +-AC_DEFUN([AM_INIT_AUTOMAKE], +-[AC_PREREQ([2.65])dnl +-m4_ifdef([_$0_ALREADY_INIT], +- [m4_fatal([$0 expanded multiple times +-]m4_defn([_$0_ALREADY_INIT]))], +- [m4_define([_$0_ALREADY_INIT], m4_expansion_stack)])dnl +-dnl Autoconf wants to disallow AM_ names. We explicitly allow +-dnl the ones we care about. +-m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl +-AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl +-AC_REQUIRE([AC_PROG_INSTALL])dnl +-if test "`cd $srcdir && pwd`" != "`pwd`"; then +- # Use -I$(srcdir) only when $(srcdir) != ., so that make's output +- # is not polluted with repeated "-I." +- AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl +- # test to see if srcdir already configured +- if test -f $srcdir/config.status; then +- AC_MSG_ERROR([source directory already configured; run "make distclean" there first]) +- fi +-fi +- +-# test whether we have cygpath +-if test -z "$CYGPATH_W"; then +- if (cygpath --version) >/dev/null 2>/dev/null; then +- CYGPATH_W='cygpath -w' +- else +- CYGPATH_W=echo +- fi +-fi +-AC_SUBST([CYGPATH_W]) +- +-# Define the identity of the package. +-dnl Distinguish between old-style and new-style calls. 
+-m4_ifval([$2], +-[AC_DIAGNOSE([obsolete], +- [$0: two- and three-arguments forms are deprecated.]) +-m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl +- AC_SUBST([PACKAGE], [$1])dnl +- AC_SUBST([VERSION], [$2])], +-[_AM_SET_OPTIONS([$1])dnl +-dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT. +-m4_if( +- m4_ifset([AC_PACKAGE_NAME], [ok]):m4_ifset([AC_PACKAGE_VERSION], [ok]), +- [ok:ok],, +- [m4_fatal([AC_INIT should be called with package and version arguments])])dnl +- AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl +- AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl +- +-_AM_IF_OPTION([no-define],, +-[AC_DEFINE_UNQUOTED([PACKAGE], ["$PACKAGE"], [Name of package]) +- AC_DEFINE_UNQUOTED([VERSION], ["$VERSION"], [Version number of package])])dnl +- +-# Some tools Automake needs. +-AC_REQUIRE([AM_SANITY_CHECK])dnl +-AC_REQUIRE([AC_ARG_PROGRAM])dnl +-AM_MISSING_PROG([ACLOCAL], [aclocal-${am__api_version}]) +-AM_MISSING_PROG([AUTOCONF], [autoconf]) +-AM_MISSING_PROG([AUTOMAKE], [automake-${am__api_version}]) +-AM_MISSING_PROG([AUTOHEADER], [autoheader]) +-AM_MISSING_PROG([MAKEINFO], [makeinfo]) +-AC_REQUIRE([AM_PROG_INSTALL_SH])dnl +-AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl +-AC_REQUIRE([AC_PROG_MKDIR_P])dnl +-# For better backward compatibility. To be removed once Automake 1.9.x +-# dies out for good. For more background, see: +-# +-# +-AC_SUBST([mkdir_p], ['$(MKDIR_P)']) +-# We need awk for the "check" target (and possibly the TAP driver). The +-# system "awk" is bad on some platforms. +-AC_REQUIRE([AC_PROG_AWK])dnl +-AC_REQUIRE([AC_PROG_MAKE_SET])dnl +-AC_REQUIRE([AM_SET_LEADING_DOT])dnl +-_AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])], +- [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])], +- [_AM_PROG_TAR([v7])])]) +-_AM_IF_OPTION([no-dependencies],, +-[AC_PROVIDE_IFELSE([AC_PROG_CC], +- [_AM_DEPENDENCIES([CC])], +- [m4_define([AC_PROG_CC], +- m4_defn([AC_PROG_CC])[_AM_DEPENDENCIES([CC])])])dnl +-AC_PROVIDE_IFELSE([AC_PROG_CXX], +- [_AM_DEPENDENCIES([CXX])], +- [m4_define([AC_PROG_CXX], +- m4_defn([AC_PROG_CXX])[_AM_DEPENDENCIES([CXX])])])dnl +-AC_PROVIDE_IFELSE([AC_PROG_OBJC], +- [_AM_DEPENDENCIES([OBJC])], +- [m4_define([AC_PROG_OBJC], +- m4_defn([AC_PROG_OBJC])[_AM_DEPENDENCIES([OBJC])])])dnl +-AC_PROVIDE_IFELSE([AC_PROG_OBJCXX], +- [_AM_DEPENDENCIES([OBJCXX])], +- [m4_define([AC_PROG_OBJCXX], +- m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])dnl +-]) +-# Variables for tags utilities; see am/tags.am +-if test -z "$CTAGS"; then +- CTAGS=ctags +-fi +-AC_SUBST([CTAGS]) +-if test -z "$ETAGS"; then +- ETAGS=etags +-fi +-AC_SUBST([ETAGS]) +-if test -z "$CSCOPE"; then +- CSCOPE=cscope +-fi +-AC_SUBST([CSCOPE]) +- +-AC_REQUIRE([AM_SILENT_RULES])dnl +-dnl The testsuite driver may need to know about EXEEXT, so add the +-dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This +-dnl macro is hooked onto _AC_COMPILER_EXEEXT early, see below. +-AC_CONFIG_COMMANDS_PRE(dnl +-[m4_provide_if([_AM_COMPILER_EXEEXT], +- [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl +- +-# POSIX will say in a future version that running "rm -f" with no argument +-# is OK; and we want to be able to make that assumption in our Makefile +-# recipes. So use an aggressive probe to check that the usage we want is +-# actually supported "in the wild" to an acceptable degree. +-# See automake bug#10828. 
+-# To make any issue more visible, cause the running configure to be aborted +-# by default if the 'rm' program in use doesn't match our expectations; the +-# user can still override this though. +-if rm -f && rm -fr && rm -rf; then : OK; else +- cat >&2 <<'END' +-Oops! +- +-Your 'rm' program seems unable to run without file operands specified +-on the command line, even when the '-f' option is present. This is contrary +-to the behaviour of most rm programs out there, and not conforming with +-the upcoming POSIX standard: +- +-Please tell bug-automake@gnu.org about your system, including the value +-of your $PATH and any error possibly output before this message. This +-can help us improve future automake versions. +- +-END +- if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then +- echo 'Configuration will proceed anyway, since you have set the' >&2 +- echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2 +- echo >&2 +- else +- cat >&2 <<'END' +-Aborting the configuration process, to ensure you take notice of the issue. +- +-You can download and install GNU coreutils to get an 'rm' implementation +-that behaves properly: . +- +-If you want to complete the configuration process using your problematic +-'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM +-to "yes", and re-run configure. +- +-END +- AC_MSG_ERROR([Your 'rm' program is bad, sorry.]) +- fi +-fi +-dnl The trailing newline in this macro's definition is deliberate, for +-dnl backward compatibility and to allow trailing 'dnl'-style comments +-dnl after the AM_INIT_AUTOMAKE invocation. See automake bug#16841. +-]) +- +-dnl Hook into '_AC_COMPILER_EXEEXT' early to learn its expansion. Do not +-dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further +-dnl mangled by Autoconf and run in a shell conditional statement. +-m4_define([_AC_COMPILER_EXEEXT], +-m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])]) +- +-# When config.status generates a header, we must update the stamp-h file. +-# This file resides in the same directory as the config header +-# that is generated. The stamp files are numbered to have different names. +- +-# Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the +-# loop where config.status creates the headers, so we can generate +-# our stamp files there. +-AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK], +-[# Compute $1's index in $config_headers. +-_am_arg=$1 +-_am_stamp_count=1 +-for _am_header in $config_headers :; do +- case $_am_header in +- $_am_arg | $_am_arg:* ) +- break ;; +- * ) +- _am_stamp_count=`expr $_am_stamp_count + 1` ;; +- esac +-done +-echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) +- +-# Copyright (C) 2001-2021 Free Software Foundation, Inc. +-# +-# This file is free software; the Free Software Foundation +-# gives unlimited permission to copy and/or distribute it, +-# with or without modifications, as long as this notice is preserved. +- +-# AM_PROG_INSTALL_SH +-# ------------------ +-# Define $install_sh. +-AC_DEFUN([AM_PROG_INSTALL_SH], +-[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl +-if test x"${install_sh+set}" != xset; then +- case $am_aux_dir in +- *\ * | *\ *) +- install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; +- *) +- install_sh="\${SHELL} $am_aux_dir/install-sh" +- esac +-fi +-AC_SUBST([install_sh])]) +- +-# Copyright (C) 2003-2021 Free Software Foundation, Inc. 
+-# +-# This file is free software; the Free Software Foundation +-# gives unlimited permission to copy and/or distribute it, +-# with or without modifications, as long as this notice is preserved. +- +-# Check whether the underlying file-system supports filenames +-# with a leading dot. For instance MS-DOS doesn't. +-AC_DEFUN([AM_SET_LEADING_DOT], +-[rm -rf .tst 2>/dev/null +-mkdir .tst 2>/dev/null +-if test -d .tst; then +- am__leading_dot=. +-else +- am__leading_dot=_ +-fi +-rmdir .tst 2>/dev/null +-AC_SUBST([am__leading_dot])]) +- +-# Add --enable-maintainer-mode option to configure. -*- Autoconf -*- +-# From Jim Meyering +- +-# Copyright (C) 1996-2021 Free Software Foundation, Inc. +-# +-# This file is free software; the Free Software Foundation +-# gives unlimited permission to copy and/or distribute it, +-# with or without modifications, as long as this notice is preserved. +- +-# AM_MAINTAINER_MODE([DEFAULT-MODE]) +-# ---------------------------------- +-# Control maintainer-specific portions of Makefiles. +-# Default is to disable them, unless 'enable' is passed literally. +-# For symmetry, 'disable' may be passed as well. Anyway, the user +-# can override the default with the --enable/--disable switch. +-AC_DEFUN([AM_MAINTAINER_MODE], +-[m4_case(m4_default([$1], [disable]), +- [enable], [m4_define([am_maintainer_other], [disable])], +- [disable], [m4_define([am_maintainer_other], [enable])], +- [m4_define([am_maintainer_other], [enable]) +- m4_warn([syntax], [unexpected argument to AM@&t@_MAINTAINER_MODE: $1])]) +-AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles]) +- dnl maintainer-mode's default is 'disable' unless 'enable' is passed +- AC_ARG_ENABLE([maintainer-mode], +- [AS_HELP_STRING([--]am_maintainer_other[-maintainer-mode], +- am_maintainer_other[ make rules and dependencies not useful +- (and sometimes confusing) to the casual installer])], +- [USE_MAINTAINER_MODE=$enableval], +- [USE_MAINTAINER_MODE=]m4_if(am_maintainer_other, [enable], [no], [yes])) +- AC_MSG_RESULT([$USE_MAINTAINER_MODE]) +- AM_CONDITIONAL([MAINTAINER_MODE], [test $USE_MAINTAINER_MODE = yes]) +- MAINT=$MAINTAINER_MODE_TRUE +- AC_SUBST([MAINT])dnl +-] +-) +- +-# Check to see how 'make' treats includes. -*- Autoconf -*- +- +-# Copyright (C) 2001-2021 Free Software Foundation, Inc. +-# +-# This file is free software; the Free Software Foundation +-# gives unlimited permission to copy and/or distribute it, +-# with or without modifications, as long as this notice is preserved. +- +-# AM_MAKE_INCLUDE() +-# ----------------- +-# Check whether make has an 'include' directive that can support all +-# the idioms we need for our automatic dependency tracking code. +-AC_DEFUN([AM_MAKE_INCLUDE], +-[AC_MSG_CHECKING([whether ${MAKE-make} supports the include directive]) +-cat > confinc.mk << 'END' +-am__doit: +- @echo this is the am__doit target >confinc.out +-.PHONY: am__doit +-END +-am__include="#" +-am__quote= +-# BSD make does it like this. +-echo '.include "confinc.mk" # ignored' > confmf.BSD +-# Other make implementations (GNU, Solaris 10, AIX) do it like this. 
+-echo 'include confinc.mk # ignored' > confmf.GNU +-_am_result=no +-for s in GNU BSD; do +- AM_RUN_LOG([${MAKE-make} -f confmf.$s && cat confinc.out]) +- AS_CASE([$?:`cat confinc.out 2>/dev/null`], +- ['0:this is the am__doit target'], +- [AS_CASE([$s], +- [BSD], [am__include='.include' am__quote='"'], +- [am__include='include' am__quote=''])]) +- if test "$am__include" != "#"; then +- _am_result="yes ($s style)" +- break +- fi +-done +-rm -f confinc.* confmf.* +-AC_MSG_RESULT([${_am_result}]) +-AC_SUBST([am__include])]) +-AC_SUBST([am__quote])]) +- +-# Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- +- +-# Copyright (C) 1997-2021 Free Software Foundation, Inc. +-# +-# This file is free software; the Free Software Foundation +-# gives unlimited permission to copy and/or distribute it, +-# with or without modifications, as long as this notice is preserved. +- +-# AM_MISSING_PROG(NAME, PROGRAM) +-# ------------------------------ +-AC_DEFUN([AM_MISSING_PROG], +-[AC_REQUIRE([AM_MISSING_HAS_RUN]) +-$1=${$1-"${am_missing_run}$2"} +-AC_SUBST($1)]) +- +-# AM_MISSING_HAS_RUN +-# ------------------ +-# Define MISSING if not defined so far and test if it is modern enough. +-# If it is, set am_missing_run to use it, otherwise, to nothing. +-AC_DEFUN([AM_MISSING_HAS_RUN], +-[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl +-AC_REQUIRE_AUX_FILE([missing])dnl +-if test x"${MISSING+set}" != xset; then +- MISSING="\${SHELL} '$am_aux_dir/missing'" +-fi +-# Use eval to expand $SHELL +-if eval "$MISSING --is-lightweight"; then +- am_missing_run="$MISSING " +-else +- am_missing_run= +- AC_MSG_WARN(['missing' script is too old or missing]) +-fi +-]) +- +-# Helper functions for option handling. -*- Autoconf -*- +- +-# Copyright (C) 2001-2021 Free Software Foundation, Inc. +-# +-# This file is free software; the Free Software Foundation +-# gives unlimited permission to copy and/or distribute it, +-# with or without modifications, as long as this notice is preserved. +- +-# _AM_MANGLE_OPTION(NAME) +-# ----------------------- +-AC_DEFUN([_AM_MANGLE_OPTION], +-[[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])]) +- +-# _AM_SET_OPTION(NAME) +-# -------------------- +-# Set option NAME. Presently that only means defining a flag for this option. +-AC_DEFUN([_AM_SET_OPTION], +-[m4_define(_AM_MANGLE_OPTION([$1]), [1])]) +- +-# _AM_SET_OPTIONS(OPTIONS) +-# ------------------------ +-# OPTIONS is a space-separated list of Automake options. +-AC_DEFUN([_AM_SET_OPTIONS], +-[m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])]) +- +-# _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET]) +-# ------------------------------------------- +-# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. +-AC_DEFUN([_AM_IF_OPTION], +-[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) +- +-# Copyright (C) 1999-2021 Free Software Foundation, Inc. +-# +-# This file is free software; the Free Software Foundation +-# gives unlimited permission to copy and/or distribute it, +-# with or without modifications, as long as this notice is preserved. +- +-# _AM_PROG_CC_C_O +-# --------------- +-# Like AC_PROG_CC_C_O, but changed for automake. We rewrite AC_PROG_CC +-# to automatically call this. +-AC_DEFUN([_AM_PROG_CC_C_O], +-[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl +-AC_REQUIRE_AUX_FILE([compile])dnl +-AC_LANG_PUSH([C])dnl +-AC_CACHE_CHECK( +- [whether $CC understands -c and -o together], +- [am_cv_prog_cc_c_o], +- [AC_LANG_CONFTEST([AC_LANG_PROGRAM([])]) +- # Make sure it works both with $CC and with simple cc. 
+- # Following AC_PROG_CC_C_O, we do the test twice because some +- # compilers refuse to overwrite an existing .o file with -o, +- # though they will create one. +- am_cv_prog_cc_c_o=yes +- for am_i in 1 2; do +- if AM_RUN_LOG([$CC -c conftest.$ac_ext -o conftest2.$ac_objext]) \ +- && test -f conftest2.$ac_objext; then +- : OK +- else +- am_cv_prog_cc_c_o=no +- break +- fi +- done +- rm -f core conftest* +- unset am_i]) +-if test "$am_cv_prog_cc_c_o" != yes; then +- # Losing compiler, so override with the script. +- # FIXME: It is wrong to rewrite CC. +- # But if we don't then we get into trouble of one sort or another. +- # A longer-term fix would be to have automake use am__CC in this case, +- # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" +- CC="$am_aux_dir/compile $CC" +-fi +-AC_LANG_POP([C])]) +- +-# For backward compatibility. +-AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])]) +- +-# Copyright (C) 2001-2021 Free Software Foundation, Inc. +-# +-# This file is free software; the Free Software Foundation +-# gives unlimited permission to copy and/or distribute it, +-# with or without modifications, as long as this notice is preserved. +- +-# AM_RUN_LOG(COMMAND) +-# ------------------- +-# Run COMMAND, save the exit status in ac_status, and log it. +-# (This has been adapted from Autoconf's _AC_RUN_LOG macro.) +-AC_DEFUN([AM_RUN_LOG], +-[{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD +- ($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD +- ac_status=$? +- echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD +- (exit $ac_status); }]) +- +-# Check to make sure that the build environment is sane. -*- Autoconf -*- +- +-# Copyright (C) 1996-2021 Free Software Foundation, Inc. +-# +-# This file is free software; the Free Software Foundation +-# gives unlimited permission to copy and/or distribute it, +-# with or without modifications, as long as this notice is preserved. +- +-# AM_SANITY_CHECK +-# --------------- +-AC_DEFUN([AM_SANITY_CHECK], +-[AC_MSG_CHECKING([whether build environment is sane]) +-# Reject unsafe characters in $srcdir or the absolute working directory +-# name. Accept space and tab only in the latter. +-am_lf=' +-' +-case `pwd` in +- *[[\\\"\#\$\&\'\`$am_lf]]*) +- AC_MSG_ERROR([unsafe absolute working directory name]);; +-esac +-case $srcdir in +- *[[\\\"\#\$\&\'\`$am_lf\ \ ]]*) +- AC_MSG_ERROR([unsafe srcdir value: '$srcdir']);; +-esac +- +-# Do 'set' in a subshell so we don't clobber the current shell's +-# arguments. Must try -L first in case configure is actually a +-# symlink; some systems play weird games with the mod time of symlinks +-# (eg FreeBSD returns the mod time of the symlink's containing +-# directory). +-if ( +- am_has_slept=no +- for am_try in 1 2; do +- echo "timestamp, slept: $am_has_slept" > conftest.file +- set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` +- if test "$[*]" = "X"; then +- # -L didn't work. +- set X `ls -t "$srcdir/configure" conftest.file` +- fi +- if test "$[*]" != "X $srcdir/configure conftest.file" \ +- && test "$[*]" != "X conftest.file $srcdir/configure"; then +- +- # If neither matched, then we have a broken ls. This can happen +- # if, for instance, CONFIG_SHELL is bash and it inherits a +- # broken ls alias from the environment. This has actually +- # happened. Such a system could not be considered "sane". +- AC_MSG_ERROR([ls -t appears to fail. 
Make sure there is not a broken +- alias in your environment]) +- fi +- if test "$[2]" = conftest.file || test $am_try -eq 2; then +- break +- fi +- # Just in case. +- sleep 1 +- am_has_slept=yes +- done +- test "$[2]" = conftest.file +- ) +-then +- # Ok. +- : +-else +- AC_MSG_ERROR([newly created file is older than distributed files! +-Check your system clock]) +-fi +-AC_MSG_RESULT([yes]) +-# If we didn't sleep, we still need to ensure time stamps of config.status and +-# generated files are strictly newer. +-am_sleep_pid= +-if grep 'slept: no' conftest.file >/dev/null 2>&1; then +- ( sleep 1 ) & +- am_sleep_pid=$! +-fi +-AC_CONFIG_COMMANDS_PRE( +- [AC_MSG_CHECKING([that generated files are newer than configure]) +- if test -n "$am_sleep_pid"; then +- # Hide warnings about reused PIDs. +- wait $am_sleep_pid 2>/dev/null +- fi +- AC_MSG_RESULT([done])]) +-rm -f conftest.file +-]) +- +-# Copyright (C) 2009-2021 Free Software Foundation, Inc. +-# +-# This file is free software; the Free Software Foundation +-# gives unlimited permission to copy and/or distribute it, +-# with or without modifications, as long as this notice is preserved. +- +-# AM_SILENT_RULES([DEFAULT]) +-# -------------------------- +-# Enable less verbose build rules; with the default set to DEFAULT +-# ("yes" being less verbose, "no" or empty being verbose). +-AC_DEFUN([AM_SILENT_RULES], +-[AC_ARG_ENABLE([silent-rules], [dnl +-AS_HELP_STRING( +- [--enable-silent-rules], +- [less verbose build output (undo: "make V=1")]) +-AS_HELP_STRING( +- [--disable-silent-rules], +- [verbose build output (undo: "make V=0")])dnl +-]) +-case $enable_silent_rules in @%:@ ((( +- yes) AM_DEFAULT_VERBOSITY=0;; +- no) AM_DEFAULT_VERBOSITY=1;; +- *) AM_DEFAULT_VERBOSITY=m4_if([$1], [yes], [0], [1]);; +-esac +-dnl +-dnl A few 'make' implementations (e.g., NonStop OS and NextStep) +-dnl do not support nested variable expansions. +-dnl See automake bug#9928 and bug#10237. +-am_make=${MAKE-make} +-AC_CACHE_CHECK([whether $am_make supports nested variables], +- [am_cv_make_support_nested_variables], +- [if AS_ECHO([['TRUE=$(BAR$(V)) +-BAR0=false +-BAR1=true +-V=1 +-am__doit: +- @$(TRUE) +-.PHONY: am__doit']]) | $am_make -f - >/dev/null 2>&1; then +- am_cv_make_support_nested_variables=yes +-else +- am_cv_make_support_nested_variables=no +-fi]) +-if test $am_cv_make_support_nested_variables = yes; then +- dnl Using '$V' instead of '$(V)' breaks IRIX make. +- AM_V='$(V)' +- AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' +-else +- AM_V=$AM_DEFAULT_VERBOSITY +- AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY +-fi +-AC_SUBST([AM_V])dnl +-AM_SUBST_NOTMAKE([AM_V])dnl +-AC_SUBST([AM_DEFAULT_V])dnl +-AM_SUBST_NOTMAKE([AM_DEFAULT_V])dnl +-AC_SUBST([AM_DEFAULT_VERBOSITY])dnl +-AM_BACKSLASH='\' +-AC_SUBST([AM_BACKSLASH])dnl +-_AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl +-]) +- +-# Copyright (C) 2001-2021 Free Software Foundation, Inc. +-# +-# This file is free software; the Free Software Foundation +-# gives unlimited permission to copy and/or distribute it, +-# with or without modifications, as long as this notice is preserved. +- +-# AM_PROG_INSTALL_STRIP +-# --------------------- +-# One issue with vendor 'install' (even GNU) is that you can't +-# specify the program used to strip binaries. This is especially +-# annoying in cross-compiling environments, where the build's strip +-# is unlikely to handle the host's binaries. 
+-# Fortunately install-sh will honor a STRIPPROG variable, so we +-# always use install-sh in "make install-strip", and initialize +-# STRIPPROG with the value of the STRIP variable (set by the user). +-AC_DEFUN([AM_PROG_INSTALL_STRIP], +-[AC_REQUIRE([AM_PROG_INSTALL_SH])dnl +-# Installed binaries are usually stripped using 'strip' when the user +-# run "make install-strip". However 'strip' might not be the right +-# tool to use in cross-compilation environments, therefore Automake +-# will honor the 'STRIP' environment variable to overrule this program. +-dnl Don't test for $cross_compiling = yes, because it might be 'maybe'. +-if test "$cross_compiling" != no; then +- AC_CHECK_TOOL([STRIP], [strip], :) +-fi +-INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" +-AC_SUBST([INSTALL_STRIP_PROGRAM])]) +- +-# Copyright (C) 2006-2021 Free Software Foundation, Inc. +-# +-# This file is free software; the Free Software Foundation +-# gives unlimited permission to copy and/or distribute it, +-# with or without modifications, as long as this notice is preserved. +- +-# _AM_SUBST_NOTMAKE(VARIABLE) +-# --------------------------- +-# Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in. +-# This macro is traced by Automake. +-AC_DEFUN([_AM_SUBST_NOTMAKE]) +- +-# AM_SUBST_NOTMAKE(VARIABLE) +-# -------------------------- +-# Public sister of _AM_SUBST_NOTMAKE. +-AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)]) +- +-# Check how to create a tarball. -*- Autoconf -*- +- +-# Copyright (C) 2004-2021 Free Software Foundation, Inc. +-# +-# This file is free software; the Free Software Foundation +-# gives unlimited permission to copy and/or distribute it, +-# with or without modifications, as long as this notice is preserved. +- +-# _AM_PROG_TAR(FORMAT) +-# -------------------- +-# Check how to create a tarball in format FORMAT. +-# FORMAT should be one of 'v7', 'ustar', or 'pax'. +-# +-# Substitute a variable $(am__tar) that is a command +-# writing to stdout a FORMAT-tarball containing the directory +-# $tardir. +-# tardir=directory && $(am__tar) > result.tar +-# +-# Substitute a variable $(am__untar) that extract such +-# a tarball read from stdin. +-# $(am__untar) < result.tar +-# +-AC_DEFUN([_AM_PROG_TAR], +-[# Always define AMTAR for backward compatibility. Yes, it's still used +-# in the wild :-( We should find a proper way to deprecate it ... +-AC_SUBST([AMTAR], ['$${TAR-tar}']) +- +-# We'll loop over all known methods to create a tar archive until one works. +-_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none' +- +-m4_if([$1], [v7], +- [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'], +- +- [m4_case([$1], +- [ustar], +- [# The POSIX 1988 'ustar' format is defined with fixed-size fields. +- # There is notably a 21 bits limit for the UID and the GID. In fact, +- # the 'pax' utility can hang on bigger UID/GID (see automake bug#8343 +- # and bug#13588). +- am_max_uid=2097151 # 2^21 - 1 +- am_max_gid=$am_max_uid +- # The $UID and $GID variables are not portable, so we need to resort +- # to the POSIX-mandated id(1) utility. Errors in the 'id' calls +- # below are definitely unexpected, so allow the users to see them +- # (that is, avoid stderr redirection). 
+- am_uid=`id -u || echo unknown` +- am_gid=`id -g || echo unknown` +- AC_MSG_CHECKING([whether UID '$am_uid' is supported by ustar format]) +- if test $am_uid -le $am_max_uid; then +- AC_MSG_RESULT([yes]) +- else +- AC_MSG_RESULT([no]) +- _am_tools=none +- fi +- AC_MSG_CHECKING([whether GID '$am_gid' is supported by ustar format]) +- if test $am_gid -le $am_max_gid; then +- AC_MSG_RESULT([yes]) +- else +- AC_MSG_RESULT([no]) +- _am_tools=none +- fi], +- +- [pax], +- [], +- +- [m4_fatal([Unknown tar format])]) +- +- AC_MSG_CHECKING([how to create a $1 tar archive]) +- +- # Go ahead even if we have the value already cached. We do so because we +- # need to set the values for the 'am__tar' and 'am__untar' variables. +- _am_tools=${am_cv_prog_tar_$1-$_am_tools} +- +- for _am_tool in $_am_tools; do +- case $_am_tool in +- gnutar) +- for _am_tar in tar gnutar gtar; do +- AM_RUN_LOG([$_am_tar --version]) && break +- done +- am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"' +- am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"' +- am__untar="$_am_tar -xf -" +- ;; +- plaintar) +- # Must skip GNU tar: if it does not support --format= it doesn't create +- # ustar tarball either. +- (tar --version) >/dev/null 2>&1 && continue +- am__tar='tar chf - "$$tardir"' +- am__tar_='tar chf - "$tardir"' +- am__untar='tar xf -' +- ;; +- pax) +- am__tar='pax -L -x $1 -w "$$tardir"' +- am__tar_='pax -L -x $1 -w "$tardir"' +- am__untar='pax -r' +- ;; +- cpio) +- am__tar='find "$$tardir" -print | cpio -o -H $1 -L' +- am__tar_='find "$tardir" -print | cpio -o -H $1 -L' +- am__untar='cpio -i -H $1 -d' +- ;; +- none) +- am__tar=false +- am__tar_=false +- am__untar=false +- ;; +- esac +- +- # If the value was cached, stop now. We just wanted to have am__tar +- # and am__untar set. +- test -n "${am_cv_prog_tar_$1}" && break +- +- # tar/untar a dummy directory, and stop if the command works. +- rm -rf conftest.dir +- mkdir conftest.dir +- echo GrepMe > conftest.dir/file +- AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar]) +- rm -rf conftest.dir +- if test -s conftest.tar; then +- AM_RUN_LOG([$am__untar /dev/null 2>&1 && break +- fi +- done +- rm -rf conftest.dir +- +- AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool]) +- AC_MSG_RESULT([$am_cv_prog_tar_$1])]) +- +-AC_SUBST([am__tar]) +-AC_SUBST([am__untar]) +-]) # _AM_PROG_TAR +- +diff '--color=auto' -ru --new-file valgrind-3.21.0/autogen.sh valgrind-riscv64/autogen.sh +--- valgrind-3.21.0/autogen.sh 2023-04-28 19:19:01.000000000 +0800 ++++ valgrind-riscv64/autogen.sh 2022-09-21 06:23:46.000000000 +0800 +@@ -15,11 +15,3 @@ + run autoheader + run automake -a + run autoconf +- +-# Valgrind-specific Git configuration, if appropriate. +-if git rev-parse --is-inside-work-tree > /dev/null 2>&1 ; then +- echo "running: git configuration" +- git config blame.ignoreRevsFile .git-blame-ignore-revs +-else +- echo "skipping: git configuration" +-fi +diff '--color=auto' -ru --new-file valgrind-3.21.0/auxprogs/build-gcc valgrind-riscv64/auxprogs/build-gcc +--- valgrind-3.21.0/auxprogs/build-gcc 1970-01-01 08:00:00.000000000 +0800 ++++ valgrind-riscv64/auxprogs/build-gcc 2022-09-21 06:23:46.000000000 +0800 +@@ -0,0 +1,55 @@ ++#!/bin/sh -e ++ ++# Simple script to build GCC natively including its prerequisites. 
++# ++# Depending on your needs you maybe able to speed up the GCC build: ++# ++# (a) Do not build a c++ compiler ++# c++ is only needed for "make check" and running regression tests ++# --> choose LANGUEGES=c below ++# (b) Do not build a compiler that can produce 32-bit executables ++# on a 64-bit platform ++# --> choose MULTILIB=--disable-multilib below ++# ++# Define the following 5 variables: ++ ++BUILD_DIR=/tmp/build-gcc ++INSTALL_DIR=/tmp/install ++ ++GCC_VERSION=5.1.0 ++LANGUAGES=c,c++ ++MULTILIB= ++#LANGUAGES=c ++#MULTILIB=--disable-multilib ++ ++#----------------------------------------------------------- ++# No changes should be needed below this line ++#----------------------------------------------------------- ++ ++# Create build directory ++echo "...creating build directory $BUILD_DIR" ++mkdir -p $BUILD_DIR ++cd $BUILD_DIR ++ ++# Download tarballs ++echo "...downloading tarball" ++wget ftp://ftp.gnu.org/gnu/gcc/gcc-$GCC_VERSION/gcc-$GCC_VERSION.tar.bz2 ++ ++# Build GCC ++echo "...building GCC" ++rm -rf gcc-$GCC_VERSION ++tar xf gcc-$GCC_VERSION.tar.bz2 ++cd gcc-$GCC_VERSION ++./contrib/download_prerequisites ++cd .. ++rm -rf objdir ++mkdir objdir ++cd objdir ++../gcc-$GCC_VERSION/configure --prefix=$INSTALL_DIR --disable-bootstrap \ ++ $MULTILIB --enable-languages=$LANGUAGES 2>&1 > gcc-config.log ++make -s 2>&1 > gcc-make.log ++make -s install 2>&1 > gcc-install.log ++mv gcc-config.log gcc-make.log gcc-install.log .. ++ ++# done ++echo "...done" +diff '--color=auto' -ru --new-file valgrind-3.21.0/auxprogs/compare-build-logs valgrind-riscv64/auxprogs/compare-build-logs +--- valgrind-3.21.0/auxprogs/compare-build-logs 1970-01-01 08:00:00.000000000 +0800 ++++ valgrind-riscv64/auxprogs/compare-build-logs 2022-09-21 06:23:46.000000000 +0800 +@@ -0,0 +1,196 @@ ++#!/usr/bin/env perl ++ ++# Lame script to compare two build logs. ++# ++# The script intercepts directory changes and compiler invocations and ++# compares the compiler invocations for equality. Equality is defined ++# as "same options in both invocations ignoring order". So we only test ++# a necessary condition. ++# ++# Both builds must be configured with the same --prefix. Otherwise, ++# the value of -DVG_LIBDIR= will differ. They also need to use the same ++# compiler. ++ ++use Getopt::Long; ++use strict; ++use warnings; ++ ++my $prog_name = "compare-build-logs"; ++ ++my $usage=< sub { $compiler = "gcc" }, ++ "clang" => sub { $compiler = "clang" }, ++ "v" => sub { $verbose = 1 }, ++ ) || die $usage; ++ ++my $num_arg = $#ARGV + 1; ++ ++if ($num_arg != 2) { ++ die $usage; ++} ++ ++my $log1 = $ARGV[0]; ++my $log2 = $ARGV[1]; ++ ++# Hashes: relative filename -> compiler invocation ++my %cmd1 = read_log_file($log1); ++my %cmd2 = read_log_file($log2); ++ ++# Compare the log files ++ ++foreach my $file (keys %cmd1) { ++ if (! $cmd2{$file}) { ++ print "*** $file missing in $log2\n"; ++ } else { ++ compare_invocations($file, $cmd1{$file }, $cmd2{$file}); ++ } ++} ++foreach my $file (keys %cmd2) { ++ if (! $cmd1{$file}) { ++ print "*** $file missing in $log1\n"; ++ } ++} ++ ++exit 0; ++ ++# Compare two lines |c1| and |c2| which are compiler invocations for |file|. ++# Basically, we look at a compiler invocation as a sequence of blank-separated ++# options. 
++sub compare_invocations { ++ my ($file, $c1, $c2) = @_; ++ my ($found1, $found2); ++# print "COMPARE $file\n"; ++ ++# Remove stuff that has embedded spaces ++ ++# Remove: `test -f 'whatever.c' || echo './'` ++ $c1 =~ s|(`[^`]*`)||; ++ $found1 = $1; ++ $c2 =~ s|(`[^`]*`)||; ++ $found2 = $1; ++ if ($found1 && $found2) { ++ die if ($found1 ne $found2); ++ } ++ ++# Remove: -o whatever ++ $c1 =~ s|-o[ ][ ]*([^ ][^ ]*)||; ++ $found1 = $1; ++ $c2 =~ s|-o[ ][ ]*([^ ][^ ]*)||; ++ $found2 = $1; ++ if ($found1 && $found2) { ++ die if ($found1 ne $found2); ++ } ++ ++# The remaining lines are considered to be blank-separated options and file ++# names. They should be identical in both invocations (but in any order). ++ my %o1 = (); ++ my %o2 = (); ++ foreach my $k (split /\s+/,$c1) { ++ $o1{$k} = 1; ++ } ++ foreach my $k (split /\s+/,$c2) { ++ $o2{$k} = 1; ++ } ++# foreach my $zz (keys %o1) { ++# print "$zz\n"; ++# } ++ foreach my $k (keys %o1) { ++ if (! $o2{$k}) { ++ print "*** '$k' is missing in compilation of '$file' in $log2\n"; ++ } ++ } ++ foreach my $k (keys %o2) { ++ if (! $o1{$k}) { ++ print "*** '$k' is missing in compilation of '$file' in $log1\n"; ++ } ++ } ++} ++ ++# Read a compiler log file. ++# Return hash: relative (to build root) file name -> compiler invocation ++sub read_log_file ++{ ++ my ($log) = @_; ++ my $dir = ""; ++ my $root = ""; ++ my %cmd = (); ++ ++ print "...reading $log\n" if ($verbose); ++ ++ open(LOG, "$log") || die "cannot open $log\n"; ++ while (my $line = ) { ++ chomp $line; ++ if ($line =~ /^make/) { ++ if ($line =~ /Entering directory/) { ++ $dir = $line; ++ $dir =~ s/^.*`//; ++ $dir =~ s/'.*//; ++ if ($root eq "") { ++ $root = $dir; ++ # Append a slash if not present ++ $root = "$root/" if (! ($root =~ /\/$/)); ++ print "...build root is $root\n" if ($verbose); ++ } ++# print "DIR = $dir\n"; ++ next; ++ } ++ } ++ if ($line =~ /^\s*[^\s]*$compiler\s/) { ++ # If line ends in \ read continuation line. ++ while ($line =~ /\\$/) { ++ my $next = ; ++ chomp($next); ++ $line =~ s/\\$//; ++ $line .= $next; ++ } ++ ++ my $file = extract_file($line); ++ $file = "$dir/$file"; # make absolute ++ $file =~ s/$root//; # remove build root ++# print "FILE $file\n"; ++ $cmd{"$file"} = $line; ++ } ++ } ++ close(LOG); ++ ++ my $num_invocations = int(keys %cmd); ++ ++ if ($num_invocations == 0) { ++ print "*** File $log does not contain any compiler invocations\n"; ++ } else { ++ print "...found $num_invocations invocations\n" if ($verbose); ++ } ++ return %cmd; ++} ++ ++# Extract a file name from the command line. Assume there is a -o filename ++# present. If not, issue a warning. ++# ++sub extract_file { ++ my($line) = @_; ++# Look for -o executable ++ if ($line =~ /-o[ ][ ]*([^ ][^ ]*)/) { ++ return $1; ++ } else { ++ print "*** Could not extract file name from $line\n"; ++ return "UNKNOWN"; ++ } ++} +diff '--color=auto' -ru --new-file valgrind-3.21.0/auxprogs/Makefile.am valgrind-riscv64/auxprogs/Makefile.am +--- valgrind-3.21.0/auxprogs/Makefile.am 2023-03-22 17:10:13.000000000 +0800 ++++ valgrind-riscv64/auxprogs/Makefile.am 2022-09-21 06:23:46.000000000 +0800 +@@ -169,7 +169,7 @@ + # We need to autoreconf to make sure to get config.guess, config.sub + # and libtool for newer architectures. 
+ $(GSL_SRC_DIR)/gsl-patched: $(GSL_TAR) +- echo "$(GSL_SHA256_SUM) $(GSL_TAR)" | @SHA256SUM@ --check - ++ echo "$(GSL_SHA256_SUM) $(GSL_TAR)" | sha256sum --check - + (cd $(AUX_CHECK_DIR) && \ + tar zxf $(GSL_TAR_NAME) && \ + cd $(GSL_DIR_NAME) && \ +@@ -183,9 +183,9 @@ + $(GSL_BUILD_DIR)/gsl-build: $(GSL_SRC_DIR)/gsl-patched + mkdir -p $(GSL_BUILD_DIR) + (cd $(GSL_BUILD_DIR) && \ +- $(GSL_SRC_DIR)/configure CC="${CC}" CXX="${CXX}" CFLAGS="$(GSL_CFLAGS)" && \ +- ${MAKE} -j $(nproc) && \ +- ${MAKE} check -k || true) ++ $(GSL_SRC_DIR)/configure CFLAGS="$(GSL_CFLAGS)" && \ ++ make -j $(nproc) && \ ++ make check -k || true) + touch $@ + + # We hope all tests PASS (so don't produce output except for the test names). +diff '--color=auto' -ru --new-file valgrind-3.21.0/auxprogs/Makefile.in valgrind-riscv64/auxprogs/Makefile.in +--- valgrind-3.21.0/auxprogs/Makefile.in 2023-04-28 23:38:34.000000000 +0800 ++++ valgrind-riscv64/auxprogs/Makefile.in 1970-01-01 08:00:00.000000000 +0800 +@@ -1,1295 +0,0 @@ +-# Makefile.in generated by automake 1.16.5 from Makefile.am. +-# @configure_input@ +- +-# Copyright (C) 1994-2021 Free Software Foundation, Inc. +- +-# This Makefile.in is free software; the Free Software Foundation +-# gives unlimited permission to copy and/or distribute it, +-# with or without modifications, as long as this notice is preserved. +- +-# This program is distributed in the hope that it will be useful, +-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +-# PARTICULAR PURPOSE. +- +-@SET_MAKE@ +- +-# This file should be included (directly or indirectly) by every +-# Makefile.am that builds programs. And also the top-level Makefile.am. +- +-#---------------------------------------------------------------------------- +-# Global stuff +-#---------------------------------------------------------------------------- +- +- +-VPATH = @srcdir@ +-am__is_gnu_make = { \ +- if test -z '$(MAKELEVEL)'; then \ +- false; \ +- elif test -n '$(MAKE_HOST)'; then \ +- true; \ +- elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ +- true; \ +- else \ +- false; \ +- fi; \ +-} +-am__make_running_with_option = \ +- case $${target_option-} in \ +- ?) 
;; \ +- *) echo "am__make_running_with_option: internal error: invalid" \ +- "target option '$${target_option-}' specified" >&2; \ +- exit 1;; \ +- esac; \ +- has_opt=no; \ +- sane_makeflags=$$MAKEFLAGS; \ +- if $(am__is_gnu_make); then \ +- sane_makeflags=$$MFLAGS; \ +- else \ +- case $$MAKEFLAGS in \ +- *\\[\ \ ]*) \ +- bs=\\; \ +- sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ +- | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ +- esac; \ +- fi; \ +- skip_next=no; \ +- strip_trailopt () \ +- { \ +- flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ +- }; \ +- for flg in $$sane_makeflags; do \ +- test $$skip_next = yes && { skip_next=no; continue; }; \ +- case $$flg in \ +- *=*|--*) continue;; \ +- -*I) strip_trailopt 'I'; skip_next=yes;; \ +- -*I?*) strip_trailopt 'I';; \ +- -*O) strip_trailopt 'O'; skip_next=yes;; \ +- -*O?*) strip_trailopt 'O';; \ +- -*l) strip_trailopt 'l'; skip_next=yes;; \ +- -*l?*) strip_trailopt 'l';; \ +- -[dEDm]) skip_next=yes;; \ +- -[JT]) skip_next=yes;; \ +- esac; \ +- case $$flg in \ +- *$$target_option*) has_opt=yes; break;; \ +- esac; \ +- done; \ +- test $$has_opt = yes +-am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +-am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +-pkgdatadir = $(datadir)/@PACKAGE@ +-pkgincludedir = $(includedir)/@PACKAGE@ +-pkglibdir = $(libdir)/@PACKAGE@ +-pkglibexecdir = $(libexecdir)/@PACKAGE@ +-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +-install_sh_DATA = $(install_sh) -c -m 644 +-install_sh_PROGRAM = $(install_sh) -c +-install_sh_SCRIPT = $(install_sh) -c +-INSTALL_HEADER = $(INSTALL_DATA) +-transform = $(program_transform_name) +-NORMAL_INSTALL = : +-PRE_INSTALL = : +-POST_INSTALL = : +-NORMAL_UNINSTALL = : +-PRE_UNINSTALL = : +-POST_UNINSTALL = : +-build_triplet = @build@ +-host_triplet = @host@ +-@COMPILER_IS_CLANG_TRUE@am__append_1 = -Wno-cast-align -Wno-self-assign \ +-@COMPILER_IS_CLANG_TRUE@ -Wno-tautological-compare +- +-@COMPILER_IS_CLANG_TRUE@@VGCONF_OS_IS_DARWIN_FALSE@@VGCONF_OS_IS_FREEBSD_TRUE@am__append_2 = @FLAG_W_NO_EXPANSION_TO_DEFINED@ +-@VGCONF_HAVE_ABI_TRUE@am__append_3 = -DVGABI_@VGCONF_ABI@ +-@VGCONF_HAVE_ABI_TRUE@@VGCONF_HAVE_PLATFORM_SEC_TRUE@am__append_4 = -DVGABI_@VGCONF_ABI@ +-@SOLARIS_XPG_SYMBOLS_PRESENT_TRUE@am__append_5 = -Wl,-M,$(top_srcdir)/solaris/vgpreload-solaris.mapfile +- +-# The Android toolchain includes all kinds of stdlib helpers present in +-# bionic which is bad because we are not linking with it and the Android +-# linker will panic. +-@VGCONF_PLATVARIANT_IS_ANDROID_TRUE@am__append_6 = -nostdlib +-bin_PROGRAMS = valgrind-listener$(EXEEXT) valgrind-di-server$(EXEEXT) +-@VGCONF_PLATVARIANT_IS_ANDROID_TRUE@am__append_7 = -static +-# If there is no secondary platform, and the platforms include x86-darwin, +-# then the primary platform must be x86-darwin. Hence: +-@VGCONF_HAVE_PLATFORM_SEC_FALSE@@VGCONF_PLATFORMS_INCLUDE_X86_DARWIN_TRUE@am__append_8 = -Wl,-read_only_relocs -Wl,suppress +-@VGCONF_PLATVARIANT_IS_ANDROID_TRUE@am__append_9 = -static +-# If there is no secondary platform, and the platforms include x86-darwin, +-# then the primary platform must be x86-darwin. 
Hence: +-@VGCONF_HAVE_PLATFORM_SEC_FALSE@@VGCONF_PLATFORMS_INCLUDE_X86_DARWIN_TRUE@am__append_10 = -Wl,-read_only_relocs -Wl,suppress +-noinst_PROGRAMS = getoff-@VGCONF_ARCH_PRI@-@VGCONF_OS@$(EXEEXT) \ +- $(am__EXEEXT_1) +-@VGCONF_HAVE_PLATFORM_SEC_TRUE@am__append_11 = getoff-@VGCONF_ARCH_SEC@-@VGCONF_OS@ +-# If there is no secondary platform, and the platforms include x86-darwin, +-# then the primary platform must be x86-darwin. Hence: +-@VGCONF_HAVE_PLATFORM_SEC_FALSE@@VGCONF_PLATFORMS_INCLUDE_X86_DARWIN_TRUE@am__append_12 = -Wl,-read_only_relocs -Wl,suppress +-# If there is a secondary platform, and the platforms include x86-darwin, +-# then the primary platform must be amd64-darwin and the secondary platform +-# must be x86-darwin. Hence: +-@VGCONF_HAVE_PLATFORM_SEC_TRUE@@VGCONF_PLATFORMS_INCLUDE_X86_DARWIN_TRUE@am__append_13 = -Wl,-read_only_relocs -Wl,suppress +-# i386 needs sse to get rounding for floating point correct. +-# But we only want this if the primary isn't AMD64 +-@VGCONF_ARCHS_INCLUDE_AMD64_FALSE@@VGCONF_ARCHS_INCLUDE_X86_TRUE@am__append_14 = -mfpmath=sse -msse2 +-subdir = auxprogs +-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +-am__aclocal_m4_deps = $(top_srcdir)/configure.ac +-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ +- $(ACLOCAL_M4) +-DIST_COMMON = $(srcdir)/Makefile.am $(dist_noinst_SCRIPTS) \ +- $(am__DIST_COMMON) +-mkinstalldirs = $(install_sh) -d +-CONFIG_HEADER = $(top_builddir)/config.h +-CONFIG_CLEAN_FILES = +-CONFIG_CLEAN_VPATH_FILES = +-am__installdirs = "$(DESTDIR)$(bindir)" +-@VGCONF_HAVE_PLATFORM_SEC_TRUE@am__EXEEXT_1 = getoff-@VGCONF_ARCH_SEC@-@VGCONF_OS@$(EXEEXT) +-PROGRAMS = $(bin_PROGRAMS) $(noinst_PROGRAMS) +-am_getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_OBJECTS = \ +- getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@-getoff.$(OBJEXT) +-getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_OBJECTS = \ +- $(am_getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_OBJECTS) +-getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_DEPENDENCIES = +-getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_LINK = $(CCLD) \ +- $(getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_CFLAGS) $(CFLAGS) \ +- $(getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_LDFLAGS) $(LDFLAGS) -o \ +- $@ +-am__getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_SOURCES_DIST = getoff.c +-@VGCONF_HAVE_PLATFORM_SEC_TRUE@am_getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_OBJECTS = getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@-getoff.$(OBJEXT) +-getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_OBJECTS = \ +- $(am_getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_OBJECTS) +-getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_DEPENDENCIES = +-getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_LINK = $(CCLD) \ +- $(getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_CFLAGS) $(CFLAGS) \ +- $(getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_LDFLAGS) $(LDFLAGS) -o \ +- $@ +-am_valgrind_di_server_OBJECTS = \ +- valgrind_di_server-valgrind-di-server.$(OBJEXT) +-valgrind_di_server_OBJECTS = $(am_valgrind_di_server_OBJECTS) +-valgrind_di_server_DEPENDENCIES = +-valgrind_di_server_LINK = $(CCLD) $(valgrind_di_server_CFLAGS) \ +- $(CFLAGS) $(valgrind_di_server_LDFLAGS) $(LDFLAGS) -o $@ +-am_valgrind_listener_OBJECTS = \ +- valgrind_listener-valgrind-listener.$(OBJEXT) +-valgrind_listener_OBJECTS = $(am_valgrind_listener_OBJECTS) +-valgrind_listener_DEPENDENCIES = +-valgrind_listener_LINK = $(CCLD) $(valgrind_listener_CFLAGS) $(CFLAGS) \ +- $(valgrind_listener_LDFLAGS) $(LDFLAGS) -o $@ +-SCRIPTS = $(dist_noinst_SCRIPTS) +-AM_V_P = $(am__v_P_@AM_V@) +-am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +-am__v_P_0 = false +-am__v_P_1 = : +-AM_V_GEN = $(am__v_GEN_@AM_V@) +-am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) 
+-am__v_GEN_0 = @echo " GEN " $@; +-am__v_GEN_1 = +-AM_V_at = $(am__v_at_@AM_V@) +-am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +-am__v_at_0 = @ +-am__v_at_1 = +-DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) +-depcomp = $(SHELL) $(top_srcdir)/depcomp +-am__maybe_remake_depfiles = depfiles +-am__depfiles_remade = \ +- ./$(DEPDIR)/getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@-getoff.Po \ +- ./$(DEPDIR)/getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@-getoff.Po \ +- ./$(DEPDIR)/valgrind_di_server-valgrind-di-server.Po \ +- ./$(DEPDIR)/valgrind_listener-valgrind-listener.Po +-am__mv = mv -f +-AM_V_lt = $(am__v_lt_@AM_V@) +-am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) +-am__v_lt_0 = --silent +-am__v_lt_1 = +-COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ +- $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +-AM_V_CC = $(am__v_CC_@AM_V@) +-am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) +-am__v_CC_0 = @echo " CC " $@; +-am__v_CC_1 = +-CCLD = $(CC) +-LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ +-AM_V_CCLD = $(am__v_CCLD_@AM_V@) +-am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) +-am__v_CCLD_0 = @echo " CCLD " $@; +-am__v_CCLD_1 = +-SOURCES = $(getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_SOURCES) \ +- $(getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_SOURCES) \ +- $(valgrind_di_server_SOURCES) $(valgrind_listener_SOURCES) +-DIST_SOURCES = $(getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_SOURCES) \ +- $(am__getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_SOURCES_DIST) \ +- $(valgrind_di_server_SOURCES) $(valgrind_listener_SOURCES) +-am__can_run_installinfo = \ +- case $$AM_UPDATE_INFO_DIR in \ +- n|no|NO) false;; \ +- *) (install-info --version) >/dev/null 2>&1;; \ +- esac +-am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +-# Read a list of newline-separated strings from the standard input, +-# and print each of them once, without duplicates. Input order is +-# *not* preserved. +-am__uniquify_input = $(AWK) '\ +- BEGIN { nonempty = 0; } \ +- { items[$$0] = 1; nonempty = 1; } \ +- END { if (nonempty) { for (i in items) print i; }; } \ +-' +-# Make sure the list of sources is unique. This is necessary because, +-# e.g., the same source file might be shared among _SOURCES variables +-# for different programs/libraries. 
+-am__define_uniq_tagged_files = \ +- list='$(am__tagged_files)'; \ +- unique=`for i in $$list; do \ +- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ +- done | $(am__uniquify_input)` +-am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/Makefile.all.am \ +- $(top_srcdir)/depcomp +-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +-ACLOCAL = @ACLOCAL@ +-AMTAR = @AMTAR@ +-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +-AR = @AR@ +-AUTOCONF = @AUTOCONF@ +-AUTOHEADER = @AUTOHEADER@ +-AUTOMAKE = @AUTOMAKE@ +-AWK = @AWK@ +-BOOST_CFLAGS = @BOOST_CFLAGS@ +-BOOST_LIBS = @BOOST_LIBS@ +-CC = @CC@ +-CCAS = @CCAS@ +-CCASDEPMODE = @CCASDEPMODE@ +-CCASFLAGS = @CCASFLAGS@ +-CCDEPMODE = @CCDEPMODE@ +-CFLAGS = @CFLAGS@ +-CFLAGS_MPI = @CFLAGS_MPI@ +-CPP = @CPP@ +-CPPFLAGS = @CPPFLAGS@ +-CSCOPE = @CSCOPE@ +-CTAGS = @CTAGS@ +-CXX = @CXX@ +-CXXDEPMODE = @CXXDEPMODE@ +-CXXFLAGS = @CXXFLAGS@ +-CYGPATH_W = @CYGPATH_W@ +-DEFAULT_SUPP = @DEFAULT_SUPP@ +-DEFS = @DEFS@ +-DEPDIR = @DEPDIR@ +-DIFF = @DIFF@ +-DIS_PATH = @DIS_PATH@ +-ECHO_C = @ECHO_C@ +-ECHO_N = @ECHO_N@ +-ECHO_T = @ECHO_T@ +-EGREP = @EGREP@ +-ETAGS = @ETAGS@ +-EXEEXT = @EXEEXT@ +-FLAG_FALIGNED_NEW = @FLAG_FALIGNED_NEW@ +-FLAG_FINLINE_FUNCTIONS = @FLAG_FINLINE_FUNCTIONS@ +-FLAG_FNO_IPA_ICF = @FLAG_FNO_IPA_ICF@ +-FLAG_FNO_STACK_PROTECTOR = @FLAG_FNO_STACK_PROTECTOR@ +-FLAG_FSANITIZE = @FLAG_FSANITIZE@ +-FLAG_FSIZED_DEALLOCATION = @FLAG_FSIZED_DEALLOCATION@ +-FLAG_M32 = @FLAG_M32@ +-FLAG_M64 = @FLAG_M64@ +-FLAG_MLONG_DOUBLE_128 = @FLAG_MLONG_DOUBLE_128@ +-FLAG_MMMX = @FLAG_MMMX@ +-FLAG_MSA = @FLAG_MSA@ +-FLAG_MSSE = @FLAG_MSSE@ +-FLAG_NO_BUILD_ID = @FLAG_NO_BUILD_ID@ +-FLAG_NO_PIE = @FLAG_NO_PIE@ +-FLAG_OCTEON = @FLAG_OCTEON@ +-FLAG_OCTEON2 = @FLAG_OCTEON2@ +-FLAG_PIE = @FLAG_PIE@ +-FLAG_T_TEXT = @FLAG_T_TEXT@ +-FLAG_UNLIMITED_INLINE_UNIT_GROWTH = @FLAG_UNLIMITED_INLINE_UNIT_GROWTH@ +-FLAG_W_CAST_ALIGN = @FLAG_W_CAST_ALIGN@ +-FLAG_W_CAST_QUAL = @FLAG_W_CAST_QUAL@ +-FLAG_W_EMPTY_BODY = @FLAG_W_EMPTY_BODY@ +-FLAG_W_ENUM_CONVERSION = @FLAG_W_ENUM_CONVERSION@ +-FLAG_W_EXTRA = @FLAG_W_EXTRA@ +-FLAG_W_FORMAT = @FLAG_W_FORMAT@ +-FLAG_W_FORMAT_SECURITY = @FLAG_W_FORMAT_SECURITY@ +-FLAG_W_FORMAT_SIGNEDNESS = @FLAG_W_FORMAT_SIGNEDNESS@ +-FLAG_W_IGNORED_QUALIFIERS = @FLAG_W_IGNORED_QUALIFIERS@ +-FLAG_W_IMPLICIT_FALLTHROUGH = @FLAG_W_IMPLICIT_FALLTHROUGH@ +-FLAG_W_LOGICAL_OP = @FLAG_W_LOGICAL_OP@ +-FLAG_W_MISSING_PARAMETER_TYPE = @FLAG_W_MISSING_PARAMETER_TYPE@ +-FLAG_W_NO_ALLOC_SIZE_LARGER_THAN = @FLAG_W_NO_ALLOC_SIZE_LARGER_THAN@ +-FLAG_W_NO_BUILTIN_MEMCPY_CHK_SIZE = @FLAG_W_NO_BUILTIN_MEMCPY_CHK_SIZE@ +-FLAG_W_NO_EXPANSION_TO_DEFINED = @FLAG_W_NO_EXPANSION_TO_DEFINED@ +-FLAG_W_NO_FORMAT_OVERFLOW = @FLAG_W_NO_FORMAT_OVERFLOW@ +-FLAG_W_NO_FORTIFY_SOURCE = @FLAG_W_NO_FORTIFY_SOURCE@ +-FLAG_W_NO_FREE_NONHEAP_OBJECT = @FLAG_W_NO_FREE_NONHEAP_OBJECT@ +-FLAG_W_NO_INCOMPATIBLE_POINTER_TYPES_DISCARDS_QUALIFIERS = @FLAG_W_NO_INCOMPATIBLE_POINTER_TYPES_DISCARDS_QUALIFIERS@ +-FLAG_W_NO_INFINITE_RECURSION = @FLAG_W_NO_INFINITE_RECURSION@ +-FLAG_W_NO_MAYBE_UNINITIALIZED = @FLAG_W_NO_MAYBE_UNINITIALIZED@ +-FLAG_W_NO_MEMSET_TRANSPOSED_ARGS = @FLAG_W_NO_MEMSET_TRANSPOSED_ARGS@ +-FLAG_W_NO_MISMATCHED_NEW_DELETE = @FLAG_W_NO_MISMATCHED_NEW_DELETE@ +-FLAG_W_NO_NONNULL = @FLAG_W_NO_NONNULL@ +-FLAG_W_NO_NON_POWER_OF_TWO_ALIGNMENT = @FLAG_W_NO_NON_POWER_OF_TWO_ALIGNMENT@ +-FLAG_W_NO_OVERFLOW = @FLAG_W_NO_OVERFLOW@ +-FLAG_W_NO_POINTER_SIGN = @FLAG_W_NO_POINTER_SIGN@ +-FLAG_W_NO_SIGN_COMPARE = @FLAG_W_NO_SIGN_COMPARE@ 
+-FLAG_W_NO_STATIC_LOCAL_IN_INLINE = @FLAG_W_NO_STATIC_LOCAL_IN_INLINE@ +-FLAG_W_NO_STRINGOP_OVERFLOW = @FLAG_W_NO_STRINGOP_OVERFLOW@ +-FLAG_W_NO_STRINGOP_OVERREAD = @FLAG_W_NO_STRINGOP_OVERREAD@ +-FLAG_W_NO_STRINGOP_TRUNCATION = @FLAG_W_NO_STRINGOP_TRUNCATION@ +-FLAG_W_NO_SUSPICIOUS_BZERO = @FLAG_W_NO_SUSPICIOUS_BZERO@ +-FLAG_W_NO_UNINITIALIZED = @FLAG_W_NO_UNINITIALIZED@ +-FLAG_W_NO_UNUSED_BUT_SET_VARIABLE = @FLAG_W_NO_UNUSED_BUT_SET_VARIABLE@ +-FLAG_W_NO_UNUSED_FUNCTION = @FLAG_W_NO_UNUSED_FUNCTION@ +-FLAG_W_NO_USE_AFTER_FREE = @FLAG_W_NO_USE_AFTER_FREE@ +-FLAG_W_OLD_STYLE_DECLARATION = @FLAG_W_OLD_STYLE_DECLARATION@ +-FLAG_W_WRITE_STRINGS = @FLAG_W_WRITE_STRINGS@ +-GDB = @GDB@ +-GLIBC_LIBC_PATH = @GLIBC_LIBC_PATH@ +-GLIBC_LIBPTHREAD_PATH = @GLIBC_LIBPTHREAD_PATH@ +-GLIBC_VERSION = @GLIBC_VERSION@ +-GREP = @GREP@ +-HWCAP_HAS_ALTIVEC = @HWCAP_HAS_ALTIVEC@ +-HWCAP_HAS_DFP = @HWCAP_HAS_DFP@ +-HWCAP_HAS_HTM = @HWCAP_HAS_HTM@ +-HWCAP_HAS_ISA_2_05 = @HWCAP_HAS_ISA_2_05@ +-HWCAP_HAS_ISA_2_06 = @HWCAP_HAS_ISA_2_06@ +-HWCAP_HAS_ISA_2_07 = @HWCAP_HAS_ISA_2_07@ +-HWCAP_HAS_ISA_3_00 = @HWCAP_HAS_ISA_3_00@ +-HWCAP_HAS_ISA_3_1 = @HWCAP_HAS_ISA_3_1@ +-HWCAP_HAS_MMA = @HWCAP_HAS_MMA@ +-HWCAP_HAS_VSX = @HWCAP_HAS_VSX@ +-INSTALL = @INSTALL@ +-INSTALL_DATA = @INSTALL_DATA@ +-INSTALL_PROGRAM = @INSTALL_PROGRAM@ +-INSTALL_SCRIPT = @INSTALL_SCRIPT@ +-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +-LDFLAGS = @LDFLAGS@ +-LDFLAGS_MPI = @LDFLAGS_MPI@ +-LIBOBJS = @LIBOBJS@ +-LIBS = @LIBS@ +-LIB_UBSAN = @LIB_UBSAN@ +-LN_S = @LN_S@ +-LTLIBOBJS = @LTLIBOBJS@ +-LTO_AR = @LTO_AR@ +-LTO_CFLAGS = @LTO_CFLAGS@ +-LTO_RANLIB = @LTO_RANLIB@ +-MAINT = @MAINT@ +-MAKEINFO = @MAKEINFO@ +-MKDIR_P = @MKDIR_P@ +-MPI_CC = @MPI_CC@ +-OBJEXT = @OBJEXT@ +-PACKAGE = @PACKAGE@ +-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +-PACKAGE_NAME = @PACKAGE_NAME@ +-PACKAGE_STRING = @PACKAGE_STRING@ +-PACKAGE_TARNAME = @PACKAGE_TARNAME@ +-PACKAGE_URL = @PACKAGE_URL@ +-PACKAGE_VERSION = @PACKAGE_VERSION@ +-PATH_SEPARATOR = @PATH_SEPARATOR@ +-PERL = @PERL@ +-PREFERRED_STACK_BOUNDARY_2 = @PREFERRED_STACK_BOUNDARY_2@ +-RANLIB = @RANLIB@ +-SED = @SED@ +-SET_MAKE = @SET_MAKE@ +-SHA256SUM = @SHA256SUM@ +-SHELL = @SHELL@ +-SOLARIS_UNDEF_LARGESOURCE = @SOLARIS_UNDEF_LARGESOURCE@ +-STRIP = @STRIP@ +-VALT_LOAD_ADDRESS_PRI = @VALT_LOAD_ADDRESS_PRI@ +-VALT_LOAD_ADDRESS_SEC = @VALT_LOAD_ADDRESS_SEC@ +-VERSION = @VERSION@ +-VGCONF_ABI = @VGCONF_ABI@ +-VGCONF_ARCH_PRI = @VGCONF_ARCH_PRI@ +-VGCONF_ARCH_SEC = @VGCONF_ARCH_SEC@ +-VGCONF_OS = @VGCONF_OS@ +-VGCONF_PLATFORM_PRI_CAPS = @VGCONF_PLATFORM_PRI_CAPS@ +-VGCONF_PLATFORM_SEC_CAPS = @VGCONF_PLATFORM_SEC_CAPS@ +-VGCONF_PLATVARIANT = @VGCONF_PLATVARIANT@ +-VG_DATE = @VG_DATE@ +-VG_TMPDIR = @VG_TMPDIR@ +-VG_VER_MAJOR = @VG_VER_MAJOR@ +-VG_VER_MINOR = @VG_VER_MINOR@ +-XCODE_DIR = @XCODE_DIR@ +-XCRUN = @XCRUN@ +-abs_builddir = @abs_builddir@ +-abs_srcdir = @abs_srcdir@ +-abs_top_builddir = @abs_top_builddir@ +-abs_top_srcdir = @abs_top_srcdir@ +-ac_ct_CC = @ac_ct_CC@ +-ac_ct_CXX = @ac_ct_CXX@ +-am__include = @am__include@ +-am__leading_dot = @am__leading_dot@ +-am__quote = @am__quote@ +-am__tar = @am__tar@ +-am__untar = @am__untar@ +-bindir = @bindir@ +-build = @build@ +-build_alias = @build_alias@ +-build_cpu = @build_cpu@ +-build_os = @build_os@ +-build_vendor = @build_vendor@ +-builddir = @builddir@ +-datadir = @datadir@ +-datarootdir = @datarootdir@ +-docdir = @docdir@ +-dvidir = @dvidir@ +-exec_prefix = @exec_prefix@ +-host = @host@ +-host_alias = @host_alias@ +-host_cpu = @host_cpu@ +-host_os = @host_os@ 
+-host_vendor = @host_vendor@ +-htmldir = @htmldir@ +-includedir = @includedir@ +-infodir = @infodir@ +-install_sh = @install_sh@ +-libdir = @libdir@ +-libexecdir = @libexecdir@ +-localedir = @localedir@ +-localstatedir = @localstatedir@ +-mandir = @mandir@ +-mkdir_p = @mkdir_p@ +-oldincludedir = @oldincludedir@ +-pdfdir = @pdfdir@ +-prefix = @prefix@ +-program_transform_name = @program_transform_name@ +-psdir = @psdir@ +-runstatedir = @runstatedir@ +-sbindir = @sbindir@ +-sharedstatedir = @sharedstatedir@ +-srcdir = @srcdir@ +-sysconfdir = @sysconfdir@ +-target_alias = @target_alias@ +-top_build_prefix = @top_build_prefix@ +-top_builddir = @top_builddir@ +-top_srcdir = @top_srcdir@ +-inplacedir = $(top_builddir)/.in_place +- +-#---------------------------------------------------------------------------- +-# Flags +-#---------------------------------------------------------------------------- +- +-# Baseline flags for all compilations. Aim here is to maximise +-# performance and get whatever useful warnings we can out of gcc. +-# -fno-builtin is important for defeating LLVM's idiom recognition +-# that somehow causes VG_(memset) to get into infinite recursion. +-AM_CFLAGS_BASE = -O2 -g -Wall -Wmissing-prototypes -Wshadow \ +- -Wpointer-arith -Wstrict-prototypes -Wmissing-declarations \ +- @FLAG_W_CAST_ALIGN@ @FLAG_W_CAST_QUAL@ @FLAG_W_WRITE_STRINGS@ \ +- @FLAG_W_EMPTY_BODY@ @FLAG_W_FORMAT@ @FLAG_W_FORMAT_SIGNEDNESS@ \ +- @FLAG_W_FORMAT_SECURITY@ @FLAG_W_IGNORED_QUALIFIERS@ \ +- @FLAG_W_MISSING_PARAMETER_TYPE@ @FLAG_W_LOGICAL_OP@ \ +- @FLAG_W_ENUM_CONVERSION@ @FLAG_W_IMPLICIT_FALLTHROUGH@ \ +- @FLAG_W_OLD_STYLE_DECLARATION@ @FLAG_FINLINE_FUNCTIONS@ \ +- @FLAG_FNO_STACK_PROTECTOR@ @FLAG_FSANITIZE@ \ +- -fno-strict-aliasing -fno-builtin $(am__append_1) \ +- $(am__append_2) +-@HAS_DARN_FALSE@@HAS_XSCVHPDP_TRUE@ISA_3_0_BUILD_FLAG = -DHAS_XSCVHPDP -DHAS_ISA_3_00 +- +-# Power ISA flag for use by guest_ppc_helpers.c +-@HAS_DARN_TRUE@@HAS_XSCVHPDP_TRUE@ISA_3_0_BUILD_FLAG = -DHAS_XSCVHPDP -DHAS_DARN -DHAS_ISA_3_00 +-@HAS_XSCVHPDP_FALSE@ISA_3_0_BUILD_FLAG = +-@VGCONF_OS_IS_DARWIN_FALSE@@VGCONF_OS_IS_FREEBSD_FALSE@AM_CFLAGS_PSO_BASE = -O -g -fno-omit-frame-pointer -fno-strict-aliasing \ +-@VGCONF_OS_IS_DARWIN_FALSE@@VGCONF_OS_IS_FREEBSD_FALSE@ -fpic -fno-builtin @FLAG_FNO_IPA_ICF@ +- +-@VGCONF_OS_IS_DARWIN_FALSE@@VGCONF_OS_IS_FREEBSD_TRUE@AM_CFLAGS_PSO_BASE = -O -g -fno-omit-frame-pointer -fno-strict-aliasing \ +-@VGCONF_OS_IS_DARWIN_FALSE@@VGCONF_OS_IS_FREEBSD_TRUE@ -fpic -fPIC -fno-builtin +- +- +-# These flags are used for building the preload shared objects (PSOs). +-# The aim is to give reasonable performance but also to have good +-# stack traces, since users often see stack traces extending +-# into (and through) the preloads. Also, we must use any +-# -mpreferred-stack-boundary flag to build the preload shared +-# objects, since that risks misaligning the client's stack and +-# results in segfaults like (eg) #324050. +-@VGCONF_OS_IS_DARWIN_TRUE@AM_CFLAGS_PSO_BASE = -dynamic \ +-@VGCONF_OS_IS_DARWIN_TRUE@ -O -g -fno-omit-frame-pointer -fno-strict-aliasing \ +-@VGCONF_OS_IS_DARWIN_TRUE@ -fpic -fPIC -fno-builtin @FLAG_FNO_IPA_ICF@ +- +- +-# Flags for specific targets. +-# +-# Nb: the AM_CPPFLAGS_* values are suitable for building tools and auxprogs. +-# For building the core, coregrind/Makefile.am files add some extra things. 
+-AM_CPPFLAGS_@VGCONF_PLATFORM_PRI_CAPS@ = -I$(top_srcdir) \ +- -I$(top_srcdir)/include -I$(top_builddir)/include \ +- -I$(top_srcdir)/VEX/pub -I$(top_builddir)/VEX/pub \ +- -DVGA_@VGCONF_ARCH_PRI@=1 -DVGO_@VGCONF_OS@=1 \ +- -DVGP_@VGCONF_ARCH_PRI@_@VGCONF_OS@=1 \ +- -DVGPV_@VGCONF_ARCH_PRI@_@VGCONF_OS@_@VGCONF_PLATVARIANT@=1 \ +- $(am__append_3) +-@VGCONF_HAVE_PLATFORM_SEC_TRUE@AM_CPPFLAGS_@VGCONF_PLATFORM_SEC_CAPS@ = \ +-@VGCONF_HAVE_PLATFORM_SEC_TRUE@ -I$(top_srcdir) \ +-@VGCONF_HAVE_PLATFORM_SEC_TRUE@ -I$(top_srcdir)/include \ +-@VGCONF_HAVE_PLATFORM_SEC_TRUE@ -I$(top_builddir)/include \ +-@VGCONF_HAVE_PLATFORM_SEC_TRUE@ -I$(top_srcdir)/VEX/pub \ +-@VGCONF_HAVE_PLATFORM_SEC_TRUE@ -I$(top_builddir)/VEX/pub \ +-@VGCONF_HAVE_PLATFORM_SEC_TRUE@ -DVGA_@VGCONF_ARCH_SEC@=1 \ +-@VGCONF_HAVE_PLATFORM_SEC_TRUE@ -DVGO_@VGCONF_OS@=1 \ +-@VGCONF_HAVE_PLATFORM_SEC_TRUE@ -DVGP_@VGCONF_ARCH_SEC@_@VGCONF_OS@=1 \ +-@VGCONF_HAVE_PLATFORM_SEC_TRUE@ -DVGPV_@VGCONF_ARCH_SEC@_@VGCONF_OS@_@VGCONF_PLATVARIANT@=1 \ +-@VGCONF_HAVE_PLATFORM_SEC_TRUE@ $(am__append_4) +-AM_FLAG_M3264_X86_LINUX = @FLAG_M32@ +-AM_CFLAGS_X86_LINUX = @FLAG_M32@ @PREFERRED_STACK_BOUNDARY_2@ \ +- $(AM_CFLAGS_BASE) -fomit-frame-pointer +- +-AM_CFLAGS_PSO_X86_LINUX = @FLAG_M32@ $(AM_CFLAGS_BASE) $(AM_CFLAGS_PSO_BASE) +-AM_CCASFLAGS_X86_LINUX = @FLAG_M32@ -g +-AM_FLAG_M3264_AMD64_LINUX = @FLAG_M64@ +-AM_CFLAGS_AMD64_LINUX = @FLAG_M64@ \ +- $(AM_CFLAGS_BASE) -fomit-frame-pointer +- +-AM_CFLAGS_PSO_AMD64_LINUX = @FLAG_M64@ $(AM_CFLAGS_BASE) $(AM_CFLAGS_PSO_BASE) +-AM_CCASFLAGS_AMD64_LINUX = @FLAG_M64@ -g +-AM_FLAG_M3264_PPC32_LINUX = @FLAG_M32@ +-AM_CFLAGS_PPC32_LINUX = @FLAG_M32@ $(AM_CFLAGS_BASE) +-AM_CFLAGS_PSO_PPC32_LINUX = @FLAG_M32@ $(AM_CFLAGS_BASE) $(AM_CFLAGS_PSO_BASE) +-AM_CCASFLAGS_PPC32_LINUX = @FLAG_M32@ -g +-AM_FLAG_M3264_PPC64BE_LINUX = @FLAG_M64@ +-AM_CFLAGS_PPC64BE_LINUX = @FLAG_M64@ $(AM_CFLAGS_BASE) +-AM_CFLAGS_PSO_PPC64BE_LINUX = @FLAG_M64@ $(AM_CFLAGS_BASE) $(AM_CFLAGS_PSO_BASE) +-AM_CCASFLAGS_PPC64BE_LINUX = @FLAG_M64@ -g +-AM_FLAG_M3264_PPC64LE_LINUX = @FLAG_M64@ +-AM_CFLAGS_PPC64LE_LINUX = @FLAG_M64@ $(AM_CFLAGS_BASE) $(ISA_3_0_BUILD_FLAG) +-AM_CFLAGS_PSO_PPC64LE_LINUX = @FLAG_M64@ $(AM_CFLAGS_BASE) $(AM_CFLAGS_PSO_BASE) +-AM_CCASFLAGS_PPC64LE_LINUX = @FLAG_M64@ -g +-AM_FLAG_M3264_X86_FREEBSD = @FLAG_M32@ +-AM_CFLAGS_X86_FREEBSD = @FLAG_M32@ @PREFERRED_STACK_BOUNDARY_2@ \ +- $(AM_CFLAGS_BASE) -fomit-frame-pointer +- +-AM_CFLAGS_PSO_X86_FREEBSD = @FLAG_M32@ $(AM_CFLAGS_BASE) $(AM_CFLAGS_PSO_BASE) +-AM_CCASFLAGS_X86_FREEBSD = @FLAG_M32@ -g +-AM_FLAG_M3264_ARM_LINUX = @FLAG_M32@ +-AM_CFLAGS_ARM_LINUX = @FLAG_M32@ \ +- $(AM_CFLAGS_BASE) -marm -mcpu=cortex-a8 +- +-AM_CFLAGS_PSO_ARM_LINUX = @FLAG_M32@ $(AM_CFLAGS_BASE) \ +- -marm -mcpu=cortex-a8 $(AM_CFLAGS_PSO_BASE) +- +-AM_CCASFLAGS_ARM_LINUX = @FLAG_M32@ \ +- -marm -mcpu=cortex-a8 -g +- +-AM_FLAG_M3264_ARM64_LINUX = @FLAG_M64@ +-AM_CFLAGS_ARM64_LINUX = @FLAG_M64@ $(AM_CFLAGS_BASE) +-AM_CFLAGS_PSO_ARM64_LINUX = @FLAG_M64@ $(AM_CFLAGS_BASE) $(AM_CFLAGS_PSO_BASE) +-AM_CCASFLAGS_ARM64_LINUX = @FLAG_M64@ -g +-AM_FLAG_M3264_AMD64_FREEBSD = @FLAG_M64@ +-AM_CFLAGS_AMD64_FREEBSD = @FLAG_M64@ \ +- $(AM_CFLAGS_BASE) -fomit-frame-pointer +- +-AM_CFLAGS_PSO_AMD64_FREEBSD = @FLAG_M64@ $(AM_CFLAGS_BASE) $(AM_CFLAGS_PSO_BASE) +-AM_CCASFLAGS_AMD64_FREEBSD = @FLAG_M64@ -g +-AM_FLAG_M3264_X86_DARWIN = -arch i386 +-AM_CFLAGS_X86_DARWIN = $(WERROR) -arch i386 $(AM_CFLAGS_BASE) \ +- -mmacosx-version-min=10.6 \ +- -fno-pic -fno-PIC +- +-AM_CFLAGS_PSO_X86_DARWIN = $(AM_CFLAGS_X86_DARWIN) 
$(AM_CFLAGS_PSO_BASE) +-AM_CCASFLAGS_X86_DARWIN = -arch i386 -g +-AM_FLAG_M3264_AMD64_DARWIN = -arch x86_64 +-AM_CFLAGS_AMD64_DARWIN = $(WERROR) -arch x86_64 $(AM_CFLAGS_BASE) \ +- -mmacosx-version-min=10.6 +- +-AM_CFLAGS_PSO_AMD64_DARWIN = $(AM_CFLAGS_AMD64_DARWIN) $(AM_CFLAGS_PSO_BASE) +-AM_CCASFLAGS_AMD64_DARWIN = -arch x86_64 -g +-AM_FLAG_M3264_S390X_LINUX = @FLAG_M64@ +-AM_CFLAGS_S390X_LINUX = @FLAG_M64@ $(AM_CFLAGS_BASE) -fomit-frame-pointer +-AM_CFLAGS_PSO_S390X_LINUX = @FLAG_M64@ $(AM_CFLAGS_BASE) $(AM_CFLAGS_PSO_BASE) +-AM_CCASFLAGS_S390X_LINUX = @FLAG_M64@ -g -mzarch -march=z900 +-AM_FLAG_M3264_MIPS32_LINUX = @FLAG_M32@ +-AM_CFLAGS_MIPS32_LINUX = @FLAG_M32@ $(AM_CFLAGS_BASE) +-AM_CFLAGS_PSO_MIPS32_LINUX = @FLAG_M32@ $(AM_CFLAGS_BASE) \ +- $(AM_CFLAGS_PSO_BASE) +- +-AM_CCASFLAGS_MIPS32_LINUX = @FLAG_M32@ -g +-AM_FLAG_M3264_NANOMIPS_LINUX = @FLAG_M32@ +-AM_CFLAGS_NANOMIPS_LINUX = @FLAG_M32@ $(AM_CFLAGS_BASE) -mno-jump-table-opt +-AM_CFLAGS_PSO_NANOMIPS_LINUX = @FLAG_M32@ $(AM_CFLAGS_BASE) \ +- $(AM_CFLAGS_PSO_BASE) +- +-AM_CCASFLAGS_NANOMIPS_LINUX = @FLAG_M32@ -g +-AM_FLAG_M3264_MIPS64_LINUX = @FLAG_M64@ +-AM_CFLAGS_MIPS64_LINUX = @FLAG_M64@ $(AM_CFLAGS_BASE) +-AM_CFLAGS_PSO_MIPS64_LINUX = @FLAG_M64@ $(AM_CFLAGS_BASE) \ +- $(AM_CFLAGS_PSO_BASE) +- +-AM_CCASFLAGS_MIPS64_LINUX = @FLAG_M64@ -g +-AM_FLAG_M3264_X86_SOLARIS = @FLAG_M32@ +-AM_CFLAGS_X86_SOLARIS = @FLAG_M32@ @PREFERRED_STACK_BOUNDARY_2@ \ +- $(AM_CFLAGS_BASE) -fomit-frame-pointer @SOLARIS_UNDEF_LARGESOURCE@ +- +-AM_CFLAGS_PSO_X86_SOLARIS = @FLAG_M32@ $(AM_CFLAGS_BASE) $(AM_CFLAGS_PSO_BASE) +-AM_CCASFLAGS_X86_SOLARIS = @FLAG_M32@ -g -D_ASM +-AM_FLAG_M3264_AMD64_SOLARIS = @FLAG_M64@ +-AM_CFLAGS_AMD64_SOLARIS = @FLAG_M64@ \ +- $(AM_CFLAGS_BASE) -fomit-frame-pointer +- +-AM_CFLAGS_PSO_AMD64_SOLARIS = @FLAG_M64@ $(AM_CFLAGS_BASE) $(AM_CFLAGS_PSO_BASE) +-AM_CCASFLAGS_AMD64_SOLARIS = @FLAG_M64@ -g -D_ASM +- +-# Flags for the primary target. These must be used to build the +-# regtests and performance tests. In fact, these must be used to +-# build anything which is built only once on a dual-arch build. +-# +-AM_FLAG_M3264_PRI = $(AM_FLAG_M3264_@VGCONF_PLATFORM_PRI_CAPS@) +-AM_CPPFLAGS_PRI = $(AM_CPPFLAGS_@VGCONF_PLATFORM_PRI_CAPS@) +-AM_CFLAGS_PRI = $(AM_CFLAGS_@VGCONF_PLATFORM_PRI_CAPS@) +-AM_CCASFLAGS_PRI = $(AM_CCASFLAGS_@VGCONF_PLATFORM_PRI_CAPS@) +-@VGCONF_HAVE_PLATFORM_SEC_FALSE@AM_FLAG_M3264_SEC = +-@VGCONF_HAVE_PLATFORM_SEC_TRUE@AM_FLAG_M3264_SEC = $(AM_FLAG_M3264_@VGCONF_PLATFORM_SEC_CAPS@) +- +-# Baseline link flags for making vgpreload shared objects. 
+-# +-PRELOAD_LDFLAGS_COMMON_LINUX = -nodefaultlibs -shared \ +- -Wl,-z,interpose,-z,initfirst $(am__append_6) +-PRELOAD_LDFLAGS_COMMON_FREEBSD = -nodefaultlibs -shared -Wl,-z,interpose,-z,initfirst +-PRELOAD_LDFLAGS_COMMON_DARWIN = -dynamic -dynamiclib -all_load +-PRELOAD_LDFLAGS_COMMON_SOLARIS = -nodefaultlibs -shared \ +- -Wl,-z,interpose,-z,initfirst $(am__append_5) +-PRELOAD_LDFLAGS_X86_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M32@ +-PRELOAD_LDFLAGS_AMD64_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M64@ +-PRELOAD_LDFLAGS_PPC32_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M32@ +-PRELOAD_LDFLAGS_PPC64BE_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M64@ +-PRELOAD_LDFLAGS_PPC64LE_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M64@ +-PRELOAD_LDFLAGS_ARM_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M32@ +-PRELOAD_LDFLAGS_ARM64_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M64@ +-PRELOAD_LDFLAGS_X86_FREEBSD = $(PRELOAD_LDFLAGS_COMMON_FREEBSD) @FLAG_M32@ +-PRELOAD_LDFLAGS_AMD64_FREEBSD = $(PRELOAD_LDFLAGS_COMMON_FREEBSD) @FLAG_M64@ +-PRELOAD_LDFLAGS_X86_DARWIN = $(PRELOAD_LDFLAGS_COMMON_DARWIN) -arch i386 +-PRELOAD_LDFLAGS_AMD64_DARWIN = $(PRELOAD_LDFLAGS_COMMON_DARWIN) -arch x86_64 +-PRELOAD_LDFLAGS_S390X_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M64@ +-PRELOAD_LDFLAGS_MIPS32_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M32@ +-PRELOAD_LDFLAGS_NANOMIPS_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M32@ +-PRELOAD_LDFLAGS_MIPS64_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M64@ +-PRELOAD_LDFLAGS_X86_SOLARIS = $(PRELOAD_LDFLAGS_COMMON_SOLARIS) @FLAG_M32@ +-PRELOAD_LDFLAGS_AMD64_SOLARIS = $(PRELOAD_LDFLAGS_COMMON_SOLARIS) @FLAG_M64@ +-dist_noinst_SCRIPTS = \ +- change-copyright-year \ +- dump_insn_ppc.sh \ +- gen-mdg \ +- gsl19test \ +- make_or_upd_vgversion_h \ +- nightly-build-summary \ +- update-demangler \ +- posixtestsuite-1.5.1-diff-results +- +-EXTRA_DIST = \ +- docs/valgrind-listener-manpage.xml \ +- docs/valgrind-di-server-manpage.xml \ +- gsl-1.6.patch \ +- gsl-1.6.supp \ +- gsl-1.6.out.x86.exp \ +- posixtestsuite-1.5.1-diff.txt \ +- ppcfround.c \ +- ppc64shifts.c \ +- primes.c +- +-valgrind_listener_SOURCES = valgrind-listener.c +-valgrind_listener_CPPFLAGS = $(AM_CPPFLAGS_PRI) -I$(top_srcdir)/coregrind +-valgrind_listener_CFLAGS = $(AM_CFLAGS_PRI) $(am__append_7) +-valgrind_listener_CCASFLAGS = $(AM_CCASFLAGS_PRI) +-valgrind_listener_LDFLAGS = $(AM_CFLAGS_PRI) $(am__append_8) +-@VGCONF_OS_IS_SOLARIS_TRUE@valgrind_listener_LDADD = -lsocket -lnsl +-valgrind_di_server_SOURCES = valgrind-di-server.c +-valgrind_di_server_CPPFLAGS = $(AM_CPPFLAGS_PRI) -I$(top_srcdir)/coregrind +-valgrind_di_server_CFLAGS = $(AM_CFLAGS_PRI) $(am__append_9) +-valgrind_di_server_CCASFLAGS = $(AM_CCASFLAGS_PRI) +-valgrind_di_server_LDFLAGS = $(AM_CFLAGS_PRI) $(am__append_10) +-@VGCONF_OS_IS_SOLARIS_TRUE@valgrind_di_server_LDADD = -lsocket -lnsl +- +-# The link flags for this are tricky, because we want to build it for +-# both the primary and secondary platforms, and add +-# "-Wl,-read_only_relocs -Wl,suppress" to whichever of those is x86-darwin, +-# if any. Hence there's a double-nested conditional that adds to the +-# LDFLAGS in both cases. 
+-getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_SOURCES = getoff.c +-getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_CPPFLAGS = $(AM_CPPFLAGS_@VGCONF_PLATFORM_PRI_CAPS@) +-getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_CFLAGS = $(AM_CFLAGS_@VGCONF_PLATFORM_PRI_CAPS@) +-getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_CCASFLAGS = $(AM_CCASFLAGS_PRI) +-getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_LDFLAGS = $(AM_CFLAGS_PRI) \ +- @LIB_UBSAN@ $(am__append_12) +-@HAVE_DLINFO_RTLD_DI_TLS_MODID_TRUE@getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_LDADD = $(LDADD) -ldl +-@VGCONF_HAVE_PLATFORM_SEC_TRUE@getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_SOURCES = getoff.c +-@VGCONF_HAVE_PLATFORM_SEC_TRUE@getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_CPPFLAGS = $(AM_CPPFLAGS_@VGCONF_PLATFORM_SEC_CAPS@) +-@VGCONF_HAVE_PLATFORM_SEC_TRUE@getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_CFLAGS = $(AM_CFLAGS_@VGCONF_PLATFORM_SEC_CAPS@) +-@VGCONF_HAVE_PLATFORM_SEC_TRUE@getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_CCASFLAGS = $(AM_CCASFLAGS_SEC) +-@VGCONF_HAVE_PLATFORM_SEC_TRUE@getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_LDFLAGS = \ +-@VGCONF_HAVE_PLATFORM_SEC_TRUE@ $(AM_CFLAGS_SEC) \ +-@VGCONF_HAVE_PLATFORM_SEC_TRUE@ $(am__append_13) +-@HAVE_DLINFO_RTLD_DI_TLS_MODID_TRUE@@VGCONF_HAVE_PLATFORM_SEC_TRUE@getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_LDADD = $(LDADD) -ldl +- +-# You can override AUX_CHECK_DIR to use a previous download/build. +-# Must be an absolute path. +-# e.g. make auxchecks AUX_CHECK_DIR=$HOME/valgrind-auxtests +-AUX_CHECK_DIR = $(abs_builddir)/auxchecks +- +-# GNU Scientific Library 1.6 +-GSL_DIR_NAME = gsl-1.6 +-GSL_TAR_NAME = $(GSL_DIR_NAME).tar.gz +-GSL_URL = https://ftpmirror.gnu.org/gsl/$(GSL_TAR_NAME) +-GSL_SHA256_SUM = 52e097b5228a617fef788d54eba6855c1addc62b8f68a1dfb5895cad25594f1f +-GSL_TAR = $(AUX_CHECK_DIR)/$(GSL_TAR_NAME) +-GSL_SRC_DIR = $(AUX_CHECK_DIR)/$(GSL_DIR_NAME) +-# By default we like -O3 to hopefully get some loop vectorization +-# You can also override GSL_CFLAGS if you want e.g. -march=core-avx2 +-# Different GSL_CFLAGS will result in different build dirs (under AUX_CHECK_DIR) +-GSL_CFLAGS = -g -O3 $(am__append_14) +- +-# Trick to get a literal space to use in substitutions +-sp := $(subst ,, ) +- +-# Filter out spaces from GSL_CFLAGS to get unique build dir +-GSL_BUILD_DIR = $(AUX_CHECK_DIR)/gsl-build$(subst $(sp),,$(GSL_CFLAGS)) +- +-# These are all the tests, except siman and randist which can take minutes. +-GSL_TESTS = block cblas cdf cheb combination complex const deriv dht diff \ +- eigen err fft fit histogram ieee-utils integration interpolation \ +- linalg matrix min monte multifit multimin multiroots ntuple \ +- ode-initval permutation poly qrng rng roots sort specfunc \ +- statistics sum sys vector wavelet +- +-all: all-am +- +-.SUFFIXES: +-.SUFFIXES: .c .o .obj +-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/Makefile.all.am $(am__configure_deps) +- @for dep in $?; do \ +- case '$(am__configure_deps)' in \ +- *$$dep*) \ +- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ +- && { if test -f $@; then exit 0; else break; fi; }; \ +- exit 1;; \ +- esac; \ +- done; \ +- echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign auxprogs/Makefile'; \ +- $(am__cd) $(top_srcdir) && \ +- $(AUTOMAKE) --foreign auxprogs/Makefile +-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status +- @case '$?' 
in \ +- *config.status*) \ +- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ +- *) \ +- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ +- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ +- esac; +-$(top_srcdir)/Makefile.all.am $(am__empty): +- +-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) +- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +- +-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) +- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) +- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +-$(am__aclocal_m4_deps): +-install-binPROGRAMS: $(bin_PROGRAMS) +- @$(NORMAL_INSTALL) +- @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ +- if test -n "$$list"; then \ +- echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \ +- $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \ +- fi; \ +- for p in $$list; do echo "$$p $$p"; done | \ +- sed 's/$(EXEEXT)$$//' | \ +- while read p p1; do if test -f $$p \ +- ; then echo "$$p"; echo "$$p"; else :; fi; \ +- done | \ +- sed -e 'p;s,.*/,,;n;h' \ +- -e 's|.*|.|' \ +- -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ +- sed 'N;N;N;s,\n, ,g' | \ +- $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ +- { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ +- if ($$2 == $$4) files[d] = files[d] " " $$1; \ +- else { print "f", $$3 "/" $$4, $$1; } } \ +- END { for (d in files) print "f", d, files[d] }' | \ +- while read type dir files; do \ +- if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ +- test -z "$$files" || { \ +- echo " $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \ +- $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ +- } \ +- ; done +- +-uninstall-binPROGRAMS: +- @$(NORMAL_UNINSTALL) +- @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ +- files=`for p in $$list; do echo "$$p"; done | \ +- sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ +- -e 's/$$/$(EXEEXT)/' \ +- `; \ +- test -n "$$list" || exit 0; \ +- echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \ +- cd "$(DESTDIR)$(bindir)" && rm -f $$files +- +-clean-binPROGRAMS: +- -test -z "$(bin_PROGRAMS)" || rm -f $(bin_PROGRAMS) +- +-clean-noinstPROGRAMS: +- -test -z "$(noinst_PROGRAMS)" || rm -f $(noinst_PROGRAMS) +- +-getoff-@VGCONF_ARCH_PRI@-@VGCONF_OS@$(EXEEXT): $(getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_OBJECTS) $(getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_DEPENDENCIES) $(EXTRA_getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_DEPENDENCIES) +- @rm -f getoff-@VGCONF_ARCH_PRI@-@VGCONF_OS@$(EXEEXT) +- $(AM_V_CCLD)$(getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_LINK) $(getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_OBJECTS) $(getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_LDADD) $(LIBS) +- +-getoff-@VGCONF_ARCH_SEC@-@VGCONF_OS@$(EXEEXT): $(getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_OBJECTS) $(getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_DEPENDENCIES) $(EXTRA_getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_DEPENDENCIES) +- @rm -f getoff-@VGCONF_ARCH_SEC@-@VGCONF_OS@$(EXEEXT) +- $(AM_V_CCLD)$(getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_LINK) $(getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_OBJECTS) $(getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_LDADD) $(LIBS) +- +-valgrind-di-server$(EXEEXT): $(valgrind_di_server_OBJECTS) $(valgrind_di_server_DEPENDENCIES) $(EXTRA_valgrind_di_server_DEPENDENCIES) +- @rm -f valgrind-di-server$(EXEEXT) 
+- $(AM_V_CCLD)$(valgrind_di_server_LINK) $(valgrind_di_server_OBJECTS) $(valgrind_di_server_LDADD) $(LIBS) +- +-valgrind-listener$(EXEEXT): $(valgrind_listener_OBJECTS) $(valgrind_listener_DEPENDENCIES) $(EXTRA_valgrind_listener_DEPENDENCIES) +- @rm -f valgrind-listener$(EXEEXT) +- $(AM_V_CCLD)$(valgrind_listener_LINK) $(valgrind_listener_OBJECTS) $(valgrind_listener_LDADD) $(LIBS) +- +-mostlyclean-compile: +- -rm -f *.$(OBJEXT) +- +-distclean-compile: +- -rm -f *.tab.c +- +-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@-getoff.Po@am__quote@ # am--include-marker +-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@-getoff.Po@am__quote@ # am--include-marker +-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/valgrind_di_server-valgrind-di-server.Po@am__quote@ # am--include-marker +-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/valgrind_listener-valgrind-listener.Po@am__quote@ # am--include-marker +- +-$(am__depfiles_remade): +- @$(MKDIR_P) $(@D) +- @echo '# dummy' >$@-t && $(am__mv) $@-t $@ +- +-am--depfiles: $(am__depfiles_remade) +- +-.c.o: +-@am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ +-@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +-@am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $< +- +-.c.obj: +-@am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ +-@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ +-@am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'` +- +-getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@-getoff.o: getoff.c +-@am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_CPPFLAGS) $(CPPFLAGS) $(getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_CFLAGS) $(CFLAGS) -MT getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@-getoff.o -MD -MP -MF $(DEPDIR)/getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@-getoff.Tpo -c -o getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@-getoff.o `test -f 'getoff.c' || echo '$(srcdir)/'`getoff.c +-@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@-getoff.Tpo $(DEPDIR)/getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@-getoff.Po +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='getoff.c' object='getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@-getoff.o' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_CPPFLAGS) $(CPPFLAGS) $(getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_CFLAGS) $(CFLAGS) -c -o getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@-getoff.o `test -f 'getoff.c' || echo '$(srcdir)/'`getoff.c +- +-getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@-getoff.obj: getoff.c +-@am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) 
$(getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_CPPFLAGS) $(CPPFLAGS) $(getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_CFLAGS) $(CFLAGS) -MT getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@-getoff.obj -MD -MP -MF $(DEPDIR)/getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@-getoff.Tpo -c -o getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@-getoff.obj `if test -f 'getoff.c'; then $(CYGPATH_W) 'getoff.c'; else $(CYGPATH_W) '$(srcdir)/getoff.c'; fi` +-@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@-getoff.Tpo $(DEPDIR)/getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@-getoff.Po +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='getoff.c' object='getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@-getoff.obj' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_CPPFLAGS) $(CPPFLAGS) $(getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@_CFLAGS) $(CFLAGS) -c -o getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@-getoff.obj `if test -f 'getoff.c'; then $(CYGPATH_W) 'getoff.c'; else $(CYGPATH_W) '$(srcdir)/getoff.c'; fi` +- +-getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@-getoff.o: getoff.c +-@am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_CPPFLAGS) $(CPPFLAGS) $(getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_CFLAGS) $(CFLAGS) -MT getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@-getoff.o -MD -MP -MF $(DEPDIR)/getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@-getoff.Tpo -c -o getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@-getoff.o `test -f 'getoff.c' || echo '$(srcdir)/'`getoff.c +-@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@-getoff.Tpo $(DEPDIR)/getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@-getoff.Po +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='getoff.c' object='getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@-getoff.o' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_CPPFLAGS) $(CPPFLAGS) $(getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_CFLAGS) $(CFLAGS) -c -o getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@-getoff.o `test -f 'getoff.c' || echo '$(srcdir)/'`getoff.c +- +-getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@-getoff.obj: getoff.c +-@am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_CPPFLAGS) $(CPPFLAGS) $(getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_CFLAGS) $(CFLAGS) -MT getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@-getoff.obj -MD -MP -MF $(DEPDIR)/getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@-getoff.Tpo -c -o getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@-getoff.obj `if test -f 'getoff.c'; then $(CYGPATH_W) 'getoff.c'; else $(CYGPATH_W) '$(srcdir)/getoff.c'; fi` +-@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@-getoff.Tpo $(DEPDIR)/getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@-getoff.Po +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='getoff.c' object='getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@-getoff.obj' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_CPPFLAGS) $(CPPFLAGS) $(getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@_CFLAGS) $(CFLAGS) -c -o getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@-getoff.obj `if test -f 
'getoff.c'; then $(CYGPATH_W) 'getoff.c'; else $(CYGPATH_W) '$(srcdir)/getoff.c'; fi` +- +-valgrind_di_server-valgrind-di-server.o: valgrind-di-server.c +-@am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(valgrind_di_server_CPPFLAGS) $(CPPFLAGS) $(valgrind_di_server_CFLAGS) $(CFLAGS) -MT valgrind_di_server-valgrind-di-server.o -MD -MP -MF $(DEPDIR)/valgrind_di_server-valgrind-di-server.Tpo -c -o valgrind_di_server-valgrind-di-server.o `test -f 'valgrind-di-server.c' || echo '$(srcdir)/'`valgrind-di-server.c +-@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/valgrind_di_server-valgrind-di-server.Tpo $(DEPDIR)/valgrind_di_server-valgrind-di-server.Po +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='valgrind-di-server.c' object='valgrind_di_server-valgrind-di-server.o' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(valgrind_di_server_CPPFLAGS) $(CPPFLAGS) $(valgrind_di_server_CFLAGS) $(CFLAGS) -c -o valgrind_di_server-valgrind-di-server.o `test -f 'valgrind-di-server.c' || echo '$(srcdir)/'`valgrind-di-server.c +- +-valgrind_di_server-valgrind-di-server.obj: valgrind-di-server.c +-@am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(valgrind_di_server_CPPFLAGS) $(CPPFLAGS) $(valgrind_di_server_CFLAGS) $(CFLAGS) -MT valgrind_di_server-valgrind-di-server.obj -MD -MP -MF $(DEPDIR)/valgrind_di_server-valgrind-di-server.Tpo -c -o valgrind_di_server-valgrind-di-server.obj `if test -f 'valgrind-di-server.c'; then $(CYGPATH_W) 'valgrind-di-server.c'; else $(CYGPATH_W) '$(srcdir)/valgrind-di-server.c'; fi` +-@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/valgrind_di_server-valgrind-di-server.Tpo $(DEPDIR)/valgrind_di_server-valgrind-di-server.Po +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='valgrind-di-server.c' object='valgrind_di_server-valgrind-di-server.obj' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(valgrind_di_server_CPPFLAGS) $(CPPFLAGS) $(valgrind_di_server_CFLAGS) $(CFLAGS) -c -o valgrind_di_server-valgrind-di-server.obj `if test -f 'valgrind-di-server.c'; then $(CYGPATH_W) 'valgrind-di-server.c'; else $(CYGPATH_W) '$(srcdir)/valgrind-di-server.c'; fi` +- +-valgrind_listener-valgrind-listener.o: valgrind-listener.c +-@am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(valgrind_listener_CPPFLAGS) $(CPPFLAGS) $(valgrind_listener_CFLAGS) $(CFLAGS) -MT valgrind_listener-valgrind-listener.o -MD -MP -MF $(DEPDIR)/valgrind_listener-valgrind-listener.Tpo -c -o valgrind_listener-valgrind-listener.o `test -f 'valgrind-listener.c' || echo '$(srcdir)/'`valgrind-listener.c +-@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/valgrind_listener-valgrind-listener.Tpo $(DEPDIR)/valgrind_listener-valgrind-listener.Po +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='valgrind-listener.c' object='valgrind_listener-valgrind-listener.o' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(valgrind_listener_CPPFLAGS) $(CPPFLAGS) $(valgrind_listener_CFLAGS) $(CFLAGS) -c -o valgrind_listener-valgrind-listener.o `test -f 
'valgrind-listener.c' || echo '$(srcdir)/'`valgrind-listener.c +- +-valgrind_listener-valgrind-listener.obj: valgrind-listener.c +-@am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(valgrind_listener_CPPFLAGS) $(CPPFLAGS) $(valgrind_listener_CFLAGS) $(CFLAGS) -MT valgrind_listener-valgrind-listener.obj -MD -MP -MF $(DEPDIR)/valgrind_listener-valgrind-listener.Tpo -c -o valgrind_listener-valgrind-listener.obj `if test -f 'valgrind-listener.c'; then $(CYGPATH_W) 'valgrind-listener.c'; else $(CYGPATH_W) '$(srcdir)/valgrind-listener.c'; fi` +-@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/valgrind_listener-valgrind-listener.Tpo $(DEPDIR)/valgrind_listener-valgrind-listener.Po +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='valgrind-listener.c' object='valgrind_listener-valgrind-listener.obj' libtool=no @AMDEPBACKSLASH@ +-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +-@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(valgrind_listener_CPPFLAGS) $(CPPFLAGS) $(valgrind_listener_CFLAGS) $(CFLAGS) -c -o valgrind_listener-valgrind-listener.obj `if test -f 'valgrind-listener.c'; then $(CYGPATH_W) 'valgrind-listener.c'; else $(CYGPATH_W) '$(srcdir)/valgrind-listener.c'; fi` +- +-ID: $(am__tagged_files) +- $(am__define_uniq_tagged_files); mkid -fID $$unique +-tags: tags-am +-TAGS: tags +- +-tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) +- set x; \ +- here=`pwd`; \ +- $(am__define_uniq_tagged_files); \ +- shift; \ +- if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ +- test -n "$$unique" || unique=$$empty_fix; \ +- if test $$# -gt 0; then \ +- $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ +- "$$@" $$unique; \ +- else \ +- $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ +- $$unique; \ +- fi; \ +- fi +-ctags: ctags-am +- +-CTAGS: ctags +-ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) +- $(am__define_uniq_tagged_files); \ +- test -z "$(CTAGS_ARGS)$$unique" \ +- || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ +- $$unique +- +-GTAGS: +- here=`$(am__cd) $(top_builddir) && pwd` \ +- && $(am__cd) $(top_srcdir) \ +- && gtags -i $(GTAGS_ARGS) "$$here" +-cscopelist: cscopelist-am +- +-cscopelist-am: $(am__tagged_files) +- list='$(am__tagged_files)'; \ +- case "$(srcdir)" in \ +- [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ +- *) sdir=$(subdir)/$(srcdir) ;; \ +- esac; \ +- for i in $$list; do \ +- if test -f "$$i"; then \ +- echo "$(subdir)/$$i"; \ +- else \ +- echo "$$sdir/$$i"; \ +- fi; \ +- done >> $(top_builddir)/cscope.files +- +-distclean-tags: +- -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags +-distdir: $(BUILT_SOURCES) +- $(MAKE) $(AM_MAKEFLAGS) distdir-am +- +-distdir-am: $(DISTFILES) +- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ +- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ +- list='$(DISTFILES)'; \ +- dist_files=`for file in $$list; do echo $$file; done | \ +- sed -e "s|^$$srcdirstrip/||;t" \ +- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ +- case $$dist_files in \ +- */*) $(MKDIR_P) `echo "$$dist_files" | \ +- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ +- sort -u` ;; \ +- esac; \ +- for file in $$dist_files; do \ +- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ +- if test -d $$d/$$file; then \ +- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ +- if test -d "$(distdir)/$$file"; then \ +- find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ +- fi; \ +- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ +- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ +- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ +- fi; \ +- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ +- else \ +- test -f "$(distdir)/$$file" \ +- || cp -p $$d/$$file "$(distdir)/$$file" \ +- || exit 1; \ +- fi; \ +- done +-check-am: all-am +-check: check-am +-all-am: Makefile $(PROGRAMS) $(SCRIPTS) all-local +-installdirs: +- for dir in "$(DESTDIR)$(bindir)"; do \ +- test -z "$$dir" || $(MKDIR_P) "$$dir"; \ +- done +-install: install-am +-install-exec: install-exec-am +-install-data: install-data-am +-uninstall: uninstall-am +- +-install-am: all-am +- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am +- +-installcheck: installcheck-am +-install-strip: +- if test -z '$(STRIP)'; then \ +- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ +- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ +- install; \ +- else \ +- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ +- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ +- "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ +- fi +-mostlyclean-generic: +- +-clean-generic: +- +-distclean-generic: +- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) +- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) +- +-maintainer-clean-generic: +- @echo "This command is intended for maintainers to use" +- @echo "it deletes files that may require special tools to rebuild." +-clean: clean-am +- +-clean-am: clean-binPROGRAMS clean-generic clean-local \ +- clean-noinstPROGRAMS mostlyclean-am +- +-distclean: distclean-am +- -rm -f ./$(DEPDIR)/getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@-getoff.Po +- -rm -f ./$(DEPDIR)/getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@-getoff.Po +- -rm -f ./$(DEPDIR)/valgrind_di_server-valgrind-di-server.Po +- -rm -f ./$(DEPDIR)/valgrind_listener-valgrind-listener.Po +- -rm -f Makefile +-distclean-am: clean-am distclean-compile distclean-generic \ +- distclean-tags +- +-dvi: dvi-am +- +-dvi-am: +- +-html: html-am +- +-html-am: +- +-info: info-am +- +-info-am: +- +-install-data-am: +- +-install-dvi: install-dvi-am +- +-install-dvi-am: +- +-install-exec-am: install-binPROGRAMS install-exec-local +- +-install-html: install-html-am +- +-install-html-am: +- +-install-info: install-info-am +- +-install-info-am: +- +-install-man: +- +-install-pdf: install-pdf-am +- +-install-pdf-am: +- +-install-ps: install-ps-am +- +-install-ps-am: +- +-installcheck-am: +- +-maintainer-clean: maintainer-clean-am +- -rm -f ./$(DEPDIR)/getoff_@VGCONF_ARCH_PRI@_@VGCONF_OS@-getoff.Po +- -rm -f ./$(DEPDIR)/getoff_@VGCONF_ARCH_SEC@_@VGCONF_OS@-getoff.Po +- -rm -f ./$(DEPDIR)/valgrind_di_server-valgrind-di-server.Po +- -rm -f ./$(DEPDIR)/valgrind_listener-valgrind-listener.Po +- -rm -f Makefile +-maintainer-clean-am: distclean-am maintainer-clean-generic +- +-mostlyclean: mostlyclean-am +- +-mostlyclean-am: mostlyclean-compile mostlyclean-generic +- +-pdf: pdf-am +- +-pdf-am: +- +-ps: ps-am +- +-ps-am: +- +-uninstall-am: uninstall-binPROGRAMS uninstall-local +- +-.MAKE: install-am install-strip +- +-.PHONY: CTAGS GTAGS TAGS all all-am all-local am--depfiles check \ +- check-am clean clean-binPROGRAMS clean-generic clean-local \ +- clean-noinstPROGRAMS cscopelist-am ctags ctags-am distclean \ +- distclean-compile distclean-generic 
distclean-tags distdir dvi \ +- dvi-am html html-am info info-am install install-am \ +- install-binPROGRAMS install-data install-data-am install-dvi \ +- install-dvi-am install-exec install-exec-am install-exec-local \ +- install-html install-html-am install-info install-info-am \ +- install-man install-pdf install-pdf-am install-ps \ +- install-ps-am install-strip installcheck installcheck-am \ +- installdirs maintainer-clean maintainer-clean-generic \ +- mostlyclean mostlyclean-compile mostlyclean-generic pdf pdf-am \ +- ps ps-am tags tags-am uninstall uninstall-am \ +- uninstall-binPROGRAMS uninstall-local +- +-.PRECIOUS: Makefile +- +- +-# This used to be required when Vex had a handwritten Makefile. It +-# shouldn't be needed any more, though. +- +-#---------------------------------------------------------------------------- +-# noinst_PROGRAMS and noinst_DSYMS targets +-#---------------------------------------------------------------------------- +- +-# On Darwin, for a program 'p', the DWARF debug info is stored in the +-# directory 'p.dSYM'. This must be generated after the executable is +-# created, with 'dsymutil p'. We could redefine LINK with a script that +-# executes 'dsymutil' after linking, but that's a pain. Instead we use this +-# hook so that every time "make check" is run, we subsequently invoke +-# 'dsymutil' on all the executables that lack a .dSYM directory, or that are +-# newer than their corresponding .dSYM directory. +-build-noinst_DSYMS: $(noinst_DSYMS) +- for f in $(noinst_DSYMS); do \ +- if [ ! -e $$f.dSYM -o $$f -nt $$f.dSYM ] ; then \ +- echo "dsymutil $$f"; \ +- dsymutil $$f; \ +- fi; \ +- done +- +-# This is used by coregrind/Makefile.am and Makefile.tool.am for doing +-# "in-place" installs. It copies $(noinst_PROGRAMS) into $inplacedir. +-# It needs to be depended on by an 'all-local' rule. +-inplace-noinst_PROGRAMS: $(noinst_PROGRAMS) +- mkdir -p $(inplacedir); \ +- for f in $(noinst_PROGRAMS) ; do \ +- rm -f $(inplacedir)/$$f; \ +- ln -f -s ../$(subdir)/$$f $(inplacedir); \ +- done +- +-# Similar to inplace-noinst_PROGRAMS +-inplace-noinst_DSYMS: build-noinst_DSYMS +- mkdir -p $(inplacedir); \ +- for f in $(noinst_DSYMS); do \ +- rm -f $(inplacedir)/$$f.dSYM; \ +- ln -f -s ../$(subdir)/$$f.dSYM $(inplacedir); \ +- done +- +-# This is used by coregrind/Makefile.am and by /Makefile.am for doing +-# "make install". It copies $(noinst_PROGRAMS) into $prefix/libexec/valgrind/. +-# It needs to be depended on by an 'install-exec-local' rule. +-install-noinst_PROGRAMS: $(noinst_PROGRAMS) +- $(mkinstalldirs) $(DESTDIR)$(pkglibexecdir); \ +- for f in $(noinst_PROGRAMS); do \ +- $(INSTALL_PROGRAM) $$f $(DESTDIR)$(pkglibexecdir); \ +- done +- +-# This is used by coregrind/Makefile.am and by /Makefile.am for doing +-# "make uninstall". It removes $(noinst_PROGRAMS) from $prefix/libexec/valgrind/. +-# It needs to be depended on by an 'uninstall-local' rule. +-uninstall-noinst_PROGRAMS: +- for f in $(noinst_PROGRAMS); do \ +- rm -f $(DESTDIR)$(pkglibexecdir)/$$f; \ +- done +- +-# Similar to install-noinst_PROGRAMS. +-# Nb: we don't use $(INSTALL_PROGRAM) here because it doesn't work with +-# directories. XXX: not sure whether the resulting permissions will be +-# correct when using 'cp -R'... +-install-noinst_DSYMS: build-noinst_DSYMS +- $(mkinstalldirs) $(DESTDIR)$(pkglibexecdir); \ +- for f in $(noinst_DSYMS); do \ +- cp -R $$f.dSYM $(DESTDIR)$(pkglibexecdir); \ +- done +- +-# Similar to uninstall-noinst_PROGRAMS. 
+-uninstall-noinst_DSYMS: +- for f in $(noinst_DSYMS); do \ +- rm -f $(DESTDIR)$(pkglibexecdir)/$$f.dSYM; \ +- done +- +-# This needs to be depended on by a 'clean-local' rule. +-clean-noinst_DSYMS: +- for f in $(noinst_DSYMS); do \ +- rm -rf $$f.dSYM; \ +- done +- +-#---------------------------------------------------------------------------- +-# Auxiliary testsuits +-#---------------------------------------------------------------------------- +- +-auxchecks: gsl-check +-auxclean: gsl-clean +- +-# Get the tar file if we don't have it yet. +-$(GSL_TAR): +- mkdir -p $(AUX_CHECK_DIR) +- wget -q -O $(GSL_TAR) $(GSL_URL) +- +-# We need to autoreconf to make sure to get config.guess, config.sub +-# and libtool for newer architectures. +-$(GSL_SRC_DIR)/gsl-patched: $(GSL_TAR) +- echo "$(GSL_SHA256_SUM) $(GSL_TAR)" | @SHA256SUM@ --check - +- (cd $(AUX_CHECK_DIR) && \ +- tar zxf $(GSL_TAR_NAME) && \ +- cd $(GSL_DIR_NAME) && \ +- patch -p1 < $(abs_top_srcdir)/auxprogs/gsl-1.6.patch && \ +- autoreconf -f -i -Wnone) +- touch $@ +- +-# We need make check -k because +-# some tests might fail even native (only on i386 though). +-# make check doesn't work reliably with -j. +-$(GSL_BUILD_DIR)/gsl-build: $(GSL_SRC_DIR)/gsl-patched +- mkdir -p $(GSL_BUILD_DIR) +- (cd $(GSL_BUILD_DIR) && \ +- $(GSL_SRC_DIR)/configure CC="${CC}" CXX="${CXX}" CFLAGS="$(GSL_CFLAGS)" && \ +- ${MAKE} -j $(nproc) && \ +- ${MAKE} check -k || true) +- touch $@ +- +-# We hope all tests PASS (so don't produce output except for the test names). +-# But on x86 we get one FAIL, so that is "fine" too. +-# We currently don't check stderr, but we probably should. +-gsl-check: $(GSL_BUILD_DIR)/gsl-build +- (cd $(GSL_BUILD_DIR); \ +- for gsl_test in $(GSL_TESTS); do \ +- echo $$gsl_test; \ +- ./libtool --mode=execute $(abs_top_builddir)/vg-in-place -q \ +- --suppressions=$(abs_top_srcdir)/auxprogs/gsl-1.6.supp \ +- $$gsl_test/test; \ +- done | grep --line-buffered -v ^PASS: | tee valgrind-gsl.out) +- for gsl_test in $(GSL_TESTS); do echo $$gsl_test; done \ +- | cmp - $(GSL_BUILD_DIR)/valgrind-gsl.out || \ +- diff -u $(abs_top_srcdir)/auxprogs/gsl-1.6.out.x86.exp \ +- $(GSL_BUILD_DIR)/valgrind-gsl.out +- +-# We keep the tarball but remove the unpacked sources and build +-gsl-clean: +- rm -rf $(GSL_SRC_NAME) $(GSL_BUILD_DIR) +- +-#---------------------------------------------------------------------------- +-# General stuff +-#---------------------------------------------------------------------------- +- +-all-local: inplace-noinst_PROGRAMS inplace-noinst_DSYMS +- +-clean-local: clean-noinst_DSYMS auxclean +- +-install-exec-local: install-noinst_PROGRAMS install-noinst_DSYMS +- +-uninstall-local: uninstall-noinst_PROGRAMS uninstall-noinst_DSYMS +- +-# Tell versions [3.59,3.63) of GNU make to not export all variables. +-# Otherwise a system limit (for SysV at least) may be exceeded. 
+-.NOEXPORT: +diff '--color=auto' -ru --new-file valgrind-3.21.0/auxprogs/s390-check-opcodes.pl valgrind-riscv64/auxprogs/s390-check-opcodes.pl +--- valgrind-3.21.0/auxprogs/s390-check-opcodes.pl 1970-01-01 08:00:00.000000000 +0800 ++++ valgrind-riscv64/auxprogs/s390-check-opcodes.pl 2022-09-21 06:23:46.000000000 +0800 +@@ -0,0 +1,386 @@ ++#!/usr/bin/env perl ++ ++use strict; ++use warnings; ++ ++#------------------------------------------------------------------ ++# This script assists in updating s390-opcodes.csv ++# It utilizes /opcodes/s390-opc.txt and ++# /VEX/priv/guest_s390_toIR.c and will ++# - identify new opcodes that are present in s390-opc.txt ++# (s390-opc.txt is the golden list) ++# - identify opcodes that are implemented in guest_s390_toIR.c ++# but have an out-of-date status in the CSV file. ++#------------------------------------------------------------------ ++my $num_arg = $#ARGV + 1; ++ ++if ($num_arg != 3) { ++ die "usage: s390-check-opcodes s390-opcodes.csv s390-opc.txt guest_s390_toIR.c\n"; ++} ++ ++my $csv_file = $ARGV[0]; ++my $opc_file = $ARGV[1]; ++my $toir_file = $ARGV[2]; ++ ++my %opc_desc = (); ++my %csv_desc = (); ++my %csv_implemented = (); ++my %toir_implemented = (); ++my %toir_decoded = (); ++my %known_arch = map {($_ => 1)} ++ qw(g5 z900 z990 z9-109 z9-ec z10 z196 zEC12 z13 arch12 arch13); ++ ++# Patterns for identifying certain extended mnemonics that shall be ++# skipped in "s390-opc.txt" and "s390-opcodes.csv". ++ ++my @extended_mnemonics = ( ++ "bi", # extended mnemonic for bic ++ 'brul?', ++ 'jasl?', ++ 'jctg?', ++ 'jg?nop', ++ 'jxleg?', ++ 'jxhg?', ++ 'l[de]rv', ++ 'risbgn?z', ++ 'st[de]rv', ++ "va[bhfgq]", ++ "vacc[bhfgq]", ++ "vacccq", ++ "vacq", ++ "vavgl*[bhfg]", ++ "vcdl*gb", ++ 'vcfp[sl]', ++ '[vw]cel?fb', ++ 'vc[sl]fp', ++ '[vw]cl?feb', ++ "vceq[bhfg]s*", ++ "vchl*[bhfg]s*", ++ "vcl*gdb", ++ "vc[lt]z[bhfg]", ++ "vecl*[bhfg]", ++ "verim[bhfg]", ++ "verllv*[bhfg]", ++ "veslv*[bhfg]", ++ "vesrav*[bhfg]", ++ "vesrlv*[bhfg]", ++ "vfaez*[bhfg]s*", ++ "vfeez*[bhfg]s*", ++ "vfenez*[bhfg]s*", ++ "vfce[sd]bs*", ++ "vfchdbs*", ++ "vfche[sd]bs*", ++ "vfchsbs*", ++ "vfd[sd]b", ++ "vfa[sd]b", ++ "vfi[sd]b", ++ "vfke[sd]bs*", ++ "vfkhe*[sd]bs*", ++ "vflc[sd]b", ++ "vfll[sd]", ++ "[vw]flr[dx]", ++ "vfl[np][sd]b", ++ "vfm[as]*[sd]b", ++ "vfmax[sd]b", ++ "vfmin[sd]b", ++ "vfnm[as][sd]b", ++ "vfpso[sd]b", ++ "vfsq*[sd]b", ++ "vftci[sd]b", ++ "vgfma*[bhfg]", ++ "vgm[bhfg]", ++ "vistr[bhfg]s*", ++ 'vlbr[hfgq]', ++ 'vlbrrep[hfg]', ++ "vlc[bhfg]", ++ "[vw]ldeb", ++ "[vw]ledb", ++ 'vler[hfg]', ++ "vlgv[bhfg]", ++ 'vllebrz[hfge]', ++ "vllez[bhfg]", ++ "vllezlf", ++ "vlp[bhfg]", ++ "vlrep[bhfg]", ++ "vlvg[bhfg]", ++ "vmal?[eoh][bhfg]", ++ "vmal(b|hw|f)", ++ "vml(b|hw|f)", ++ "vml?(o|e)[bhf]", ++ "vml?h[bhf]", ++ "vm[nx]l*[bhfg]", ++ "vmr[lh][bhfg]", ++ "vmslg", ++ "vnot", ++ "(vone|vzero)", ++ "vpkl*[bhfg]", ++ "vpkl*s*[bhfg]s*", ++ "vpopct[bhfg]", ++ "vrepi*[bhgf]", ++ "vs[bhfgq]", ++ "vsbcbiq", ++ "vsbiq", ++ "vscbi[bhfgq]", ++ "vseg[bfh]", ++ 'vstbr[hfgq]', ++ 'vster[hfg]', ++ "vstrcz*[bhf]s*", ++ 'vstrsz?[bhf]', ++ "vsum(b|gh|gf|h|qf|qg)", ++ "vuplh[bhf]", ++ "vuph[bhf]", ++ "vupl(b|hw|f)", ++ "vupll[bhf]", ++ "wcdl*gb", ++ "wcl*gdb", ++ "wfa[sdx]b", ++ "wfch*e*[sdx]bs*", ++ "wf[cdi][sdx]b", ++ "wfkh*e*[sdx]bs*", ++ "wfk[sdx]b", ++ "wfl[clnp][sdx]b*", ++ "wfmax[sdx]b", ++ "wfmin[sdx]b", ++ "wfm[as]*[sdx]b", ++ "wfnm[as][sdx]b", ++ "wfpso[sdx]b", ++ "wftci[sdx]b", ++ "wfsq*[sdx]b", ++ "vl(ed|de)", ++ "prno" # alternate mnemonic for ppno ++ ); ++ ++# Compile 
excluded mnemonics into one regular expression to optimize ++# speed. Also it simplifies the code. ++ ++my $extended_mnemonics_pattern = '^(' . ++ join('|', map "$_", @extended_mnemonics) . ')$'; ++ ++#---------------------------------------------------- ++# Read s390-opc.txt (binutils) ++#---------------------------------------------------- ++open(OPC, "$opc_file") || die "cannot open $opc_file\n"; ++while (my $line = ) { ++ chomp $line; ++ next if ($line =~ "^[ ]*#"); # comments ++ next if ($line =~ /^\s*$/); # blank line ++ my ($encoding,$mnemonic,$format) = $line =~ /^(\S+) (\S+) (\S+)/gc; ++ ++ # Ignore opcodes that have wildcards in them ('$', '*') ++ # Those provide alternate mnemonics for specific instances of this opcode ++ next if ($mnemonic =~ /\$/); ++ next if ($mnemonic =~ /\*/); ++ ++ # Ignore certain opcodes which are special cases of other opcodes ++ next if ($mnemonic eq "br"); # special case of bcr ++ next if ($mnemonic eq "nopr"); # special case of bcr ++ next if ($mnemonic eq "b"); # special case of bc ++ next if ($mnemonic eq "nop"); # special case of bc ++ next if ($mnemonic eq "j"); # special case of brc ++ next if ($mnemonic eq "jg"); # special case of brcl ++ next if ($mnemonic eq "tmh"); # alternate mnemonic for tmlh ++ next if ($mnemonic eq "tml"); # alternate mnemonic for tmll ++ next if ($mnemonic eq "lrdr"); # alternate mnemonic for ldxr ++ next if ($mnemonic eq "lrer"); # alternate mnemonic for ledr ++ next if ($mnemonic eq "me"); # alternate mnemonic for mde ++ next if ($mnemonic eq "mer"); # alternate mnemonic for mder ++ next if ($mnemonic eq "cuutf"); # alternate mnemonic for cu21 ++ next if ($mnemonic eq "cutfu"); # alternate mnemonic for cu12 ++ ++ next if ($mnemonic eq "cfdbra"); # indistinguishable from cfdbr ++ next if ($mnemonic eq "cfebra"); # indistinguishable from cfebr ++ next if ($mnemonic eq "cfxbra"); # indistinguishable from cfxbr ++ next if ($mnemonic eq "cgdbra"); # indistinguishable from cgdbr ++ next if ($mnemonic eq "cgebra"); # indistinguishable from cgebr ++ next if ($mnemonic eq "cgxbra"); # indistinguishable from cgxbr ++ next if ($mnemonic eq "cdfbra"); # indistinguishable from cdfbr ++ next if ($mnemonic eq "cefbra"); # indistinguishable from cefbr ++ next if ($mnemonic eq "cxfbra"); # indistinguishable from cxfbr ++ next if ($mnemonic eq "cdgbra"); # indistinguishable from cdgbr ++ next if ($mnemonic eq "cegbra"); # indistinguishable from cegbr ++ next if ($mnemonic eq "cxgbra"); # indistinguishable from cxgbr ++ next if ($mnemonic eq "ldxbra"); # indistinguishable from ldxbr ++ next if ($mnemonic eq "lexbra"); # indistinguishable from lexbr ++ next if ($mnemonic eq "ledbra"); # indistinguishable from ledbr ++ next if ($mnemonic eq "cdgtr"); # indistinguishable from cdgtra ++ next if ($mnemonic eq "cxgtra"); # indistinguishable from cxgtr ++ next if ($mnemonic eq "cgdtra"); # indistinguishable from cgdtr ++ next if ($mnemonic eq "cgxtra"); # indistinguishable from cgxtr ++ next if ($mnemonic eq "fidbr"); # indistinguishable from fidbra ++ next if ($mnemonic eq "fiebr"); # indistinguishable from fiebra ++ next if ($mnemonic eq "fixbr"); # indistinguishable from fixbra ++ next if ($mnemonic eq "adtr"); # indistinguishable from adtra ++ next if ($mnemonic eq "axtr"); # indistinguishable from axtra ++ next if ($mnemonic eq "sdtr"); # indistinguishable from sdtra ++ next if ($mnemonic eq "sxtr"); # indistinguishable from sxtra ++ next if ($mnemonic eq "ddtr"); # indistinguishable from ddtra ++ next if ($mnemonic eq "dxtr"); # 
indistinguishable from dxtra ++ next if ($mnemonic eq "mdtr"); # indistinguishable from mdtra ++ next if ($mnemonic eq "mxtr"); # indistinguishable from mxtra ++ next if ($mnemonic =~ /$extended_mnemonics_pattern/); ++ ++ my ($description) = $line =~ /\G\s+"\s*(.*?)\s*"/gc; ++ my ($arch) = $line =~ /\G\s+(\S+)/gc; ++ unless ($known_arch{$arch}) { ++ unless (exists $known_arch{$arch}) { ++ print "warning: unsupported arch \"$arch\" in s390-opc.txt\n"; ++ $known_arch{$arch} = 0; ++ } ++ next; ++ } ++ ++ $description =~ s/\s\s+/ /g; # replace multiple blanks with a single one ++ ++ # Certain opcodes are listed more than once. Let the first description ++ # win. ++ if (exists $opc_desc{$mnemonic}) { ++ # already there ++# if ($opc_desc{$mnemonic} ne $description) { ++# print "multiple description for opcode $mnemonic\n"; ++# print " old: |" . $opc_desc{$mnemonic} . "|\n"; ++# print " new: |" . $description . "|\n"; ++# } ++ } else { ++ $opc_desc{$mnemonic} = $description; ++ } ++ ++ if ($description =~ /,/) { ++ print "warning: description of $mnemonic contains comma\n"; ++ } ++} ++close(OPC); ++ ++#---------------------------------------------------- ++# Read CSV file (valgrind) ++#---------------------------------------------------- ++open(CSV, "$csv_file") || die "cannot open $csv_file\n"; ++while (my $line = ) { ++ chomp $line; ++ next if ($line =~ "^[ ]*#"); # comments ++ my ($mnemonic,$description,$status) = split /,/,$line; ++ ++ $mnemonic =~ s/"//g; ++ $description =~ s/"//g; ++ ++ next if ($mnemonic eq "cfdbra"); # indistinguishable from cfdbr ++ next if ($mnemonic eq "cfebra"); # indistinguishable from cfebr ++ next if ($mnemonic eq "cfxbra"); # indistinguishable from cfxbr ++ next if ($mnemonic eq "cgdbra"); # indistinguishable from cgdbr ++ next if ($mnemonic eq "cgebra"); # indistinguishable from cgebr ++ next if ($mnemonic eq "cgxbra"); # indistinguishable from cgxbr ++ next if ($mnemonic eq "cdfbra"); # indistinguishable from cdfbr ++ next if ($mnemonic eq "cefbra"); # indistinguishable from cefbr ++ next if ($mnemonic eq "cxfbra"); # indistinguishable from cxfbr ++ next if ($mnemonic eq "cegbra"); # indistinguishable from cegbr ++ next if ($mnemonic eq "cdgbra"); # indistinguishable from cdgbr ++ next if ($mnemonic eq "cegbra"); # indistinguishable from cegbr ++ next if ($mnemonic eq "cxgbra"); # indistinguishable from cxgbr ++ next if ($mnemonic eq "ldxbra"); # indistinguishable from ldxbr ++ next if ($mnemonic eq "lexbra"); # indistinguishable from lexbr ++ next if ($mnemonic eq "ledbra"); # indistinguishable from ledbr ++ next if ($mnemonic eq "cdgtr"); # indistinguishable from cdgtra ++ next if ($mnemonic eq "cxgtra"); # indistinguishable from cxgtr ++ next if ($mnemonic eq "cgdtra"); # indistinguishable from cgdtr ++ next if ($mnemonic eq "cgxtra"); # indistinguishable from cgxtr ++ next if ($mnemonic eq "fidbr"); # indistinguishable from fidbra ++ next if ($mnemonic eq "fiebr"); # indistinguishable from fiebra ++ next if ($mnemonic eq "fixbr"); # indistinguishable from fixbra ++ next if ($mnemonic eq "adtr"); # indistinguishable from adtra ++ next if ($mnemonic eq "sdtr"); # indistinguishable from sdtra ++ next if ($mnemonic eq "ddtr"); # indistinguishable from ddtra ++ next if ($mnemonic eq "mdtr"); # indistinguishable from mdtra ++ next if ($mnemonic =~ /$extended_mnemonics_pattern/); ++ ++ # Complain about duplicate entries. We don't want them. 
++ if ($csv_desc{$mnemonic}) { ++ print "$mnemonic: duplicate entry\n"; ++ } else { ++ $csv_desc{$mnemonic} = $description; ++ } ++ # Remember whether it is implemented or not ++ next if ($line =~ /not\s+implemented/); ++ next if ($line =~ /N\/A/); ++ next if ($line =~ /won't do/); ++ if ($line =~ /implemented/) { ++ $csv_implemented{$mnemonic} = 1; ++ } else { ++ print "*** unknown implementation status of $mnemonic\n"; ++ } ++} ++close(CSV); ++ ++#---------------------------------------------------- ++# Read s390_guest_toIR.c file. Compile list of implemented opcodes ++#---------------------------------------------------- ++open(TOIR, "$toir_file") || die "cannot open $toir_file\n"; ++while (my $line = ) { ++ chomp $line; ++ if ($line =~ /goto\s+unimplemented/) { ++ # Assume this is in the decoder ++ if ($line =~ /\/\*\s([A-Z][A-Z0-9]*)\s\*\//) { ++ my $mnemonic = lc $1; ++ $toir_decoded{$mnemonic} = 1; ++ } ++ } elsif ($line =~ /^s390_irgen_([A-Z][A-Z0-9]*)\b/) { ++ my $mnemonic = lc $1; ++ $toir_implemented{$mnemonic} = 1; ++ } ++} ++close(TOIR); ++ ++#---------------------------------------------------- ++# 1) Make sure there are no missing/extra opcodes ++#---------------------------------------------------- ++foreach my $opc (keys %opc_desc) { ++ if (! $csv_desc{$opc}) { ++ print "*** opcode $opc not listed in $csv_file\n"; ++ } ++} ++foreach my $opc (keys %csv_desc) { ++ if (! $opc_desc{$opc}) { ++ print "*** opcode $opc not listed in $opc_file\n"; ++ } ++} ++ ++#---------------------------------------------------- ++# 2) Make sure opcode descriptions are the same ++#---------------------------------------------------- ++foreach my $opc (keys %opc_desc) { ++ if (defined $csv_desc{$opc}) { ++ if ($opc_desc{$opc} ne $csv_desc{$opc}) { ++ print "*** opcode $opc differs:\n"; ++ print " binutils: $opc_desc{$opc}\n"; ++ print " opcodes.csv: $csv_desc{$opc}\n"; ++ } ++ } ++} ++ ++#---------------------------------------------------- ++# 3) Make sure implemented'ness is correct ++#---------------------------------------------------- ++foreach my $opc (keys %toir_implemented) { ++ if (! $csv_implemented{$opc}) { ++ print "*** opcode $opc is implemented but CSV file does not say so\n"; ++ } ++} ++ ++foreach my $opc (keys %csv_implemented) { ++ if (! $toir_implemented{$opc}) { ++ print "*** opcode $opc is not implemented but CSV file says so\n"; ++ } ++} ++ ++#---------------------------------------------------- ++# 4) Make sure all opcodes are handled by the decoder ++#---------------------------------------------------- ++ ++# We only have to check those for which we don't generate IR. ++ ++foreach my $opc (keys %opc_desc) { ++ if (! $toir_implemented{$opc} && ! $toir_decoded{$opc}) { ++ print "*** opcode $opc is not handled by the decoder\n"; ++ } ++} ++ ++print "there are " . int(keys %toir_implemented) . " implemented opcodes\n"; ++exit 0 +diff '--color=auto' -ru --new-file valgrind-3.21.0/auxprogs/update-demangler valgrind-riscv64/auxprogs/update-demangler +--- valgrind-3.21.0/auxprogs/update-demangler 2023-01-07 22:48:49.000000000 +0800 ++++ valgrind-riscv64/auxprogs/update-demangler 2022-09-21 06:23:46.000000000 +0800 +@@ -17,8 +17,8 @@ + #--------------------------------------------------------------------- + + # You need to modify these revision numbers for your update. 
+-old_gcc_revision=b3585c0836e729bed56b9afd4292177673a25ca0 # the revision of the previous update +-new_gcc_revision=d3b2ead595467166c849950ecd3710501a5094d9 # the revision for this update ++old_gcc_revision=01d92cfd79872e4cffc78bf233bb9b767336beb8 # the revision of the previous update ++new_gcc_revision=b3585c0836e729bed56b9afd4292177673a25ca0 # the revision for this update + + # Unless the organization of demangler related files has changed, no + # changes below this line should be necessary. +@@ -37,7 +37,7 @@ + mkdir gcc + cd gcc + git init +-git remote add origin https://gcc.gnu.org/git/gcc.git ++git remote add origin git://gcc.gnu.org/git/gcc.git + git config core.sparsecheckout true + echo "libiberty/*" > .git/info/sparse-checkout + echo "include/*" >> .git/info/sparse-checkout +@@ -91,7 +91,7 @@ + mkdir valgrind-sparse-clone + cd valgrind-sparse-clone + git init +-git remote add origin -f https://sourceware.org/git/valgrind.git/ ++git remote add origin -f git://sourceware.org/git/valgrind.git/ + git config core.sparsecheckout true + echo "coregrind/m_demangle/*" > .git/info/sparse-checkout + git pull origin master +diff '--color=auto' -ru --new-file valgrind-3.21.0/auxprogs/valgrind-di-server.c valgrind-riscv64/auxprogs/valgrind-di-server.c +--- valgrind-3.21.0/auxprogs/valgrind-di-server.c 2023-03-22 17:10:13.000000000 +0800 ++++ valgrind-riscv64/auxprogs/valgrind-di-server.c 2022-09-21 06:23:46.000000000 +0800 +@@ -98,7 +98,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + #include +@@ -774,12 +774,10 @@ + int r = fstat(fd, &stat_buf); + if (r != 0) { + res = mk_Frame_asciiz("FAIL", "OPEN: cannot stat file"); +- close(fd); + ok = False; + } + if (ok && stat_buf.st_size == 0) { + res = mk_Frame_asciiz("FAIL", "OPEN: file has zero size"); +- close(fd); + ok = False; + } + if (ok) { +diff '--color=auto' -ru --new-file valgrind-3.21.0/auxprogs/valgrind-listener.c valgrind-riscv64/auxprogs/valgrind-listener.c +--- valgrind-3.21.0/auxprogs/valgrind-listener.c 2023-03-22 17:10:13.000000000 +0800 ++++ valgrind-riscv64/auxprogs/valgrind-listener.c 2022-09-21 06:23:46.000000000 +0800 +@@ -47,7 +47,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + #include +diff '--color=auto' -ru --new-file valgrind-3.21.0/cachegrind/cg_annotate.in valgrind-riscv64/cachegrind/cg_annotate.in +--- valgrind-3.21.0/cachegrind/cg_annotate.in 2023-04-21 21:20:47.000000000 +0800 ++++ valgrind-riscv64/cachegrind/cg_annotate.in 2022-09-21 06:23:46.000000000 +0800 +@@ -1,1221 +1,940 @@ +-#! /usr/bin/env python3 +-# pyright: strict ++#! @PERL@ + +-# -------------------------------------------------------------------- +-# --- Cachegrind's annotator. cg_annotate.in --- +-# -------------------------------------------------------------------- ++##--------------------------------------------------------------------## ++##--- Cachegrind's annotator. cg_annotate.in ---## ++##--------------------------------------------------------------------## + +-# This file is part of Cachegrind, a Valgrind tool for cache +-# profiling programs. ++# This file is part of Cachegrind, a Valgrind tool for cache ++# profiling programs. 
+ # +-# Copyright (C) 2002-2023 Nicholas Nethercote +-# njn@valgrind.org ++# Copyright (C) 2002-2017 Nicholas Nethercote ++# njn@valgrind.org + # +-# This program is free software; you can redistribute it and/or +-# modify it under the terms of the GNU General Public License as +-# published by the Free Software Foundation; either version 2 of the +-# License, or (at your option) any later version. ++# This program is free software; you can redistribute it and/or ++# modify it under the terms of the GNU General Public License as ++# published by the Free Software Foundation; either version 2 of the ++# License, or (at your option) any later version. + # +-# This program is distributed in the hope that it will be useful, but +-# WITHOUT ANY WARRANTY; without even the implied warranty of +-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +-# General Public License for more details. ++# This program is distributed in the hope that it will be useful, but ++# WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++# General Public License for more details. + # +-# You should have received a copy of the GNU General Public License +-# along with this program; if not, see . ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, see . + # +-# The GNU General Public License is contained in the file COPYING. ++# The GNU General Public License is contained in the file COPYING. + +-# This script reads Cachegrind output files and produces human-readable output. ++#---------------------------------------------------------------------------- ++# The file format is simple, basically printing the cost centre for every ++# source line, grouped by files and functions. The details are in ++# Cachegrind's manual. ++ ++#---------------------------------------------------------------------------- ++# Performance improvements record, using cachegrind.out for cacheprof, doing no ++# source annotation (irrelevant ones removed): ++# user time ++# 1. turned off warnings in add_hash_a_to_b() 3.81 --> 3.48s ++# [now add_array_a_to_b()] ++# 6. make line_to_CC() return a ref instead of a hash 3.01 --> 2.77s + # +-# Use `make pyann` to "build" this script with `auxprogs/pybuild.rs` every time +-# it is changed. This runs the formatters, type-checkers, and linters on +-# `cg_annotate.in` and then generates `cg_annotate`. +- +-from __future__ import annotations +- +-import filecmp +-import os +-import re +-import sys +-from argparse import ArgumentParser, BooleanOptionalAction, Namespace +-from collections import defaultdict +-from typing import Callable, DefaultDict, NoReturn, TextIO +- +- +-def die(msg: str) -> NoReturn: +- print("cg_annotate: error:", msg, file=sys.stderr) +- sys.exit(1) +- +- +-SearchAndReplace = Callable[[str], str] +- +-# A typed wrapper for parsed args. +-class Args(Namespace): +- # None of these fields are modified after arg parsing finishes. +- diff: bool +- mod_filename: SearchAndReplace +- mod_funcname: SearchAndReplace +- show: list[str] +- sort: list[str] +- threshold: float # a percentage +- show_percs: bool +- annotate: bool +- context: int +- cgout_filename: list[str] +- +- @staticmethod +- def parse() -> Args: +- # We support Perl-style `s/old/new/flags` search-and-replace +- # expressions, because that's how this option was implemented in the +- # old Perl version of `cg_diff`. This requires conversion from +- # `s/old/new/` style to `re.sub`. 
The conversion isn't a perfect +- # emulation of Perl regexps (e.g. Python uses `\1` rather than `$1` for +- # using captures in the `new` part), but it should be close enough. The +- # only supported flags are `g` (global) and `i` (ignore case). +- def search_and_replace(regex: str | None) -> SearchAndReplace: +- if regex is None: +- return lambda s: s +- +- # Extract the parts of an `s/old/new/tail` regex. `(? list[str]: +- return values.split(",") +- +- def threshold(n: str) -> float: +- f = float(n) +- if 0 <= f <= 20: +- return f +- raise ValueError +- +- # Add a bool argument that defaults to true. +- # +- # Supports these forms: `--foo`, `--no-foo`, `--foo=yes`, `--foo=no`. +- # The latter two were the forms supported by the old Perl version of +- # `cg_annotate`, and are now deprecated. +- def add_bool_argument( +- p: ArgumentParser, new_name: str, old_name: str, help_: str +- ) -> None: +- new_flag = "--" + new_name +- old_flag = "--" + old_name +- dest = new_name.replace("-", "_") +- +- # Note: the default value is always printed with `BooleanOptionalAction`, +- # due to an argparse bug: https://github.com/python/cpython/issues/83137. +- p.add_argument( +- new_flag, +- default=True, +- action=BooleanOptionalAction, +- help=help_, +- ) +- p.add_argument( +- f"{old_flag}=yes", +- dest=dest, +- action="store_true", +- help=f"(deprecated) same as --{new_name}", +- ) +- p.add_argument( +- f"{old_flag}=no", +- dest=dest, +- action="store_false", +- help=f"(deprecated) same as --no-{new_name}", +- ) +- +- p = ArgumentParser(description="Process one or more Cachegrind output files.") +- +- p.add_argument("--version", action="version", version="%(prog)s-@VERSION@") +- p.add_argument( +- "--diff", +- default=False, +- action="store_true", +- help="perform a diff between two Cachegrind output files", +- ) +- p.add_argument( +- "--mod-filename", +- type=search_and_replace, +- metavar="REGEX", +- default=search_and_replace(None), +- help="a search-and-replace regex applied to filenames, e.g. " +- "`s/prog[0-9]/progN/`", +- ) +- p.add_argument( +- "--mod-funcname", +- type=search_and_replace, +- metavar="REGEX", +- default=search_and_replace(None), +- help="like --mod-filename, but for function names", +- ) +- p.add_argument( +- "--show", +- type=comma_separated_list, +- metavar="A,B,C", +- help="only show figures for events A,B,C (default: all events)", +- ) +- p.add_argument( +- "--sort", +- type=comma_separated_list, +- metavar="A,B,C", +- help="sort functions by events A,B,C (default: event column order)", +- ) +- p.add_argument( +- "--threshold", +- type=threshold, +- default=0.1, +- metavar="N:[0,20]", +- help="only show file:function/function:file pairs with more than " +- "N%% of primary sort event counts (default: %(default)s)", +- ) +- add_bool_argument( +- p, +- "show-percs", +- "show-percs", +- "show a percentage for each non-zero count", +- ) +- add_bool_argument( +- p, +- "annotate", +- "auto", +- "annotate all source files containing functions that reached the " +- "event count threshold", +- ) +- p.add_argument( +- "--context", +- type=int, +- default=8, +- metavar="N", +- help="print N lines of context before and after annotated lines " +- "(default: %(default)s)", +- ) +- p.add_argument( +- "cgout_filename", +- nargs="+", +- metavar="cachegrind-out-file", +- help="file produced by Cachegrind", +- ) +- +- # `args0` name used to avoid shadowing the global `args`, which pylint +- # doesn't like. 
+- args0 = p.parse_args(namespace=Args()) +- if args0.diff and len(args0.cgout_filename) != 2: +- p.print_usage(file=sys.stderr) +- die("argument --diff: requires exactly two Cachegrind output files") +- +- return args0 +- +- +-# Args are stored in a global for easy access. +-args = Args.parse() +- +- +-# A single instance of this class is constructed, from `args` and the `events:` +-# line in the cgout file. +-class Events: +- # The event names. +- events: list[str] +- +- # Equal to `len(self.events)`. +- num_events: int +- +- # The order in which we must traverse events for --show. Can be shorter +- # than `events`. +- show_events: list[str] +- +- # Like `show_events`, but indices into `events`, rather than names. +- show_indices: list[int] +- +- # The order in which we must traverse events for --sort. Can be shorter +- # than `events`. +- sort_events: list[str] +- +- # Like `sort_events`, but indices into `events`, rather than names. +- sort_indices: list[int] +- +- def __init__(self) -> None: +- # All fields are left uninitialized here, and set instead in `init`. +- pass +- +- def init(self, text: str) -> None: +- self.events = text.split() +- self.num_events = len(self.events) +- +- # A temporary dict mapping events to indices, [0, n-1]. +- event_indices = {event: n for n, event in enumerate(self.events)} +- +- # If --show is given, check it is valid. If --show is not given, +- # default to all events in the standard order. +- if args.show: +- for event in args.show: +- if event not in event_indices: +- die(f"--show event `{event}` did not appear in `events:` line") +- self.show_events = args.show +- else: +- self.show_events = self.events +- +- self.show_indices = [event_indices[event] for event in self.show_events] +- +- # Likewise for --sort. +- if args.sort: +- for event in args.sort: +- if event not in event_indices: +- die(f"--sort event `{event}` did not appear in `events:` line") +- self.sort_events = args.sort +- else: +- self.sort_events = self.events +- +- self.sort_indices = [event_indices[event] for event in self.sort_events] +- +- # Raises a `ValueError` exception on syntax error. +- def mk_cc(self, str_counts: list[str]) -> Cc: +- # This is slightly faster than a list comprehension. +- counts = list(map(int, str_counts)) +- +- if len(counts) == self.num_events: +- pass +- elif len(counts) < self.num_events: +- # Add zeroes at the end for any missing numbers. +- counts.extend([0] * (self.num_events - len(counts))) +- else: +- raise ValueError +- +- return counts +- +- def mk_empty_cc(self) -> Cc: +- # This is much faster than a list comprehension. +- return [0] * self.num_events +- +- def mk_empty_dcc(self) -> Dcc: +- return Dcc(self.mk_empty_cc(), defaultdict(self.mk_empty_cc)) +- +- +-# A "cost centre", which is a dumb container for counts. Always the same length +-# as `Events.events`, but it doesn't even know event names. `Events.mk_cc` and +-# `Events.mk_empty_cc` are used for construction. ++#10. changed file format to avoid file/fn name repetition 2.40s ++# (not sure why higher; maybe due to new '.' entries?) ++#11. changed file format to drop unnecessary end-line "."s 2.36s ++# (shrunk file by about 37%) ++#12. switched from hash CCs to array CCs 1.61s ++#13. only adding b[i] to a[i] if b[i] defined (was doing it if ++# either a[i] or b[i] was defined, but if b[i] was undefined ++# it just added 0) 1.48s ++#14. Stopped converting "." entries to undef and then back 1.16s ++#15. Using foreach $i (x..y) instead of for ($i = 0...) 
in ++# add_array_a_to_b() 1.11s + # +-# This used to be a class with a single field `counts: list[int]`, but this +-# type is very hot and just using a type alias is much faster. +-Cc = list[int] +- +-# Add the counts in `a_cc` to `b_cc`. +-def add_cc_to_cc(a_cc: Cc, b_cc: Cc) -> None: +- for i, a_count in enumerate(a_cc): +- b_cc[i] += a_count +- +- +-# Subtract the counts in `a_cc` from `b_cc`. +-def sub_cc_from_cc(a_cc: Cc, b_cc: Cc) -> None: +- for i, a_count in enumerate(a_cc): +- b_cc[i] -= a_count +- +- +-# Unrolled version of `add_cc_to_cc`, for speed. +-def add_cc_to_ccs( +- a_cc: Cc, b_cc1: Cc, b_cc2: Cc, b_cc3: Cc, b_cc4: Cc, b_cc5: Cc, total_cc: Cc +-) -> None: +- for i, a_count in enumerate(a_cc): +- b_cc1[i] += a_count +- b_cc2[i] += a_count +- b_cc3[i] += a_count +- b_cc4[i] += a_count +- b_cc5[i] += a_count +- total_cc[i] += a_count +- +- +-# Unrolled version of `sub_cc_from_cc`, for speed. Note that the last one, +-# `total_cc`, is added. +-def sub_cc_from_ccs( +- a_cc: Cc, b_cc1: Cc, b_cc2: Cc, b_cc3: Cc, b_cc4: Cc, b_cc5: Cc, total_cc: Cc +-) -> None: +- for i, a_count in enumerate(a_cc): +- b_cc1[i] -= a_count +- b_cc2[i] -= a_count +- b_cc3[i] -= a_count +- b_cc4[i] -= a_count +- b_cc5[i] -= a_count +- total_cc[i] += a_count +- +- +-# Update `min_cc` and `max_cc` with `self`. +-def update_cc_extremes(self: Cc, min_cc: Cc, max_cc: Cc) -> None: +- for i, count in enumerate(self): +- if count > max_cc[i]: +- max_cc[i] = count +- elif count < min_cc[i]: +- min_cc[i] = count +- +- +-# Note: some abbrevations used below: +-# - Ofl/ofl: original filename, as mentioned in a cgout file. +-# - Ofn/ofn: original function name, as mentioned in a cgout file. +-# - Mfl/mfl: modified filename, the result of passing an Ofl through +-# `--mod-filename`. +-# - Mfn/mfn: modified function name, the result of passing an Ofn through +-# `--mod-funcname`. +-# - Mname/mname: modified name, used for what could be an Mfl or an Mfn. +- +-# A deep cost centre with a dict for the inner mnames and CCs. +-class Dcc: +- outer_cc: Cc +- inner_dict_mname_cc: DictMnameCc +- +- def __init__(self, outer_cc: Cc, inner_dict_mname_cc: DictMnameCc) -> None: +- self.outer_cc = outer_cc +- self.inner_dict_mname_cc = inner_dict_mname_cc +- +- +-# A deep cost centre with a list for the inner mnames and CCs. Used during +-# filtering and sorting. +-class Lcc: +- outer_cc: Cc +- inner_list_mname_cc: ListMnameCc +- +- def __init__(self, outer_cc: Cc, inner_list_mname_cc: ListMnameCc) -> None: +- self.outer_cc = outer_cc +- self.inner_list_mname_cc = inner_list_mname_cc +- +- +-# Per-Mfl/Mfn CCs. The list version is used during filtering and sorting. +-DictMnameCc = DefaultDict[str, Cc] +-ListMnameCc = list[tuple[str, Cc]] +- +-# Per-Mfl/Mfn DCCs. The outer Mnames are Mfls and the inner Mnames are Mfns, or +-# vice versa. The list version is used during filtering and sorting. +-DictMnameDcc = DefaultDict[str, Dcc] +-ListMnameLcc = list[tuple[str, Lcc]] +- +-# Per-line CCs, organised by Mfl and line number. +-DictLineCc = DefaultDict[int, Cc] +-DictMflDictLineCc = DefaultDict[str, DictLineCc] +- +-# A dictionary tracking how Ofls get mapped to Mfls by `--mod-filename`. If +-# `--mod-filename` isn't used, each entry will be the identity mapping: ("foo" +-# -> set(["foo"])). 
+-DictMflOfls = DefaultDict[str, set[str]] +- +- +-def read_cgout_file( +- cgout_filename: str, +- is_first_file: bool, +- descs: list[str], +- cmds: list[str], +- events: Events, +- dict_mfl_ofls: DictMflOfls, +- dict_mfl_dcc: DictMnameDcc, +- dict_mfn_dcc: DictMnameDcc, +- dict_mfl_dict_line_cc: DictMflDictLineCc, +- summary_cc: Cc, +-) -> None: +- # The file format is described in Cachegrind's manual. +- try: +- cgout_file = open(cgout_filename, "r", encoding="utf-8") +- except OSError as err: +- die(f"{err}") +- +- with cgout_file: +- cgout_line_num = 0 +- +- def parse_die(msg: str) -> NoReturn: +- die(f"{cgout_file.name}:{cgout_line_num}: {msg}") +- +- def readline() -> str: +- nonlocal cgout_line_num +- cgout_line_num += 1 +- return cgout_file.readline() +- +- # Read "desc:" lines. +- desc = "" +- while line := readline(): +- if m := re.match(r"desc:\s+(.*)", line): +- desc += m.group(1) + "\n" +- else: +- break +- descs.append(desc) +- +- # Read "cmd:" line. (`line` is already set from the "desc:" loop.) +- if m := re.match(r"cmd:\s+(.*)", line): +- cmds.append(m.group(1)) +- else: +- parse_die("missing a `command:` line") +- +- # Read "events:" line. +- line = readline() +- if m := re.match(r"events:\s+(.*)", line): +- if is_first_file: +- events.init(m.group(1)) +- else: +- events2 = Events() +- events2.init(m.group(1)) +- if events.events != events2.events: +- die("events in data files don't match") +- else: +- parse_die("missing an `events:` line") +- +- def mk_empty_dict_line_cc() -> DictLineCc: +- return defaultdict(events.mk_empty_cc) +- +- # The current Mfl and Mfn. +- mfl = "" +- mfn = "" +- +- # These values are passed in by reference and are modified by this +- # function. But they can't be properly initialized until the `events:` +- # line of the first file is read and the number of events is known. So +- # we initialize them in an invalid state in `main`, and then +- # reinitialize them properly here, before their first use. +- if is_first_file: +- dict_mfl_dcc.default_factory = events.mk_empty_dcc +- dict_mfn_dcc.default_factory = events.mk_empty_dcc +- dict_mfl_dict_line_cc.default_factory = mk_empty_dict_line_cc +- summary_cc.extend(events.mk_empty_cc()) +- +- # These are refs into the dicts above, used to avoid repeated lookups. +- # They are all overwritten before first use. +- mfl_dcc = events.mk_empty_dcc() +- mfn_dcc = events.mk_empty_dcc() +- mfl_dcc_inner_mfn_cc = events.mk_empty_cc() +- mfn_dcc_inner_mfl_cc = events.mk_empty_cc() +- dict_line_cc = mk_empty_dict_line_cc() +- total_cc = events.mk_empty_cc() +- +- # When diffing, we negate the first cgout file's counts to effectively +- # achieve `cgout2 - cgout1`. +- if args.diff and is_first_file: +- combine_cc_with_cc = sub_cc_from_cc +- combine_cc_with_ccs = sub_cc_from_ccs +- else: +- combine_cc_with_cc = add_cc_to_cc +- combine_cc_with_ccs = add_cc_to_ccs +- +- summary_cc_present = False +- +- # Line matching is done in order of pattern frequency, for speed. +- while line := readline(): +- if line[0].isdigit(): +- split_line = line.split() +- try: +- line_num = int(split_line[0]) +- cc = events.mk_cc(split_line[1:]) +- except ValueError: +- parse_die("malformed or too many event counts") +- +- # Record this CC at various levels. 
+- combine_cc_with_ccs( +- cc, +- mfl_dcc.outer_cc, +- mfn_dcc.outer_cc, +- mfl_dcc_inner_mfn_cc, +- mfn_dcc_inner_mfl_cc, +- dict_line_cc[line_num], +- total_cc, +- ) +- +- elif line.startswith("fn="): +- ofn = line[3:-1] +- mfn = args.mod_funcname(ofn) +- # `mfl_dcc` is unchanged. +- mfn_dcc = dict_mfn_dcc[mfn] +- mfl_dcc_inner_mfn_cc = mfl_dcc.inner_dict_mname_cc[mfn] +- mfn_dcc_inner_mfl_cc = mfn_dcc.inner_dict_mname_cc[mfl] +- +- elif line.startswith("fl="): +- ofl = line[3:-1] +- mfl = args.mod_filename(ofl) +- dict_mfl_ofls[mfl].add(ofl) +- # A `fn=` line should follow, overwriting the function name. +- mfn = "" +- mfl_dcc = dict_mfl_dcc[mfl] +- mfn_dcc = dict_mfn_dcc[mfn] +- mfl_dcc_inner_mfn_cc = mfl_dcc.inner_dict_mname_cc[mfn] +- mfn_dcc_inner_mfl_cc = mfn_dcc.inner_dict_mname_cc[mfl] +- dict_line_cc = dict_mfl_dict_line_cc[mfl] +- +- elif m := re.match(r"summary:\s+(.*)", line): +- summary_cc_present = True +- try: +- this_summary_cc = events.mk_cc(m.group(1).split()) +- combine_cc_with_cc(this_summary_cc, summary_cc) +- +- # Check summary is correct. Note that `total_cc` doesn't +- # get negated for the first file in a diff, unlike the +- # other CCs, because it's only used here as a sanity check. +- if this_summary_cc != total_cc: +- msg = ( +- "`summary:` line doesn't match computed total\n" +- f"- summary: {this_summary_cc}\n" +- f"- computed: {total_cc}" +- ) +- parse_die(msg) +- +- except ValueError: +- parse_die("malformed or too many event counts") +- +- elif line == "\n" or line.startswith("#"): +- # Skip empty lines and comment lines. +- pass +- +- else: +- parse_die(f"malformed line: {line[:-1]}") +- +- # Check if summary line was present. +- if not summary_cc_present: +- parse_die("missing `summary:` line, aborting") +- +- +-# The width of a column, in three parts. +-class Width: +- # Width of the widest commified event count. +- count: int +- +- # Width of the widest first percentage, of the form ` (n.n%)` or ` (n.n%,`. +- perc1: int +- +- # Width of the widest second percentage, of the form ` n.n%)`. +- perc2: int +- +- def __init__(self, count: int, perc1: int, perc2: int) -> None: +- self.count = count +- self.perc1 = perc1 +- self.perc2 = perc2 +- +- +-class CcPrinter: +- # Note: every `CcPrinter` gets the same `Events` object. +- events: Events +- +- # Note: every `CcPrinter` gets the same summary CC. +- summary_cc: Cc +- +- # String to print before the event names. +- events_prefix: str +- +- # The widths of each event column. For simplicity, its length matches +- # `events.events`, even though not all events are necessarily shown. +- widths: list[Width] +- +- # Text of a missing CC, which can be computed in advance. +- missing_cc_str: str +- +- # Must call `init_ccs` or `init_list_mname_lcc` after this. +- def __init__(self, events: Events, summary_cc: Cc) -> None: +- self.events = events +- self.summary_cc = summary_cc +- # Other fields initialized in `init_*`. +- +- def init_ccs(self, ccs: list[Cc]) -> None: +- self.events_prefix = "" +- +- # Find min and max count for each event. One of them will be the widest +- # value. +- min_cc = self.events.mk_empty_cc() +- max_cc = self.events.mk_empty_cc() +- for cc in ccs: +- update_cc_extremes(cc, min_cc, max_cc) +- +- self.init_widths(min_cc, max_cc, None, None) +- +- def init_list_mname_lcc(self, list_mname_lcc: ListMnameLcc) -> None: +- self.events_prefix = " " +- +- cumul_cc = self.events.mk_empty_cc() +- +- # Find min and max value for each event. One of them will be the widest +- # value. 
Likewise for the cumulative counts. +- min_cc = self.events.mk_empty_cc() +- max_cc = self.events.mk_empty_cc() +- min_cumul_cc = self.events.mk_empty_cc() +- max_cumul_cc = self.events.mk_empty_cc() +- for _, lcc in list_mname_lcc: +- # Consider both outer and inner CCs for `count` and `perc1`. +- update_cc_extremes(lcc.outer_cc, min_cc, max_cc) +- for _, inner_cc in lcc.inner_list_mname_cc: +- update_cc_extremes(inner_cc, min_cc, max_cc) +- +- # Consider only outer CCs for `perc2`. +- add_cc_to_cc(lcc.outer_cc, cumul_cc) +- update_cc_extremes(cumul_cc, min_cumul_cc, max_cumul_cc) +- +- self.init_widths(min_cc, max_cc, min_cumul_cc, max_cumul_cc) +- +- def init_widths( +- self, min_cc1: Cc, max_cc1: Cc, min_cc2: Cc | None, max_cc2: Cc | None +- ) -> None: +- self.widths = [Width(0, 0, 0)] * self.events.num_events +- for i in range(len(self.events.events)): +- # Get count and percs widths of the min and max CCs. +- (min_count, min_perc1, min_perc2) = self.count_and_percs_strs( +- min_cc1, min_cc2, i +- ) +- (max_count, max_perc1, max_perc2) = self.count_and_percs_strs( +- max_cc1, max_cc2, i +- ) +- self.widths[i] = Width( +- max(len(min_count), len(max_count)), +- max(len(min_perc1), len(max_perc1)), +- max(len(min_perc2), len(max_perc2)), +- ) +- +- self.missing_cc_str = "" +- for i in self.events.show_indices: +- self.missing_cc_str += self.count_and_percs_str(i, ".", "", "") +- +- # Get the count and perc string for `cc1[i]` and the perc string for +- # `cc2[i]`. (Unless `cc2` is `None`, in which case `perc2` will be "".) +- def count_and_percs_strs( +- self, cc1: Cc, cc2: Cc | None, i: int +- ) -> tuple[str, str, str]: +- count = f"{cc1[i]:,d}" # commify +- if args.show_percs: +- summary_count = self.summary_cc[i] +- if cc2 is None: +- # A plain or inner CC, with a single percentage. +- if cc1[i] == 0: +- # Don't show percentages for "0" entries, it's just clutter. +- perc1 = "" +- elif summary_count == 0: +- # Avoid dividing by zero. +- perc1 = " (n/a)" +- else: +- perc1 = f" ({cc1[i] * 100 / summary_count:.1f}%)" +- perc2 = "" +- else: +- # An outer CC, with two percentages. +- if summary_count == 0: +- # Avoid dividing by zero. +- perc1 = " (n/a," +- perc2 = " n/a)" +- else: +- perc1 = f" ({cc1[i] * 100 / summary_count:.1f}%," +- perc2 = f" {cc2[i] * 100 / summary_count:.1f}%)" +- else: +- perc1 = "" +- perc2 = "" +- +- return (count, perc1, perc2) +- +- def count_and_percs_str(self, i: int, count: str, perc1: str, perc2: str) -> str: +- event_w = len(self.events.events[i]) +- count_w = self.widths[i].count +- perc1_w = self.widths[i].perc1 +- perc2_w = self.widths[i].perc2 +- pre_w = max(0, event_w - count_w - perc1_w - perc2_w) +- return f"{'':>{pre_w}}{count:>{count_w}}{perc1:>{perc1_w}}{perc2:>{perc2_w}} " +- +- def print_events(self, suffix: str) -> None: +- print(self.events_prefix, end="") +- for i in self.events.show_indices: +- event = self.events.events[i] +- event_w = len(event) +- count_w = self.widths[i].count +- perc1_w = self.widths[i].perc1 +- perc2_w = self.widths[i].perc2 +- print(f"{event:_<{max(event_w, count_w + perc1_w + perc2_w)}} ", end="") +- +- print(suffix) +- +- def print_lcc(self, indent: str, lcc: Lcc, outer_mname: str, cumul_cc: Cc) -> None: +- print(indent, end="") +- if ( +- len(lcc.inner_list_mname_cc) == 1 +- and lcc.outer_cc == lcc.inner_list_mname_cc[0][1] +- ): +- # There is only one inner CC, it met the threshold, and it is equal +- # to the outer CC. Print the inner CC and outer CC in a single +- # line, because they are the same. 
+- inner_mname = lcc.inner_list_mname_cc[0][0] +- self.print_cc(lcc.outer_cc, cumul_cc, f"{outer_mname}:{inner_mname}") +- else: +- # There are multiple inner CCs, and at least one met the threshold. +- # Print the outer CC and then the inner CCs, indented. +- self.print_cc(lcc.outer_cc, cumul_cc, f"{outer_mname}:") +- for inner_mname, inner_cc in lcc.inner_list_mname_cc: +- print(" ", end="") +- self.print_cc(inner_cc, None, f" {inner_mname}") +- print() +- +- # If `cc2` is `None`, it's a vanilla CC or inner CC. Otherwise, it's an +- # outer CC. +- def print_cc(self, cc: Cc, cc2: Cc | None, suffix: str) -> None: +- for i in self.events.show_indices: +- (count, perc1, perc2) = self.count_and_percs_strs(cc, cc2, i) +- print(self.count_and_percs_str(i, count, perc1, perc2), end="") +- +- print("", suffix) +- +- def print_missing_cc(self, suffix: str) -> None: +- print(self.missing_cc_str, suffix) +- +- +-# Used in various places in the output. +-def print_fancy(text: str) -> None: +- fancy = "-" * 80 +- print(fancy) +- print("--", text) +- print(fancy) +- +- +-def print_metadata(descs: list[str], cmds: list[str], events: Events) -> None: +- print_fancy("Metadata") +- +- def all_the_same(strs: list[str]) -> bool: +- for i in range(len(strs) - 1): +- if strs[i] != strs[i + 1]: +- return False +- +- return True +- +- print("Invocation: ", *sys.argv) +- +- # When there are multiple descriptions, they are usually all the same. Only +- # print the description once in that case. +- if all_the_same(descs): +- print(descs[0], end="") +- else: +- for i, desc in enumerate(descs): +- print(f"Description {i+1}:") +- print(desc, end="") +- +- # Commands are sometimes the same, sometimes not. Always print them +- # individually, but refer to the previous one when appropriate. +- if len(cmds) == 1: +- print("Command: ", cmds[0]) +- else: +- for i, cmd in enumerate(cmds): +- if i > 0 and cmds[i - 1] == cmd: +- print(f"Command {i+1}: (same as Command {i})") +- else: +- print(f"Command {i+1}: ", cmd) +- +- print("Events recorded: ", *events.events) +- print("Events shown: ", *events.show_events) +- print("Event sort order:", *events.sort_events) +- print("Threshold: ", args.threshold, "%", sep="") +- print("Annotation: ", "on" if args.annotate else "off") +- print() +- +- +-def print_summary(events: Events, summary_cc: Cc) -> None: +- printer = CcPrinter(events, summary_cc) +- printer.init_ccs([summary_cc]) +- print_fancy("Summary") +- printer.print_events("") +- print() +- printer.print_cc(summary_cc, None, "PROGRAM TOTALS") +- print() +- +- +-def print_mname_summary( +- kind: str, indent: str, events: Events, dict_mname_dcc: DictMnameDcc, summary_cc: Cc +-) -> set[str]: +- # The primary sort event is used for the threshold. +- threshold_index = events.sort_indices[0] +- +- # Convert the threshold from a percentage to an event count. +- threshold = args.threshold * abs(summary_cc[threshold_index]) / 100 +- +- def meets_threshold(mname_and_cc: tuple[str, Cc]) -> bool: +- cc = mname_and_cc[1] +- return abs(cc[threshold_index]) >= threshold +- +- # Create a list with the outer CC counts in sort order, so that +- # left-to-right list comparison does the right thing. Plus the outer name +- # at the end for deterministic output when all the event counts are +- # identical in two CCs. 
+- def key_mname_and_lcc(mname_and_lcc: tuple[str, Lcc]) -> tuple[list[int], str]: +- (outer_mname, lcc) = mname_and_lcc +- return ( +- [abs(lcc.outer_cc[i]) for i in events.sort_indices], +- outer_mname, +- ) +- +- # Similar to `key_mname_and_lcc`. +- def key_mname_and_cc(mname_and_cc: tuple[str, Cc]) -> tuple[list[int], str]: +- (mname, cc) = mname_and_cc +- return ([abs(cc[i]) for i in events.sort_indices], mname) +- +- # This is a `filter_map` operation, which Python doesn't directly support. +- list_mname_lcc: ListMnameLcc = [] +- for outer_mname, dcc in dict_mname_dcc.items(): +- # Filter out inner CCs for which the primary sort event count is below the +- # threshold, and sort the remainder. +- inner_list_mname_cc = sorted( +- filter(meets_threshold, dcc.inner_dict_mname_cc.items()), +- key=key_mname_and_cc, +- reverse=True, +- ) +- +- # If no inner CCs meet the threshold, ignore the entire DCC, even if +- # the outer CC meets the threshold. +- if len(inner_list_mname_cc) == 0: +- continue +- +- list_mname_lcc.append((outer_mname, Lcc(dcc.outer_cc, inner_list_mname_cc))) +- +- list_mname_lcc = sorted(list_mname_lcc, key=key_mname_and_lcc, reverse=True) +- +- printer = CcPrinter(events, summary_cc) +- printer.init_list_mname_lcc(list_mname_lcc) +- print_fancy(kind + " summary") +- printer.print_events(" " + kind.lower()) +- print() +- +- # Print LCCs. +- threshold_mnames = set([]) +- cumul_cc = events.mk_empty_cc() +- for mname, lcc in list_mname_lcc: +- add_cc_to_cc(lcc.outer_cc, cumul_cc) +- printer.print_lcc(indent, lcc, mname, cumul_cc) +- threshold_mnames.add(mname) +- +- return threshold_mnames +- +- +-class AnnotatedCcs: +- line_nums_known_cc: Cc +- line_nums_unknown_cc: Cc +- non_identical_cc: Cc +- unreadable_cc: Cc +- below_threshold_cc: Cc +- files_unknown_cc: Cc +- +- labels = [ +- " annotated: files known & above threshold & readable, line numbers known", +- " annotated: files known & above threshold & readable, line numbers unknown", +- "unannotated: files known & above threshold & two or more non-identical", +- "unannotated: files known & above threshold & unreadable ", +- "unannotated: files known & below threshold", +- "unannotated: files unknown", +- ] +- +- def __init__(self, events: Events) -> None: +- self.line_nums_known_cc = events.mk_empty_cc() +- self.line_nums_unknown_cc = events.mk_empty_cc() +- self.non_identical_cc = events.mk_empty_cc() +- self.unreadable_cc = events.mk_empty_cc() +- self.below_threshold_cc = events.mk_empty_cc() +- self.files_unknown_cc = events.mk_empty_cc() +- +- def ccs(self) -> list[Cc]: +- return [ +- self.line_nums_known_cc, +- self.line_nums_unknown_cc, +- self.non_identical_cc, +- self.unreadable_cc, +- self.below_threshold_cc, +- self.files_unknown_cc, +- ] ++# Auto-annotating primes: ++#16. Finding count lengths by int((length-1)/3), not by ++# commifying (halves the number of commify calls) 1.68s --> 1.47s ++ ++use warnings; ++use strict; ++ ++#---------------------------------------------------------------------------- ++# Overview: the running example in the comments is for: ++# - events = A,B,C,D ++# - --show=C,A,D ++# - --sort=D,C ++#---------------------------------------------------------------------------- ++ ++#---------------------------------------------------------------------------- ++# Global variables, main data structures ++#---------------------------------------------------------------------------- ++# CCs are arrays, the counts corresponding to @events, with 'undef' ++# representing '.'. 
This makes things fast (faster than using hashes for CCs) ++# but we have to use @sort_order and @show_order below to handle the --sort and ++# --show options, which is a bit tricky. ++#---------------------------------------------------------------------------- ++ ++# Total counts for summary (an array reference). ++my $summary_CC; ++ ++# Totals for each function, for overall summary. ++# hash(filename:fn_name => CC array) ++my %fn_totals; ++ ++# Individual CCs, organised by filename and line_num for easy annotation. ++# hash(filename => hash(line_num => CC array)) ++my %allCCs; ++ ++# Files chosen for annotation on the command line. ++# key = basename (trimmed of any directory), value = full filename ++my %user_ann_files; ++ ++# Generic description string. ++my $desc = ""; ++ ++# Command line of profiled program. ++my $cmd; ++ ++# Events in input file, eg. (A,B,C,D) ++my @events; ++ ++# Events to show, from command line, eg. (C,A,D) ++my @show_events; ++ ++# Map from @show_events indices to @events indices, eg. (2,0,3). Gives the ++# order in which we must traverse @events in order to show the @show_events, ++# eg. (@events[$show_order[1]], @events[$show_order[2]]...) = @show_events. ++# (Might help to think of it like a hash (0 => 2, 1 => 0, 2 => 3).) ++my @show_order; ++ ++# Print out the function totals sorted by these events, eg. (D,C). ++my @sort_events; ++ ++# Map from @sort_events indices to @events indices, eg. (3,2). Same idea as ++# for @show_order. ++my @sort_order; ++ ++# Thresholds, one for each sort event (or default to 1 if no sort events ++# specified). We print out functions and do auto-annotations until we've ++# handled this proportion of all the events thresholded. ++my @thresholds; ++ ++my $default_threshold = 0.1; ++ ++my $single_threshold = $default_threshold; ++ ++# If on, show a percentage for each non-zero count. ++my $show_percs = 1; ++ ++# If on, automatically annotates all files that are involved in getting over ++# all the threshold counts. ++my $auto_annotate = 1; ++ ++# Number of lines to show around each annotated line. ++my $context = 8; ++ ++# Directories in which to look for annotation files. ++my @include_dirs = (""); ++ ++# Input file name ++my $input_file = undef; ++ ++# Version number ++my $version = "@VERSION@"; ++ ++# Usage message. ++my $usage = < a function is shown if it accounts for more than x% of ++ the counts of the primary sort event [$default_threshold] ++ --show-percs=yes|no show a percentage for each non-zero count [yes] ++ --auto=yes|no annotate all source files containing functions ++ that helped reach the event count threshold [yes] ++ --context=N print N lines of context before and after ++ annotated lines [8] ++ -I --include= add to list of directories to search for ++ source files ++ ++ cg_annotate is Copyright (C) 2002-2017 Nicholas Nethercote. ++ and licensed under the GNU General Public License, version 2. ++ Bug reports, feedback, admiration, abuse, etc, to: njn\@valgrind.org. ++ ++END ++; ++ ++# Used in various places of output. ++my $fancy = '-' x 80 . "\n"; ++ ++sub safe_div($$) ++{ ++ my ($x, $y) = @_; ++ return ($y == 0 ? 
0 : $x / $y); ++} ++ ++#----------------------------------------------------------------------------- ++# Argument and option handling ++#----------------------------------------------------------------------------- ++sub process_cmd_line() ++{ ++ for my $arg (@ARGV) { ++ ++ # Option handling ++ if ($arg =~ /^-/) { ++ ++ # --version ++ if ($arg =~ /^--version$/) { ++ die("cg_annotate-$version\n"); ++ ++ # --show=A,B,C ++ } elsif ($arg =~ /^--show=(.*)$/) { ++ @show_events = split(/,/, $1); ++ ++ # --sort=A,B,C ++ # Nb: You can specify thresholds individually, eg. ++ # --sort=A:99,B:95,C:90. These will override any --threshold ++ # argument. ++ } elsif ($arg =~ /^--sort=(.*)$/) { ++ @sort_events = split(/,/, $1); ++ my $th_specified = 0; ++ foreach my $i (0 .. scalar @sort_events - 1) { ++ if ($sort_events[$i] =~ /.*:([\d\.]+)%?$/) { ++ my $th = $1; ++ ($th >= 0 && $th <= 100) or die($usage); ++ $sort_events[$i] =~ s/:.*//; ++ $thresholds[$i] = $th; ++ $th_specified = 1; ++ } else { ++ $thresholds[$i] = 0; ++ } ++ } ++ if (not $th_specified) { ++ @thresholds = (); ++ } ++ ++ # --threshold=X (tolerates a trailing '%') ++ } elsif ($arg =~ /^--threshold=([\d\.]+)%?$/) { ++ $single_threshold = $1; ++ ($1 >= 0 && $1 <= 20) or die($usage); ++ ++ # --show-percs=yes|no ++ } elsif ($arg =~ /^--show-percs=yes$/) { ++ $show_percs = 1; ++ } elsif ($arg =~ /^--show-percs=no$/) { ++ $show_percs = 0; ++ ++ # --auto=yes|no ++ } elsif ($arg =~ /^--auto=yes$/) { ++ $auto_annotate = 1; ++ } elsif ($arg =~ /^--auto=no$/) { ++ $auto_annotate = 0; ++ ++ # --context=N ++ } elsif ($arg =~ /^--context=([\d\.]+)$/) { ++ $context = $1; ++ if ($context < 0) { ++ die($usage); ++ } ++ ++ # We don't handle "-I name" -- there can be no space. ++ } elsif ($arg =~ /^-I$/) { ++ die("Sorry, no space is allowed after a -I flag\n"); ++ ++ # --include=A,B,C. Allow -I=name for backwards compatibility. ++ } elsif ($arg =~ /^(-I=|-I|--include=)(.*)$/) { ++ my $inc = $2; ++ $inc =~ s|/$||; # trim trailing '/' ++ push(@include_dirs, "$inc/"); ++ ++ } else { # -h and --help fall under this case ++ die($usage); ++ } ++ ++ # Argument handling -- annotation file checking and selection. ++ # Stick filenames into a hash for quick 'n easy lookup throughout. ++ } else { ++ if (not defined $input_file) { ++ # First non-option argument is the output file. ++ $input_file = $arg; ++ } else { ++ # Subsequent non-option arguments are source files. ++ my $readable = 0; ++ foreach my $include_dir (@include_dirs) { ++ if (-r $include_dir . $arg) { ++ $readable = 1; ++ } ++ } ++ $readable or die("File $arg not found in any of: @include_dirs\n"); ++ $user_ann_files{$arg} = 1; ++ } ++ } ++ } ++ ++ # Must have chosen an input file ++ if (not defined $input_file) { ++ die($usage); ++ } ++} ++ ++#----------------------------------------------------------------------------- ++# Reading of input file ++#----------------------------------------------------------------------------- ++sub max ($$) ++{ ++ my ($x, $y) = @_; ++ return ($x > $y ? $x : $y); ++} ++ ++# Add the two arrays; any '.' entries are ignored. Two tricky things: ++# 1. If $a2->[$i] is undefined, it defaults to 0 which is what we want; we turn ++# off warnings to allow this. This makes things about 10% faster than ++# checking for definedness ourselves. ++# 2. We don't add an undefined count or a ".", even though it's value is 0, ++# because we don't want to make an $a2->[$i] that is undef become 0 ++# unnecessarily. 
++sub add_array_a_to_b ($$) ++{ ++ my ($a1, $a2) = @_; ++ ++ my $n = max(scalar @$a1, scalar @$a2); ++ $^W = 0; ++ foreach my $i (0 .. $n-1) { ++ $a2->[$i] += $a1->[$i] if (defined $a1->[$i] && "." ne $a1->[$i]); ++ } ++ $^W = 1; ++} ++ ++# Add each event count to the CC array. '.' counts become undef, as do ++# missing entries (implicitly). ++sub line_to_CC ($) ++{ ++ my @CC = (split /\s+/, $_[0]); ++ (@CC <= @events) or die("Line $.: too many event counts\n"); ++ return \@CC; ++} ++ ++sub read_input_file() ++{ ++ open(INPUTFILE, "< $input_file") ++ || die "Cannot open $input_file for reading\n"; ++ ++ # Read "desc:" lines. ++ my $line; ++ while ($line = ) { ++ if ($line =~ s/desc:\s+//) { ++ $desc .= $line; ++ } else { ++ last; ++ } ++ } ++ ++ # Read "cmd:" line (Nb: will already be in $line from "desc:" loop above). ++ ($line =~ s/^cmd:\s+//) or die("Line $.: missing command line\n"); ++ $cmd = $line; ++ chomp($cmd); # Remove newline ++ ++ # Read "events:" line. We make a temporary hash in which the Nth event's ++ # value is N, which is useful for handling --show/--sort options below. ++ $line = ; ++ (defined $line && $line =~ s/^events:\s+//) ++ or die("Line $.: missing events line\n"); ++ @events = split(/\s+/, $line); ++ my %events; ++ my $n = 0; ++ foreach my $event (@events) { ++ $events{$event} = $n; ++ $n++ ++ } ++ ++ # If no --show arg give, default to showing all events in the file. ++ # If --show option is used, check all specified events appeared in the ++ # "events:" line. Then initialise @show_order. ++ if (@show_events) { ++ foreach my $show_event (@show_events) { ++ (defined $events{$show_event}) or ++ die("--show event `$show_event' did not appear in input\n"); ++ } ++ } else { ++ @show_events = @events; ++ } ++ foreach my $show_event (@show_events) { ++ push(@show_order, $events{$show_event}); ++ } ++ ++ # Do as for --show, but if no --sort arg given, default to sorting by ++ # column order (ie. first column event is primary sort key, 2nd column is ++ # 2ndary key, etc). ++ if (@sort_events) { ++ foreach my $sort_event (@sort_events) { ++ (defined $events{$sort_event}) or ++ die("--sort event `$sort_event' did not appear in input\n"); ++ } ++ } else { ++ @sort_events = @events; ++ } ++ foreach my $sort_event (@sort_events) { ++ push(@sort_order, $events{$sort_event}); ++ } ++ ++ # If multiple threshold args weren't given via --sort, stick in the single ++ # threshold (either from --threshold if used, or the default otherwise) for ++ # the primary sort event, and 0% for the rest. ++ if (not @thresholds) { ++ foreach my $e (@sort_order) { ++ push(@thresholds, 100); ++ } ++ $thresholds[0] = $single_threshold; ++ } ++ ++ my $currFileName; ++ my $currFileFuncName; ++ ++ my $currFuncCC; ++ my $currFileCCs = {}; # hash(line_num => CC) ++ ++ # Read body of input file. ++ while () { ++ # Skip comments and empty lines. ++ next if /^\s*$/ || /^\#/; ++ ++ if (s/^(-?\d+)\s+//) { ++ my $lineNum = $1; ++ my $CC = line_to_CC($_); ++ defined($currFuncCC) || die; ++ add_array_a_to_b($CC, $currFuncCC); ++ ++ # If currFileName is selected, add CC to currFileName list. We look for ++ # full filename matches; or, if auto-annotating, we have to ++ # remember everything -- we won't know until the end what's needed. 
++ defined($currFileCCs) || die; ++ if ($auto_annotate || defined $user_ann_files{$currFileName}) { ++ my $currLineCC = $currFileCCs->{$lineNum}; ++ if (not defined $currLineCC) { ++ $currLineCC = []; ++ $currFileCCs->{$lineNum} = $currLineCC; ++ } ++ add_array_a_to_b($CC, $currLineCC); ++ } ++ ++ } elsif (s/^fn=(.*)$//) { ++ $currFileFuncName = "$currFileName:$1"; ++ $currFuncCC = $fn_totals{$currFileFuncName}; ++ if (not defined $currFuncCC) { ++ $currFuncCC = []; ++ $fn_totals{$currFileFuncName} = $currFuncCC; ++ } ++ ++ } elsif (s/^fl=(.*)$//) { ++ $currFileName = $1; ++ $currFileCCs = $allCCs{$currFileName}; ++ if (not defined $currFileCCs) { ++ $currFileCCs = {}; ++ $allCCs{$currFileName} = $currFileCCs; ++ } ++ # Assume that a "fn=" line is followed by a "fl=" line. ++ $currFileFuncName = undef; ++ ++ } elsif (s/^summary:\s+//) { ++ $summary_CC = line_to_CC($_); ++ (scalar(@$summary_CC) == @events) ++ or die("Line $.: summary event and total event mismatch\n"); ++ ++ } else { ++ warn("WARNING: line $. malformed, ignoring\n"); ++ } ++ } ++ ++ # Check if summary line was present ++ if (not defined $summary_CC) { ++ die("missing final summary line, aborting\n"); ++ } ++ ++ close(INPUTFILE); ++} ++ ++#----------------------------------------------------------------------------- ++# Print options used ++#----------------------------------------------------------------------------- ++sub print_options () ++{ ++ print($fancy); ++ print($desc); ++ print("Command: $cmd\n"); ++ print("Data file: $input_file\n"); ++ print("Events recorded: @events\n"); ++ print("Events shown: @show_events\n"); ++ print("Event sort order: @sort_events\n"); ++ print("Thresholds: @thresholds\n"); ++ ++ my @include_dirs2 = @include_dirs; # copy @include_dirs ++ shift(@include_dirs2); # remove "" entry, which is always the first ++ unshift(@include_dirs2, "") if (0 == @include_dirs2); ++ my $include_dir = shift(@include_dirs2); ++ print("Include dirs: $include_dir\n"); ++ foreach my $include_dir (@include_dirs2) { ++ print(" $include_dir\n"); ++ } ++ ++ my @user_ann_files = keys %user_ann_files; ++ unshift(@user_ann_files, "") if (0 == @user_ann_files); ++ my $user_ann_file = shift(@user_ann_files); ++ print("User annotated: $user_ann_file\n"); ++ foreach $user_ann_file (@user_ann_files) { ++ print(" $user_ann_file\n"); ++ } ++ ++ my $is_on = ($auto_annotate ? "on" : "off"); ++ print("Auto-annotation: $is_on\n"); ++ print("\n"); ++} ++ ++#----------------------------------------------------------------------------- ++# Print summary and sorted function totals ++#----------------------------------------------------------------------------- ++sub mycmp ($$) ++{ ++ my ($c, $d) = @_; ++ ++ # Iterate through sort events (eg. 3,2); return result if two are different ++ foreach my $i (@sort_order) { ++ my ($x, $y); ++ $x = $c->[$i]; ++ $y = $d->[$i]; ++ $x = -1 unless defined $x; ++ $y = -1 unless defined $y; ++ ++ my $cmp = abs($y) <=> abs($x); # reverse sort of absolute size ++ if (0 != $cmp) { ++ return $cmp; ++ } ++ } ++ # Exhausted events, equal ++ return 0; ++} ++ ++sub commify ($) { ++ my ($val) = @_; ++ 1 while ($val =~ s/^(-?\d+)(\d{3})/$1,$2/); ++ return $val; ++} ++ ++# Because the counts can get very big, and we don't want to waste screen space ++# and make lines too long, we compute exactly how wide each column needs to be ++# by finding the widest entry for each one. 
++sub compute_CC_col_widths (@) ++{ ++ my @CCs = @_; ++ my $CC_col_widths = []; ++ ++ # Initialise with minimum widths (from event names) ++ foreach my $event (@events) { ++ push(@$CC_col_widths, length($event)); ++ } ++ ++ # Find maximum width count for each column. @CC_col_width positions ++ # correspond to @CC positions. ++ foreach my $CC (@CCs) { ++ foreach my $i (0 .. scalar(@$CC)-1) { ++ if (defined $CC->[$i]) { ++ # Find length, accounting for commas that will be added, and ++ # possibly a percentage. ++ my $length = length $CC->[$i]; ++ my $width = $length + int(($length - 1) / 3); ++ if ($show_percs) { ++ $width += 9; # e.g. " (12.34%)" is 9 chars ++ } ++ $CC_col_widths->[$i] = max($CC_col_widths->[$i], $width); ++ } ++ } ++ } ++ return $CC_col_widths; ++} ++ ++# Print the CC with each column's size dictated by $CC_col_widths. ++sub print_CC ($$) ++{ ++ my ($CC, $CC_col_widths) = @_; ++ ++ foreach my $i (@show_order) { ++ my $count = (defined $CC->[$i] ? commify($CC->[$i]) : "."); ++ ++ my $perc = ""; ++ if ($show_percs) { ++ if (defined $CC->[$i] && $CC->[$i] != 0) { ++ # Try our best to keep the number fitting into 5 chars. This ++ # requires dropping a digit after the decimal place if it's ++ # sufficiently negative (e.g. "-10.0") or positive (e.g. ++ # "100.0"). Thanks to diffs it's possible to have even more ++ # extreme values, like "-100.0" or "1000.0"; those rare case ++ # will end up with slightly wrong indenting, oh well. ++ $perc = safe_div($CC->[$i] * 100, $summary_CC->[$i]); ++ $perc = (-9.995 < $perc && $perc < 99.995) ++ ? sprintf(" (%5.2f%%)", $perc) ++ : sprintf(" (%5.1f%%)", $perc); ++ } else { ++ # Don't show percentages for "." and "0" entries. ++ $perc = " "; ++ } ++ } ++ ++ # $reps will be negative for the extreme values mentioned above. The ++ # use of max() avoids a possible warning about a negative repeat count. ++ my $text = $count . $perc; ++ my $len = length($text); ++ my $reps = $CC_col_widths->[$i] - length($text); ++ my $space = ' ' x max($reps, 0); ++ print("$space$text "); ++ } ++} ++ ++sub print_events ($) ++{ ++ my ($CC_col_widths) = @_; ++ ++ foreach my $i (@show_order) { ++ my $event = $events[$i]; ++ my $event_width = length($event); ++ my $col_width = $CC_col_widths->[$i]; ++ my $space = ' ' x ($col_width - $event_width); ++ print("$event$space "); ++ } ++} ++ ++# Prints summary and function totals (with separate column widths, so that ++# function names aren't pushed over unnecessarily by huge summary figures). ++# Also returns a hash containing all the files that are involved in getting the ++# events count above the thresholds (ie. all the interesting ones). ++sub print_summary_and_fn_totals () ++{ ++ my @fn_fullnames = keys %fn_totals; ++ ++ # Work out the size of each column for printing (summary and functions ++ # separately). ++ my $summary_CC_col_widths = compute_CC_col_widths($summary_CC); ++ my $fn_CC_col_widths = compute_CC_col_widths(values %fn_totals); ++ ++ # Header and counts for summary ++ print($fancy); ++ print_events($summary_CC_col_widths); ++ print("\n"); ++ print($fancy); ++ print_CC($summary_CC, $summary_CC_col_widths); ++ print(" PROGRAM TOTALS\n"); ++ print("\n"); ++ ++ # Header for functions ++ print($fancy); ++ print_events($fn_CC_col_widths); ++ print(" file:function\n"); ++ print($fancy); ++ ++ # Sort function names into order dictated by --sort option. 
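A small sketch of how that ordering behaves, using hypothetical totals; it relies on the script's mycmp() and assumes the first event is the primary sort key:

    # Assumes @sort_order = (0), i.e. the first event is the primary sort key.
    my %fake_totals = (
        "a.c:main"   => [ 1200, 40 ],
        "a.c:helper" => [  300, 90 ],
        "b.c:init"   => [ 4500, 10 ],
    );
    my @by_cost = sort { mycmp($fake_totals{$a}, $fake_totals{$b}) } keys %fake_totals;
    # @by_cost is ("b.c:init", "a.c:main", "a.c:helper"): largest absolute count first.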
++ @fn_fullnames = sort { ++ mycmp($fn_totals{$a}, $fn_totals{$b}) ++ } @fn_fullnames; ++ ++ ++ # Assertion ++ (scalar @sort_order == scalar @thresholds) or ++ die("sort_order length != thresholds length:\n", ++ " @sort_order\n @thresholds\n"); ++ ++ my $threshold_files = {}; ++ # @curr_totals has the same shape as @sort_order and @thresholds ++ my @curr_totals = (); ++ foreach my $e (@thresholds) { ++ push(@curr_totals, 0); ++ } ++ ++ # Print functions, stopping when the threshold has been reached. ++ foreach my $fn_name (@fn_fullnames) { ++ ++ my $fn_CC = $fn_totals{$fn_name}; ++ ++ # Stop when we've reached all the thresholds ++ my $any_thresholds_exceeded = 0; ++ foreach my $i (0 .. scalar @thresholds - 1) { ++ my $prop = safe_div(abs($fn_CC->[$sort_order[$i]] * 100), ++ abs($summary_CC->[$sort_order[$i]])); ++ $any_thresholds_exceeded ||= ($prop >= $thresholds[$i]); ++ } ++ last if not $any_thresholds_exceeded; ++ ++ # Print function results ++ print_CC($fn_CC, $fn_CC_col_widths); ++ print(" $fn_name\n"); ++ ++ # Update the threshold counts ++ my $filename = $fn_name; ++ $filename =~ s/:.+$//; # remove function name ++ $threshold_files->{$filename} = 1; ++ foreach my $i (0 .. scalar @sort_order - 1) { ++ $curr_totals[$i] += $fn_CC->[$sort_order[$i]] ++ if (defined $fn_CC->[$sort_order[$i]]); ++ } ++ } ++ print("\n"); ++ ++ return $threshold_files; ++} ++ ++#----------------------------------------------------------------------------- ++# Annotate selected files ++#----------------------------------------------------------------------------- ++ ++# Issue a warning that the source file is more recent than the input file. ++sub warning_on_src_more_recent_than_inputfile ($) ++{ ++ my $src_file = $_[0]; + +- +-def mk_warning(msg: str) -> str: +- return f"""\ ++ my $warning = < None: +- s = "".join([f"@ - {ofl}\n" for ofl in ofls]) +- msg = f"""\ +-@ Original source files are all newer than data file '{cgout_filename}': +-{s}@ Annotations may not be correct. +-""" +- print(mk_warning(msg)) +- +- +-def warn_bogus_lines(src_filename: str) -> None: +- msg = f"""\ +-@@ Information recorded about lines past the end of '{src_filename}'. +-""" +- print(mk_warning(msg), end="") +- +- +-def print_annotated_src_file( +- events: Events, +- dict_line_cc: DictLineCc, +- src_file: TextIO, +- annotated_ccs: AnnotatedCcs, +- summary_cc: Cc, +-) -> None: +- printer = CcPrinter(events, summary_cc) +- printer.init_ccs(list(dict_line_cc.values())) +- # The starting fancy has already been printed by the caller. +- printer.print_events("") +- print() +- +- # The CC for line 0 is special, holding counts attributed to the source +- # file but not to any particular line (due to incomplete debug info). +- # Annotate the start of the file with this info, if present. +- line0_cc = dict_line_cc.pop(0, None) +- if line0_cc: +- suffix = "" +- printer.print_cc(line0_cc, None, suffix) +- add_cc_to_cc(line0_cc, annotated_ccs.line_nums_unknown_cc) +- print() +- +- # Find interesting line ranges: all lines with a CC, and all lines within +- # `args.context` lines of a line with a CC. 
+- line_nums = list(sorted(dict_line_cc.keys())) +- pairs: list[tuple[int, int]] = [] +- n = len(line_nums) +- i = 0 +- context = args.context +- while i < n: +- lo = max(line_nums[i] - context, 1) # `max` to prevent negatives +- while i < n - 1 and line_nums[i] + 2 * context >= line_nums[i + 1]: +- i += 1 +- hi = line_nums[i] + context +- pairs.append((lo, hi)) +- i += 1 +- +- def print_lines(pairs: list[tuple[int, int]]) -> None: +- line_num = 0 +- while pairs: +- src_line = "" +- (lo, hi) = pairs.pop(0) +- while line_num < lo - 1: +- src_line = src_file.readline() +- line_num += 1 +- if not src_line: +- return # EOF +- +- # Print line number, unless start of file. +- if lo != 1: +- print("-- line", lo, "-" * 40) +- +- while line_num < hi: +- src_line = src_file.readline() +- line_num += 1 +- if not src_line: +- return # EOF +- if line_nums and line_num == line_nums[0]: +- printer.print_cc(dict_line_cc[line_num], None, src_line[:-1]) +- add_cc_to_cc( +- dict_line_cc[line_num], annotated_ccs.line_nums_known_cc +- ) +- del line_nums[0] +- else: +- printer.print_missing_cc(src_line[:-1]) +- +- # Print line number. +- print("-- line", hi, "-" * 40) +- +- # Annotate chosen lines, tracking total annotated counts. +- if pairs: +- print_lines(pairs) +- +- # If there was info on lines past the end of the file, warn. +- if line_nums: +- print() +- for line_num in line_nums: +- printer.print_cc( +- dict_line_cc[line_num], None, f"" +- ) +- add_cc_to_cc(dict_line_cc[line_num], annotated_ccs.line_nums_known_cc) +- +- print() +- warn_bogus_lines(src_file.name) +- +- print() +- +- +-# This partially consumes `dict_mfl_dict_line_cc`, and fully consumes +-# `dict_mfl_olfs`. +-def print_annotated_src_files( +- ann_mfls: set[str], +- events: Events, +- dict_mfl_ofls: DictMflOfls, +- dict_mfl_dict_line_cc: DictMflDictLineCc, +- summary_cc: Cc, +-) -> AnnotatedCcs: +- annotated_ccs = AnnotatedCcs(events) +- +- def add_dict_line_cc_to_cc(dict_line_cc: DictLineCc, accum_cc: Cc) -> None: +- for line_cc in dict_line_cc.values(): +- add_cc_to_cc(line_cc, accum_cc) +- +- # Exclude the unknown ("???") file, which is unannotatable. +- ann_mfls.discard("???") +- if "???" in dict_mfl_dict_line_cc: +- dict_line_cc = dict_mfl_dict_line_cc.pop("???") +- add_dict_line_cc_to_cc(dict_line_cc, annotated_ccs.files_unknown_cc) +- +- def print_ann_fancy(mfl: str) -> None: +- print_fancy(f"Annotated source file: {mfl}") +- +- # This can raise an `OSError`. +- def all_ofl_contents_identical(ofls: list[str]) -> bool: +- for i in range(len(ofls) - 1): +- if not filecmp.cmp(ofls[i], ofls[i + 1], shallow=False): +- return False +- +- return True +- +- for mfl in sorted(ann_mfls): +- ofls = sorted(dict_mfl_ofls.pop(mfl)) +- first_ofl = ofls[0] +- +- try: +- if all_ofl_contents_identical(ofls): +- # All the Ofls that map to this Mfl are identical, which means we +- # can annotate, and it doesn't matter which Ofl we use. +- with open(first_ofl, "r", encoding="utf-8") as src_file: +- dict_line_cc = dict_mfl_dict_line_cc.pop(mfl) +- print_ann_fancy(mfl) +- +- # Because all the Ofls are identical, we can treat their +- # mtimes as if they are all as early as the earliest one. +- # Therefore, we warn only if the earliest source file is +- # more recent than the cgout file. 
+- min_ofl_st_mtime_ns = min( +- [os.stat(ofl).st_mtime_ns for ofl in ofls] +- ) +- +- for cgout_filename in args.cgout_filename: +- if min_ofl_st_mtime_ns > os.stat(cgout_filename).st_mtime_ns: +- warn_ofls_are_all_newer(ofls, cgout_filename) +- +- print_annotated_src_file( +- events, +- dict_line_cc, +- src_file, +- annotated_ccs, +- summary_cc, +- ) +- else: +- dict_line_cc = dict_mfl_dict_line_cc.pop(mfl) +- add_dict_line_cc_to_cc(dict_line_cc, annotated_ccs.non_identical_cc) +- +- # We could potentially do better here. +- # - Annotate until the first line where the src files diverge. +- # - Also, heuristic resyncing, e.g. by looking for matching +- # lines (of sufficient complexity) after a divergence. +- print_ann_fancy(mfl) +- print( +- "Unannotated because two or more of these original files are not " +- "identical:", +- *ofls, +- sep="\n- ", +- ) +- print() +- except OSError: +- dict_line_cc = dict_mfl_dict_line_cc.pop(mfl) +- add_dict_line_cc_to_cc(dict_line_cc, annotated_ccs.unreadable_cc) +- +- print_ann_fancy(mfl) +- print( +- "Unannotated because one or more of these original files are " +- "unreadable:", +- *ofls, +- sep="\n- ", +- ) +- print() +- +- # Sum the CCs remaining in `dict_mfl_dict_line_cc`, which are all in files +- # below the threshold. +- for dict_line_cc in dict_mfl_dict_line_cc.values(): +- add_dict_line_cc_to_cc(dict_line_cc, annotated_ccs.below_threshold_cc) +- +- return annotated_ccs +- +- +-def print_annotation_summary( +- events: Events, +- annotated_ccs: AnnotatedCcs, +- summary_cc: Cc, +-) -> None: +- # Show how many events were covered by annotated lines above. +- printer = CcPrinter(events, summary_cc) +- printer.init_ccs(annotated_ccs.ccs()) +- print_fancy("Annotation summary") +- printer.print_events("") +- print() +- +- total_cc = events.mk_empty_cc() +- for (cc, label) in zip(annotated_ccs.ccs(), AnnotatedCcs.labels): +- printer.print_cc(cc, None, label) +- add_cc_to_cc(cc, total_cc) +- +- print() +- +- # Internal sanity check. +- if summary_cc != total_cc: +- msg = ( +- "`summary:` line doesn't match computed annotated counts\n" +- f"- summary: {summary_cc}\n" +- f"- annotated: {total_cc}" +- ) +- die(msg) +- +- +-def main() -> None: +- # Metadata, initialized to empty states. +- descs: list[str] = [] +- cmds: list[str] = [] +- events = Events() +- +- # For tracking original filenames to modified filenames. +- dict_mfl_ofls: DictMflOfls = defaultdict(set) +- +- # Different places where we accumulate CC data. Initialized to invalid +- # states prior to the number of events being known. +- dict_mfl_dcc: DictMnameDcc = defaultdict(None) +- dict_mfn_dcc: DictMnameDcc = defaultdict(None) +- dict_mfl_dict_line_cc: DictMflDictLineCc = defaultdict(None) +- summary_cc: Cc = [] +- +- for n, filename in enumerate(args.cgout_filename): +- is_first_file = n == 0 +- read_cgout_file( +- filename, +- is_first_file, +- descs, +- cmds, +- events, +- dict_mfl_ofls, +- dict_mfl_dcc, +- dict_mfn_dcc, +- dict_mfl_dict_line_cc, +- summary_cc, +- ) +- +- # Each of the following calls prints a section of the output. 
+- print_metadata(descs, cmds, events) +- print_summary(events, summary_cc) +- ann_mfls = print_mname_summary( +- "File:function", "< ", events, dict_mfl_dcc, summary_cc +- ) +- print_mname_summary("Function:file", "> ", events, dict_mfn_dcc, summary_cc) +- if args.annotate: +- annotated_ccs = print_annotated_src_files( +- ann_mfls, events, dict_mfl_ofls, dict_mfl_dict_line_cc, summary_cc +- ) ++END ++; ++ print($warning); ++} ++ ++# If there is information about lines not in the file, issue a warning ++# explaining possible causes. ++sub warning_on_nonexistent_lines ($$$) ++{ ++ my ($src_more_recent_than_inputfile, $src_file, $excess_line_nums) = @_; ++ my $cause_and_solution; ++ ++ if ($src_more_recent_than_inputfile) { ++ $cause_and_solution = <{"???"}; ++ %all_ann_files = (%user_ann_files, %$threshold_files) ++ } else { ++ %all_ann_files = %user_ann_files; ++ } ++ ++ # Track if we did any annotations. ++ my $did_annotations = 0; ++ ++ LOOP: ++ foreach my $src_file (keys %all_ann_files) { ++ ++ my $opened_file = ""; ++ my $full_file_name = ""; ++ # Nb: include_dirs already includes "", so it works in the case ++ # where the filename has the full path. ++ foreach my $include_dir (@include_dirs) { ++ my $try_name = $include_dir . $src_file; ++ if (open(INPUTFILE, "< $try_name")) { ++ $opened_file = $try_name; ++ $full_file_name = ($include_dir eq "" ++ ? $src_file ++ : "$include_dir + $src_file"); ++ last; ++ } ++ } ++ ++ if (not $opened_file) { ++ # Failed to open the file. If chosen on the command line, die. ++ # If arose from auto-annotation, print a little message. ++ if (defined $user_ann_files{$src_file}) { ++ die("File $src_file not opened in any of: @include_dirs\n"); ++ ++ } else { ++ push(@unfound_auto_annotate_files, $src_file); ++ } ++ ++ } else { ++ # File header (distinguish between user- and auto-selected files). ++ print("$fancy"); ++ my $ann_type = ++ (defined $user_ann_files{$src_file} ? "User" : "Auto"); ++ print("-- $ann_type-annotated source: $full_file_name\n"); ++ print("$fancy"); ++ ++ # Get file's CCs ++ my $src_file_CCs = $allCCs{$src_file}; ++ if (!defined $src_file_CCs) { ++ print(" No information has been collected for $src_file\n\n"); ++ next LOOP; ++ } ++ ++ $did_annotations = 1; ++ ++ # Numeric, not lexicographic sort! ++ my @line_nums = sort {$a <=> $b} keys %$src_file_CCs; ++ ++ # If $src_file more recent than cachegrind.out, issue warning ++ my $src_more_recent_than_inputfile = 0; ++ if ((stat $opened_file)[9] > (stat $input_file)[9]) { ++ $src_more_recent_than_inputfile = 1; ++ warning_on_src_more_recent_than_inputfile($src_file); ++ } ++ ++ # Work out the size of each column for printing ++ my $CC_col_widths = compute_CC_col_widths(values %$src_file_CCs); ++ ++ # Events header ++ print_events($CC_col_widths); ++ print("\n\n"); ++ ++ # Shift out 0 if it's in the line numbers (from unknown entries, ++ # likely due to bugs in Valgrind's stabs debug info reader) ++ shift(@line_nums) if (0 == $line_nums[0]); ++ ++ # Finds interesting line ranges -- all lines with a CC, and all ++ # lines within $context lines of a line with a CC. 
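A worked example of the marker scheme, with illustrative numbers and $context set to 8: counts on lines 10, 13 and 40 produce the marker pairs (2, 21) and (32, 48), so lines 10 and 13 share one annotated range and line 40 gets its own:

    my @line_nums = (10, 13, 40);   # lines that have counts
    my $context   = 8;
    my @pairs;
    for (my $i = 0; $i < @line_nums; $i++) {
        push(@pairs, $line_nums[$i] - $context);               # lower marker
        $i++ while ($i < $#line_nums &&
                    $line_nums[$i] + 2*$context >= $line_nums[$i+1]);
        push(@pairs, $line_nums[$i] + $context);               # upper marker
    }
    # @pairs is now (2, 21, 32, 48)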
++ my $n = @line_nums;
++ my @pairs;
++ for (my $i = 0; $i < $n; $i++) {
++ push(@pairs, $line_nums[$i] - $context); # lower marker
++ while ($i < $n-1 &&
++ $line_nums[$i] + 2*$context >= $line_nums[$i+1]) {
++ $i++;
++ }
++ push(@pairs, $line_nums[$i] + $context); # upper marker
++ }
++
++ # Annotate chosen lines, tracking total counts of lines printed
++ $pairs[0] = 1 if ($pairs[0] < 1);
++ while (@pairs) {
++ my $low = shift @pairs;
++ my $high = shift @pairs;
++ while ($. < $low-1) {
++ my $tmp = <INPUTFILE>;
++ last unless (defined $tmp); # hack to detect EOF
++ }
++ my $src_line;
++ # Print line number, unless start of file
++ print("-- line $low " . '-' x 40 . "\n") if ($low != 1);
++ while (($. < $high) && ($src_line = <INPUTFILE>)) {
++ if (defined $line_nums[0] && $. == $line_nums[0]) {
++ print_CC($src_file_CCs->{$.}, $CC_col_widths);
++ add_array_a_to_b($src_file_CCs->{$.},
++ $printed_totals_CC);
++ shift(@line_nums);
++
++ } else {
++ print_CC([], $CC_col_widths);
++ }
++
++ print(" $src_line");
++ }
++ # Print line number, unless EOF
++ if ($src_line) {
++ print("-- line $high " . '-' x 40 . "\n");
++ } else {
++ last;
++ }
++ }
++
++ # If there was info on lines past the end of the file...
++ if (@line_nums) {
++ foreach my $line_num (@line_nums) {
++ print_CC($src_file_CCs->{$line_num}, $CC_col_widths);
++ print(" \n");
++ }
++ print("\n");
++ warning_on_nonexistent_lines($src_more_recent_than_inputfile,
++ $src_file, \@line_nums);
++ }
++ print("\n");
++
++ # Print summary of counts attributed to file but not to any
++ # particular line (due to incomplete debug info).
++ if ($src_file_CCs->{0}) {
++ print_CC($src_file_CCs->{0}, $CC_col_widths);
++ print(" \n\n");
++ }
++
++ close(INPUTFILE);
++ }
++ }
++
++ # Print list of unfound auto-annotate selected files.
++ if (@unfound_auto_annotate_files) {
++ print("$fancy");
++ print("The following files chosen for auto-annotation could not be found:\n");
++ print($fancy);
++ foreach my $f (sort @unfound_auto_annotate_files) {
++ print(" $f\n");
++ }
++ print("\n");
++ }
++
++ # If we did any annotating, show how many events were covered by annotated
++ # lines above.
++ if ($did_annotations) {
++ my $CC_col_widths = compute_CC_col_widths($printed_totals_CC);
++ print($fancy);
++ print_events($CC_col_widths);
++ print("\n");
++ print($fancy);
++ print_CC($printed_totals_CC, $CC_col_widths);
++ print(" events annotated\n\n");
++ }
++}
++
++#----------------------------------------------------------------------------
++# "main()"
++#----------------------------------------------------------------------------
++process_cmd_line();
++read_input_file();
++print_options();
++my $threshold_files = print_summary_and_fn_totals();
++annotate_ann_files($threshold_files);
++
++##--------------------------------------------------------------------##
++##--- end cg_annotate.in ---##
++##--------------------------------------------------------------------##
+
+
+-if __name__ == "__main__":
+- main()
+diff '--color=auto' -ru --new-file valgrind-3.21.0/cachegrind/cg_arch.c valgrind-riscv64/cachegrind/cg_arch.c
+--- valgrind-3.21.0/cachegrind/cg_arch.c 2023-04-21 21:20:47.000000000 +0800
++++ valgrind-riscv64/cachegrind/cg_arch.c 2022-09-21 06:23:46.000000000 +0800
+@@ -317,7 +317,7 @@
+ " --I1=<size>,<assoc>,<line_size> set I1 cache manually\n"
+ " --D1=<size>,<assoc>,<line_size> set D1 cache manually\n"
+ " --LL=<size>,<assoc>,<line_size> set LL cache manually\n"
+- );
++ );
+ }
+
+
+@@ -475,6 +475,13 @@
+ *D1c = (cache_t) { 65536, 2, 64 };
+ *LLc = (cache_t) { 262144, 8, 64 };
+
++#elif defined(VGA_riscv64)
++
++ // Default cache configuration is SiFive FU740-C000 (HiFive Unmatched)
++ *I1c = (cache_t) { 32768, 4, 64 };
++ *D1c = (cache_t) { 32768, 8, 64 };
++ *LLc = (cache_t) { 2097152, 16, 64 };
++
+ #else
+
+ #error "Unknown arch"
+diff '--color=auto' -ru --new-file valgrind-3.21.0/cachegrind/cg_branchpred.c valgrind-riscv64/cachegrind/cg_branchpred.c
+--- valgrind-3.21.0/cachegrind/cg_branchpred.c 2023-01-07 22:48:49.000000000 +0800
++++ valgrind-riscv64/cachegrind/cg_branchpred.c 2022-09-21 06:23:46.000000000 +0800
+@@ -48,7 +48,7 @@
+ # define N_IADDR_LO_ZERO_BITS 2
+ #elif defined(VGA_x86) || defined(VGA_amd64)
+ # define N_IADDR_LO_ZERO_BITS 0
+-#elif defined(VGA_s390x) || defined(VGA_arm)
++#elif defined(VGA_s390x) || defined(VGA_arm) || defined(VGA_riscv64)
+ # define N_IADDR_LO_ZERO_BITS 1
+ #else
+ # error "Unsupported architecture"
+diff '--color=auto' -ru --new-file valgrind-3.21.0/cachegrind/cg_diff.in valgrind-riscv64/cachegrind/cg_diff.in
+--- valgrind-3.21.0/cachegrind/cg_diff.in 2023-04-21 21:20:47.000000000 +0800
++++ valgrind-riscv64/cachegrind/cg_diff.in 2022-09-21 06:23:46.000000000 +0800
+@@ -1,14 +1,13 @@
+-#! /usr/bin/env python3
+-# pyright: strict
++#! @PERL@
+
+-# --------------------------------------------------------------------
+-# --- Cachegrind's differencer. cg_diff.in ---
+-# --------------------------------------------------------------------
++##--------------------------------------------------------------------##
++##--- Cachegrind's differencer. cg_diff.in ---##
++##--------------------------------------------------------------------##
+
+ # This file is part of Cachegrind, a Valgrind tool for cache
+ # profiling programs.
+ #
+-# Copyright (C) 2002-2023 Nicholas Nethercote
++# Copyright (C) 2002-2017 Nicholas Nethercote
+ # njn@valgrind.org
+ #
+ # This program is free software; you can redistribute it and/or
+@@ -26,315 +25,313 @@
+ #
+ # The GNU General Public License is contained in the file COPYING.
+
+-# This script diffs Cachegrind output files.
+-#
+-# Use `make pydiff` to "build" this script every time it is changed.
This runs +-# the formatters, type-checkers, and linters on `cg_diff.in` and then generates +-# `cg_diff`. +-# +-# This is a cut-down version of `cg_annotate.in`. +- +-from __future__ import annotations +- +-import re +-import sys +-from argparse import ArgumentParser, Namespace +-from collections import defaultdict +-from typing import Callable, DefaultDict, NewType, NoReturn +- +-SearchAndReplace = Callable[[str], str] +- +- +-# A typed wrapper for parsed args. +-class Args(Namespace): +- # None of these fields are modified after arg parsing finishes. +- mod_filename: SearchAndReplace +- mod_funcname: SearchAndReplace +- cgout_filename1: str +- cgout_filename2: str +- +- @staticmethod +- def parse() -> Args: +- # We support Perl-style `s/old/new/flags` search-and-replace +- # expressions, because that's how this option was implemented in the +- # old Perl version of `cg_diff`. This requires conversion from +- # `s/old/new/` style to `re.sub`. The conversion isn't a perfect +- # emulation of Perl regexps (e.g. Python uses `\1` rather than `$1` for +- # using captures in the `new` part), but it should be close enough. The +- # only supported flags are `g` (global) and `i` (ignore case). +- def search_and_replace(regex: str | None) -> SearchAndReplace: +- if regex is None: +- return lambda s: s +- +- # Extract the parts of an `s/old/new/tail` regex. `(? None: +- self.events = text.split() +- self.num_events = len(self.events) +- +- # Raises a `ValueError` exception on syntax error. +- def mk_cc(self, str_counts: list[str]) -> Cc: +- # This is slightly faster than a list comprehension. +- counts = list(map(int, str_counts)) +- +- if len(counts) == self.num_events: +- pass +- elif len(counts) < self.num_events: +- # Add zeroes at the end for any missing numbers. +- counts.extend([0] * (self.num_events - len(counts))) +- else: +- raise ValueError +- +- return counts +- +- def mk_empty_cc(self) -> Cc: +- # This is much faster than a list comprehension. +- return [0] * self.num_events +- +- +-# A "cost centre", which is a dumb container for counts. Always the same length +-# as `Events.events`, but it doesn't even know event names. `Events.mk_cc` and +-# `Events.mk_empty_cc` are used for construction. +-# +-# This used to be a class with a single field `counts: list[int]`, but this +-# type is very hot and just using a type alias is much faster. +-Cc = list[int] +- +-# Add the counts in `a_cc` to `b_cc`. +-def add_cc_to_cc(a_cc: Cc, b_cc: Cc) -> None: +- for i, a_count in enumerate(a_cc): +- b_cc[i] += a_count +- +- +-# Subtract the counts in `a_cc` from `b_cc`. +-def sub_cc_from_cc(a_cc: Cc, b_cc: Cc) -> None: +- for i, a_count in enumerate(a_cc): +- b_cc[i] -= a_count +- +- +-# A paired filename and function name. +-Flfn = NewType("Flfn", tuple[str, str]) +- +-# Per-function CCs. +-DictFlfnCc = DefaultDict[Flfn, Cc] +- +- +-def die(msg: str) -> NoReturn: +- print("cg_diff: error:", msg, file=sys.stderr) +- sys.exit(1) +- +- +-def read_cgout_file(cgout_filename: str) -> tuple[str, Events, DictFlfnCc, Cc]: +- # The file format is described in Cachegrind's manual. +- try: +- cgout_file = open(cgout_filename, "r", encoding="utf-8") +- except OSError as err: +- die(f"{err}") +- +- with cgout_file: +- cgout_line_num = 0 +- +- def parse_die(msg: str) -> NoReturn: +- die(f"{cgout_file.name}:{cgout_line_num}: {msg}") +- +- def readline() -> str: +- nonlocal cgout_line_num +- cgout_line_num += 1 +- return cgout_file.readline() +- +- # Read "desc:" lines. 
+- while line := readline(): +- if m := re.match(r"desc:\s+(.*)", line): +- # The "desc:" lines are unused. +- pass +- else: +- break +- +- # Read "cmd:" line. (`line` is already set from the "desc:" loop.) +- if m := re.match(r"cmd:\s+(.*)", line): +- cmd = m.group(1) +- else: +- parse_die("missing a `command:` line") +- +- # Read "events:" line. +- line = readline() +- if m := re.match(r"events:\s+(.*)", line): +- events = Events(m.group(1)) +- else: +- parse_die("missing an `events:` line") +- +- fl = "" +- flfn = Flfn(("", "")) +- +- # Different places where we accumulate CC data. +- dict_flfn_cc: DictFlfnCc = defaultdict(events.mk_empty_cc) +- summary_cc = None +- +- # Line matching is done in order of pattern frequency, for speed. +- while line := readline(): +- if line[0].isdigit(): +- split_line = line.split() +- try: +- # The line_num isn't used. +- cc = events.mk_cc(split_line[1:]) +- except ValueError: +- parse_die("malformed or too many event counts") +- +- # Record this CC at the function level. +- add_cc_to_cc(cc, dict_flfn_cc[flfn]) +- +- elif line.startswith("fn="): +- flfn = Flfn((fl, args.mod_funcname(line[3:-1]))) +- +- elif line.startswith("fl="): +- # A longstanding bug: the use of `--mod-filename` makes it +- # likely that some files won't be found when annotating. This +- # doesn't matter much, because we use line number 0 for all +- # diffs anyway. It just means we get "This file was unreadable" +- # for modified filenames rather than a single "" CC. +- fl = args.mod_filename(line[3:-1]) +- # A `fn=` line should follow, overwriting the "???". +- flfn = Flfn((fl, "???")) +- +- elif m := re.match(r"summary:\s+(.*)", line): +- try: +- summary_cc = events.mk_cc(m.group(1).split()) +- except ValueError: +- parse_die("malformed or too many event counts") +- +- elif line == "\n" or line.startswith("#"): +- # Skip empty lines and comment lines. +- pass +- +- else: +- parse_die(f"malformed line: {line[:-1]}") +- +- # Check if summary line was present. +- if not summary_cc: +- parse_die("missing `summary:` line, aborting") +- +- # Check summary is correct. +- total_cc = events.mk_empty_cc() +- for flfn_cc in dict_flfn_cc.values(): +- add_cc_to_cc(flfn_cc, total_cc) +- if summary_cc != total_cc: +- msg = ( +- "`summary:` line doesn't match computed total\n" +- f"- summary: {summary_cc}\n" +- f"- total: {total_cc}" +- ) +- parse_die(msg) +- +- return (cmd, events, dict_flfn_cc, summary_cc) +- +- +-def main() -> None: +- filename1 = args.cgout_filename1[0] +- filename2 = args.cgout_filename2[0] +- +- (cmd1, events1, dict_flfn_cc1, summary_cc1) = read_cgout_file(filename1) +- (cmd2, events2, dict_flfn_cc2, summary_cc2) = read_cgout_file(filename2) +- +- if events1.events != events2.events: +- die("events in data files don't match") +- +- # Subtract file 1's CCs from file 2's CCs, at the Flfn level. +- for flfn, flfn_cc1 in dict_flfn_cc1.items(): +- flfn_cc2 = dict_flfn_cc2[flfn] +- sub_cc_from_cc(flfn_cc1, flfn_cc2) +- sub_cc_from_cc(summary_cc1, summary_cc2) +- +- print(f"desc: Files compared: {filename1}; {filename2}") +- print(f"cmd: {cmd1}; {cmd2}") +- print("events:", *events1.events, sep=" ") +- +- # Sort so the output is deterministic. +- def key(flfn_and_cc: tuple[Flfn, Cc]) -> Flfn: +- return flfn_and_cc[0] +- +- for flfn, flfn_cc2 in sorted(dict_flfn_cc2.items(), key=key): +- # Use `0` for the line number because we don't try to give line-level +- # CCs, due to the possibility of code changes causing line numbers to +- # move around. 
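For reference, the diff file produced here keeps the normal cachegrind format but attaches every count to line 0. An illustrative fragment, with made-up names and numbers (counts can be negative, since they are differences), looks like:

    desc: Files compared: cachegrind.out.1234; cachegrind.out.5678
    cmd: ./prog; ./prog
    events: Ir I1mr ILmr
    fl=concord.c
    fn=get_word_num
    0 -1500 2 0
    summary: -98765 12 -3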
+- print(f"fl={flfn[0]}") +- print(f"fn={flfn[1]}") +- print("0", *flfn_cc2, sep=" ") +- +- print("summary:", *summary_cc2, sep=" ") +- +- +-if __name__ == "__main__": +- main() ++#---------------------------------------------------------------------------- ++# This is a very cut-down and modified version of cg_annotate. ++#---------------------------------------------------------------------------- ++ ++use warnings; ++use strict; ++ ++#---------------------------------------------------------------------------- ++# Global variables ++#---------------------------------------------------------------------------- ++ ++# Version number ++my $version = "@VERSION@"; ++ ++# Usage message. ++my $usage = < ++ ++ options for the user, with defaults in [ ], are: ++ -h --help show this message ++ -v --version show version ++ --mod-filename= a Perl search-and-replace expression that is applied ++ to filenames, eg. --mod-filename='s/prog[0-9]/projN/' ++ --mod-funcname= like --mod-filename, but applied to function names ++ ++ cg_diff is Copyright (C) 2002-2017 Nicholas Nethercote. ++ and licensed under the GNU General Public License, version 2. ++ Bug reports, feedback, admiration, abuse, etc, to: njn\@valgrind.org. ++ ++END ++; ++ ++# --mod-filename expression ++my $mod_filename = undef; ++ ++# --mod-funcname expression ++my $mod_funcname = undef; ++ ++#----------------------------------------------------------------------------- ++# Argument and option handling ++#----------------------------------------------------------------------------- ++sub process_cmd_line() ++{ ++ my ($file1, $file2) = (undef, undef); ++ ++ for my $arg (@ARGV) { ++ ++ if ($arg =~ /^-/) { ++ # --version ++ if ($arg =~ /^-v$|^--version$/) { ++ die("cg_diff-$version\n"); ++ ++ } elsif ($arg =~ /^--mod-filename=(.*)/) { ++ $mod_filename = $1; ++ ++ } elsif ($arg =~ /^--mod-funcname=(.*)/) { ++ $mod_funcname = $1; ++ ++ } else { # -h and --help fall under this case ++ die($usage); ++ } ++ ++ } elsif (not defined($file1)) { ++ $file1 = $arg; ++ ++ } elsif (not defined($file2)) { ++ $file2 = $arg; ++ ++ } else { ++ die($usage); ++ } ++ } ++ ++ # Must have specified two input files. ++ if (not defined $file1 or not defined $file2) { ++ die($usage); ++ } ++ ++ return ($file1, $file2); ++} ++ ++#----------------------------------------------------------------------------- ++# Reading of input file ++#----------------------------------------------------------------------------- ++sub max ($$) ++{ ++ my ($x, $y) = @_; ++ return ($x > $y ? $x : $y); ++} ++ ++# Add the two arrays; any '.' entries are ignored. Two tricky things: ++# 1. If $a2->[$i] is undefined, it defaults to 0 which is what we want; we turn ++# off warnings to allow this. This makes things about 10% faster than ++# checking for definedness ourselves. ++# 2. We don't add an undefined count or a ".", even though it's value is 0, ++# because we don't want to make an $a2->[$i] that is undef become 0 ++# unnecessarily. ++sub add_array_a_to_b ($$) ++{ ++ my ($a, $b) = @_; ++ ++ my $n = max(scalar @$a, scalar @$b); ++ $^W = 0; ++ foreach my $i (0 .. $n-1) { ++ $b->[$i] += $a->[$i] if (defined $a->[$i] && "." ne $a->[$i]); ++ } ++ $^W = 1; ++} ++ ++sub sub_array_b_from_a ($$) ++{ ++ my ($a, $b) = @_; ++ ++ my $n = max(scalar @$a, scalar @$b); ++ $^W = 0; ++ foreach my $i (0 .. $n-1) { ++ $a->[$i] -= $b->[$i]; # XXX: doesn't handle '.' entries ++ } ++ $^W = 1; ++} ++ ++# Add each event count to the CC array. '.' counts become undef, as do ++# missing entries (implicitly). 
++sub line_to_CC ($$)
++{
++ my ($line, $numEvents) = @_;
++
++ my @CC = (split /\s+/, $line);
++ (@CC <= $numEvents) or die("Line $.: too many event counts\n");
++ return \@CC;
++}
++
++sub read_input_file($)
++{
++ my ($input_file) = @_;
++
++ open(INPUTFILE, "< $input_file")
++ || die "Cannot open $input_file for reading\n";
++
++ # Read "desc:" lines.
++ my $desc;
++ my $line;
++ while ($line = <INPUTFILE>) {
++ if ($line =~ s/desc:\s+//) {
++ $desc .= $line;
++ } else {
++ last;
++ }
++ }
++
++ # Read "cmd:" line (Nb: will already be in $line from "desc:" loop above).
++ ($line =~ s/^cmd:\s+//) or die("Line $.: missing command line\n");
++ my $cmd = $line;
++ chomp($cmd); # Remove newline
++
++ # Read "events:" line. We make a temporary hash in which the Nth event's
++ # value is N, which is useful for handling --show/--sort options below.
++ $line = <INPUTFILE>;
++ (defined $line && $line =~ s/^events:\s+//)
++ or die("Line $.: missing events line\n");
++ my @events = split(/\s+/, $line);
++ my $numEvents = scalar @events;
++
++ my $currFileName;
++ my $currFileFuncName;
++
++ my %CCs; # hash("$filename#$funcname" => CC array)
++ my $currCC = undef; # CC array
++
++ my $summaryCC;
++
++ # Read body of input file.
++ while (<INPUTFILE>) {
++ s/#.*$//; # remove comments
++ if (s/^(\d+)\s+//) {
++ my $CC = line_to_CC($_, $numEvents);
++ defined($currCC) || die;
++ add_array_a_to_b($CC, $currCC);
++
++ } elsif (s/^fn=(.*)$//) {
++ defined($currFileName) || die;
++ my $tmpFuncName = $1;
++ if (defined $mod_funcname) {
++ eval "\$tmpFuncName =~ $mod_funcname";
++ }
++ $currFileFuncName = "$currFileName#$tmpFuncName";
++ $currCC = $CCs{$currFileFuncName};
++ if (not defined $currCC) {
++ $currCC = [];
++ $CCs{$currFileFuncName} = $currCC;
++ }
++
++ } elsif (s/^fl=(.*)$//) {
++ $currFileName = $1;
++ if (defined $mod_filename) {
++ eval "\$currFileName =~ $mod_filename";
++ }
++ # Assume that a "fn=" line is followed by a "fl=" line.
++ $currFileFuncName = undef;
++
++ } elsif (s/^\s*$//) {
++ # blank, do nothing
++
++ } elsif (s/^summary:\s+//) {
++ $summaryCC = line_to_CC($_, $numEvents);
++ (scalar(@$summaryCC) == @events)
++ or die("Line $.: summary event and total event mismatch\n");
++
++ } else {
++ warn("WARNING: line $. malformed, ignoring\n");
++ }
++ }
++
++ # Check if summary line was present
++ if (not defined $summaryCC) {
++ die("missing final summary line, aborting\n");
++ }
++
++ close(INPUTFILE);
++
++ return ($cmd, \@events, \%CCs, $summaryCC);
++}
++
++#----------------------------------------------------------------------------
++# "main()"
++#----------------------------------------------------------------------------
++# Commands seen in the files. Need not match.
++my $cmd1;
++my $cmd2;
++
++# Events seen in the files. They must match.
++my $events1;
++my $events2;
++
++# Individual CCs, organised by filename/funcname/line_num.
++# hashref("$filename#$funcname", CC array)
++my $CCs1;
++my $CCs2;
++
++# Total counts for summary (an arrayref).
++my $summaryCC1; ++my $summaryCC2; ++ ++#---------------------------------------------------------------------------- ++# Read the input files ++#---------------------------------------------------------------------------- ++my ($file1, $file2) = process_cmd_line(); ++($cmd1, $events1, $CCs1, $summaryCC1) = read_input_file($file1); ++($cmd2, $events2, $CCs2, $summaryCC2) = read_input_file($file2); ++ ++#---------------------------------------------------------------------------- ++# Check the events match ++#---------------------------------------------------------------------------- ++my $n = max(scalar @$events1, scalar @$events2); ++$^W = 0; # turn off warnings, because we might hit undefs ++foreach my $i (0 .. $n-1) { ++ ($events1->[$i] eq $events2->[$i]) || die "events don't match, aborting\n"; ++} ++$^W = 1; ++ ++#---------------------------------------------------------------------------- ++# Do the subtraction: CCs2 -= CCs1 ++#---------------------------------------------------------------------------- ++while (my ($filefuncname, $CC1) = each(%$CCs1)) { ++ my $CC2 = $CCs2->{$filefuncname}; ++ if (not defined $CC2) { ++ $CC2 = []; ++ sub_array_b_from_a($CC2, $CC1); # CC2 -= CC1 ++ $CCs2->{$filefuncname} = $CC2; ++ } else { ++ sub_array_b_from_a($CC2, $CC1); # CC2 -= CC1 ++ } ++} ++sub_array_b_from_a($summaryCC2, $summaryCC1); ++ ++#---------------------------------------------------------------------------- ++# Print the result, in CCs2 ++#---------------------------------------------------------------------------- ++print("desc: Files compared: $file1; $file2\n"); ++print("cmd: $cmd1; $cmd2\n"); ++print("events: "); ++for my $e (@$events1) { ++ print(" $e"); ++} ++print("\n"); ++ ++while (my ($filefuncname, $CC) = each(%$CCs2)) { ++ ++ my @x = split(/#/, $filefuncname); ++ (scalar @x == 2) || die; ++ ++ print("fl=$x[0]\n"); ++ print("fn=$x[1]\n"); ++ ++ print("0"); ++ foreach my $n (@$CC) { ++ print(" $n"); ++ } ++ print("\n"); ++} ++ ++print("summary:"); ++foreach my $n (@$summaryCC2) { ++ print(" $n"); ++} ++print("\n"); ++ ++##--------------------------------------------------------------------## ++##--- end ---## ++##--------------------------------------------------------------------## +diff '--color=auto' -ru --new-file valgrind-3.21.0/cachegrind/cg_main.c valgrind-riscv64/cachegrind/cg_main.c +--- valgrind-3.21.0/cachegrind/cg_main.c 2023-04-21 21:20:47.000000000 +0800 ++++ valgrind-riscv64/cachegrind/cg_main.c 2022-09-21 06:23:46.000000000 +0800 +@@ -57,7 +57,7 @@ + /*--- Options ---*/ + /*------------------------------------------------------------*/ + +-static Bool clo_cache_sim = False; /* do cache simulation? */ ++static Bool clo_cache_sim = True; /* do cache simulation? */ + static Bool clo_branch_sim = False; /* do branch simulation? 
*/ + static const HChar* clo_cachegrind_out_file = "cachegrind.out.%p"; + +@@ -894,18 +894,15 @@ + static + void addEvent_Dr ( CgState* cgs, InstrInfo* inode, Int datasize, IRAtom* ea ) + { ++ Event* evt; + tl_assert(isIRAtom(ea)); +- ++ tl_assert(datasize >= 1 && datasize <= min_line_size); + if (!clo_cache_sim) + return; +- +- tl_assert(datasize >= 1 && datasize <= min_line_size); +- +- if (cgs->events_used == N_EVENTS) { ++ if (cgs->events_used == N_EVENTS) + flushEvents(cgs); +- } + tl_assert(cgs->events_used >= 0 && cgs->events_used < N_EVENTS); +- Event* evt = &cgs->events[cgs->events_used]; ++ evt = &cgs->events[cgs->events_used]; + init_Event(evt); + evt->tag = Ev_Dr; + evt->inode = inode; +@@ -917,13 +914,14 @@ + static + void addEvent_Dw ( CgState* cgs, InstrInfo* inode, Int datasize, IRAtom* ea ) + { ++ Event* evt; ++ + tl_assert(isIRAtom(ea)); ++ tl_assert(datasize >= 1 && datasize <= min_line_size); + + if (!clo_cache_sim) + return; + +- tl_assert(datasize >= 1 && datasize <= min_line_size); +- + /* Is it possible to merge this write with the preceding read? */ + if (cgs->events_used > 0) { + Event* lastEvt = &cgs->events[cgs->events_used-1]; +@@ -941,7 +939,7 @@ + if (cgs->events_used == N_EVENTS) + flushEvents(cgs); + tl_assert(cgs->events_used >= 0 && cgs->events_used < N_EVENTS); +- Event* evt = &cgs->events[cgs->events_used]; ++ evt = &cgs->events[cgs->events_used]; + init_Event(evt); + evt->tag = Ev_Dw; + evt->inode = inode; +@@ -958,12 +956,11 @@ + tl_assert(isIRAtom(ea)); + tl_assert(guard); + tl_assert(isIRAtom(guard)); ++ tl_assert(datasize >= 1 && datasize <= min_line_size); + + if (!clo_cache_sim) + return; + +- tl_assert(datasize >= 1 && datasize <= min_line_size); +- + /* Adding guarded memory actions and merging them with the existing + queue is too complex. Simply flush the queue and add this + action immediately. Since guarded loads and stores are pretty +@@ -1394,23 +1391,21 @@ + if (fp == NULL) { + // If the file can't be opened for whatever reason (conflict + // between multiple cachegrinded processes?), give up now. +- VG_(umsg)("error: can't open output data file '%s'\n", ++ VG_(umsg)("error: can't open cache simulation output file '%s'\n", + cachegrind_out_file ); +- VG_(umsg)(" ... so detailed results will be missing.\n"); ++ VG_(umsg)(" ... so simulation results will be missing.\n"); + VG_(free)(cachegrind_out_file); + return; + } else { + VG_(free)(cachegrind_out_file); + } + +- if (clo_cache_sim) { +- // "desc:" lines (giving I1/D1/LL cache configuration). The spaces after +- // the 2nd colon makes cg_annotate's output look nicer. +- VG_(fprintf)(fp, "desc: I1 cache: %s\n" +- "desc: D1 cache: %s\n" +- "desc: LL cache: %s\n", +- I1.desc_line, D1.desc_line, LL.desc_line); +- } ++ // "desc:" lines (giving I1/D1/LL cache configuration). The spaces after ++ // the 2nd colon makes cg_annotate's output look nicer. ++ VG_(fprintf)(fp, "desc: I1 cache: %s\n" ++ "desc: D1 cache: %s\n" ++ "desc: LL cache: %s\n", ++ I1.desc_line, D1.desc_line, LL.desc_line); + + // "cmd:" line + VG_(fprintf)(fp, "cmd: %s", VG_(args_the_exename)); +@@ -1514,7 +1509,7 @@ + } + + // Summary stats must come after rest of table, since we calculate them +- // during traversal. ++ // during traversal. 
*/ + if (clo_cache_sim && clo_branch_sim) { + VG_(fprintf)(fp, "summary:" + " %llu %llu %llu" +@@ -1592,7 +1587,7 @@ + VG_(sprintf)(fmt, "%%s %%,%dllu\n", l1); + + /* Always print this */ +- VG_(umsg)(fmt, "I refs: ", Ir_total.a); ++ VG_(umsg)(fmt, "I refs: ", Ir_total.a); + + /* If cache profiling is enabled, show D access numbers and all + miss numbers */ +@@ -1617,7 +1612,7 @@ + VG_(sprintf)(fmt, "%%s %%,%dllu (%%,%dllu rd + %%,%dllu wr)\n", + l1, l2, l3); + +- VG_(umsg)(fmt, "D refs: ", ++ VG_(umsg)(fmt, "D refs: ", + D_total.a, Dr_total.a, Dw_total.a); + VG_(umsg)(fmt, "D1 misses: ", + D_total.m1, Dr_total.m1, Dw_total.m1); +@@ -1761,12 +1756,12 @@ + + static void cg_print_usage(void) + { ++ VG_(print_cache_clo_opts)(); + VG_(printf)( +-" --cachegrind-out-file= output file name [cachegrind.out.%%p]\n" + " --cache-sim=yes|no collect cache stats? [yes]\n" + " --branch-sim=yes|no collect branch prediction stats? [no]\n" ++" --cachegrind-out-file= output file name [cachegrind.out.%%p]\n" + ); +- VG_(print_cache_clo_opts)(); + } + + static void cg_print_debug_usage(void) +@@ -1826,34 +1821,32 @@ + VG_(malloc), "cg.main.cpci.3", + VG_(free)); + +- if (clo_cache_sim) { +- VG_(post_clo_init_configure_caches)(&I1c, &D1c, &LLc, +- &clo_I1_cache, +- &clo_D1_cache, +- &clo_LL_cache); +- +- // min_line_size is used to make sure that we never feed +- // accesses to the simulator straddling more than two +- // cache lines at any cache level +- min_line_size = (I1c.line_size < D1c.line_size) ? I1c.line_size : D1c.line_size; +- min_line_size = (LLc.line_size < min_line_size) ? LLc.line_size : min_line_size; +- +- Int largest_load_or_store_size +- = VG_(machine_get_size_of_largest_guest_register)(); +- if (min_line_size < largest_load_or_store_size) { +- /* We can't continue, because the cache simulation might +- straddle more than 2 lines, and it will assert. So let's +- just stop before we start. */ +- VG_(umsg)("Cachegrind: cannot continue: the minimum line size (%d)\n", +- (Int)min_line_size); +- VG_(umsg)(" must be equal to or larger than the maximum register size (%d)\n", +- largest_load_or_store_size ); +- VG_(umsg)(" but it is not. Exiting now.\n"); +- VG_(exit)(1); +- } +- +- cachesim_initcaches(I1c, D1c, LLc); ++ VG_(post_clo_init_configure_caches)(&I1c, &D1c, &LLc, ++ &clo_I1_cache, ++ &clo_D1_cache, ++ &clo_LL_cache); ++ ++ // min_line_size is used to make sure that we never feed ++ // accesses to the simulator straddling more than two ++ // cache lines at any cache level ++ min_line_size = (I1c.line_size < D1c.line_size) ? I1c.line_size : D1c.line_size; ++ min_line_size = (LLc.line_size < min_line_size) ? LLc.line_size : min_line_size; ++ ++ Int largest_load_or_store_size ++ = VG_(machine_get_size_of_largest_guest_register)(); ++ if (min_line_size < largest_load_or_store_size) { ++ /* We can't continue, because the cache simulation might ++ straddle more than 2 lines, and it will assert. So let's ++ just stop before we start. */ ++ VG_(umsg)("Cachegrind: cannot continue: the minimum line size (%d)\n", ++ (Int)min_line_size); ++ VG_(umsg)(" must be equal to or larger than the maximum register size (%d)\n", ++ largest_load_or_store_size ); ++ VG_(umsg)(" but it is not. 
Exiting now.\n"); ++ VG_(exit)(1); + } ++ ++ cachesim_initcaches(I1c, D1c, LLc); + } + + VG_DETERMINE_INTERFACE_VERSION(cg_pre_clo_init) +diff '--color=auto' -ru --new-file valgrind-3.21.0/cachegrind/cg_merge.c valgrind-riscv64/cachegrind/cg_merge.c +--- valgrind-3.21.0/cachegrind/cg_merge.c 1970-01-01 08:00:00.000000000 +0800 ++++ valgrind-riscv64/cachegrind/cg_merge.c 2022-09-21 06:23:46.000000000 +0800 +@@ -0,0 +1,1580 @@ ++ ++/*--------------------------------------------------------------------*/ ++/*--- A program that merges multiple cachegrind output files. ---*/ ++/*--- cg_merge.c ---*/ ++/*--------------------------------------------------------------------*/ ++ ++/* ++ This file is part of Cachegrind, a Valgrind tool for cache ++ profiling programs. ++ ++ Copyright (C) 2002-2017 Nicholas Nethercote ++ njn@valgrind.org ++ ++ AVL tree code derived from ++ ANSI C Library for maintenance of AVL Balanced Trees ++ (C) 2000 Daniel Nagy, Budapest University of Technology and Economics ++ Released under GNU General Public License (GPL) version 2 ++ ++ This program is free software; you can redistribute it and/or ++ modify it under the terms of the GNU General Public License as ++ published by the Free Software Foundation; either version 2 of the ++ License, or (at your option) any later version. ++ ++ This program is distributed in the hope that it will be useful, but ++ WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ General Public License for more details. ++ ++ You should have received a copy of the GNU General Public License ++ along with this program; if not, see . ++ ++ The GNU General Public License is contained in the file COPYING. ++*/ ++ ++#include ++#include ++#include ++#include ++#include ++ ++typedef signed long Word; ++typedef unsigned long UWord; ++typedef unsigned char Bool; ++#define True ((Bool)1) ++#define False ((Bool)0) ++typedef signed int Int; ++typedef unsigned int UInt; ++typedef unsigned long long int ULong; ++typedef signed char Char; ++typedef size_t SizeT; ++ ++ ++//------------------------------------------------------------------// ++//--- WordFM ---// ++//--- Public interface ---// ++//------------------------------------------------------------------// ++ ++typedef struct _WordFM WordFM; /* opaque */ ++ ++/* Initialise a WordFM */ ++void initFM ( WordFM* t, ++ void* (*alloc_nofail)( SizeT ), ++ void (*dealloc)(void*), ++ Word (*kCmp)(Word,Word) ); ++ ++/* Allocate and initialise a WordFM */ ++WordFM* newFM( void* (*alloc_nofail)( SizeT ), ++ void (*dealloc)(void*), ++ Word (*kCmp)(Word,Word) ); ++ ++/* Free up the FM. If kFin is non-NULL, it is applied to keys ++ before the FM is deleted; ditto with vFin for vals. */ ++void deleteFM ( WordFM*, void(*kFin)(Word), void(*vFin)(Word) ); ++ ++/* Add (k,v) to fm. If a binding for k already exists, it is updated ++ to map to this new v. In that case we should really return the ++ previous v so that caller can finalise it. Oh well. */ ++void addToFM ( WordFM* fm, Word k, Word v ); ++ ++// Delete key from fm, returning associated val if found ++Bool delFromFM ( WordFM* fm, /*OUT*/Word* oldV, Word key ); ++ ++// Look up in fm, assigning found val at spec'd address ++Bool lookupFM ( WordFM* fm, /*OUT*/Word* valP, Word key ); ++ ++Word sizeFM ( WordFM* fm ); ++ ++// set up FM for iteration ++void initIterFM ( WordFM* fm ); ++ ++// get next key/val pair. 
Will assert if fm has been modified ++// or looked up in since initIterFM was called. ++Bool nextIterFM ( WordFM* fm, /*OUT*/Word* pKey, /*OUT*/Word* pVal ); ++ ++// clear the I'm iterating flag ++void doneIterFM ( WordFM* fm ); ++ ++// Deep copy a FM. If dopyK is NULL, keys are copied verbatim. ++// If non-null, dopyK is applied to each key to generate the ++// version in the new copy. In that case, if the argument to dopyK ++// is non-NULL but the result is NULL, it is assumed that dopyK ++// could not allocate memory, in which case the copy is abandoned ++// and NULL is returned. Ditto with dopyV for values. ++WordFM* dopyFM ( WordFM* fm, Word(*dopyK)(Word), Word(*dopyV)(Word) ); ++ ++//------------------------------------------------------------------// ++//--- end WordFM ---// ++//--- Public interface ---// ++//------------------------------------------------------------------// ++ ++ ++static const char* argv0 = "cg_merge"; ++ ++/* Keep track of source filename/line no so as to be able to ++ print decent error messages. */ ++typedef ++ struct { ++ FILE* fp; ++ UInt lno; ++ char* filename; ++ } ++ SOURCE; ++ ++static void printSrcLoc ( SOURCE* s ) ++{ ++ fprintf(stderr, "%s: near %s line %u\n", argv0, s->filename, s->lno-1); ++} ++ ++__attribute__((noreturn)) ++static void mallocFail ( SOURCE* s, const char* who ) ++{ ++ fprintf(stderr, "%s: out of memory in %s\n", argv0, who ); ++ printSrcLoc( s ); ++ exit(2); ++} ++ ++__attribute__((noreturn)) ++static void parseError ( SOURCE* s, const char* msg ) ++{ ++ fprintf(stderr, "%s: parse error: %s\n", argv0, msg ); ++ printSrcLoc( s ); ++ exit(1); ++} ++ ++__attribute__((noreturn)) ++static void barf ( SOURCE* s, const char* msg ) ++{ ++ fprintf(stderr, "%s: %s\n", argv0, msg ); ++ printSrcLoc( s ); ++ exit(1); ++} ++ ++// Read a line. Return the line read, or NULL if at EOF. ++// The line is allocated dynamically but will be overwritten with ++// every invocation. Caller must not free it. ++static const char *readline ( SOURCE* s ) ++{ ++ static char *line = NULL; ++ static size_t linesiz = 0; ++ ++ int ch, i = 0; ++ ++ while (1) { ++ ch = getc(s->fp); ++ if (ch != EOF) { ++ if (i + 1 >= linesiz) { ++ linesiz += 500; ++ line = realloc(line, linesiz * sizeof *line); ++ if (line == NULL) ++ mallocFail(s, "readline:"); ++ } ++ line[i++] = ch; ++ line[i] = 0; ++ if (ch == '\n') { ++ line[i-1] = 0; ++ s->lno++; ++ break; ++ } ++ } else { ++ if (ferror(s->fp)) { ++ perror(argv0); ++ barf(s, "I/O error while reading input file"); ++ } else { ++ // hit EOF ++ break; ++ } ++ } ++ } ++ return i == 0 ? 
NULL : line; ++} ++ ++static Bool streqn ( const char* s1, const char* s2, size_t n ) ++{ ++ return 0 == strncmp(s1, s2, n); ++} ++ ++static Bool streq ( const char* s1, const char* s2 ) ++{ ++ return 0 == strcmp(s1, s2 ); ++} ++ ++ ++//////////////////////////////////////////////////////////////// ++ ++typedef ++ struct { ++ char* fi_name; ++ char* fn_name; ++ } ++ FileFn; ++ ++typedef ++ struct { ++ Int n_counts; ++ ULong* counts; ++ } ++ Counts; ++ ++typedef ++ struct { ++ // null-terminated vector of desc_lines ++ char** desc_lines; ++ ++ // Cmd line ++ char* cmd_line; ++ ++ // Events line ++ char* events_line; ++ Int n_events; ++ ++ // Summary line (copied from input) ++ char* summary_line; ++ ++ /* Outermost map is ++ WordFM FileFn* innerMap ++ where innerMap is WordFM line-number=UWord Counts */ ++ WordFM* outerMap; ++ ++ // Summary counts (computed whilst parsing) ++ // should match .summary_line ++ Counts* summary; ++ } ++ CacheProfFile; ++ ++static FileFn* new_FileFn ( char* file_name, char* fn_name ) ++{ ++ FileFn* ffn = malloc(sizeof(FileFn)); ++ if (ffn == NULL) ++ return NULL; ++ ffn->fi_name = file_name; ++ ffn->fn_name = fn_name; ++ return ffn; ++} ++ ++static void ddel_FileFn ( FileFn* ffn ) ++{ ++ if (ffn->fi_name) ++ free(ffn->fi_name); ++ if (ffn->fn_name) ++ free(ffn->fn_name); ++ memset(ffn, 0, sizeof(FileFn)); ++ free(ffn); ++} ++ ++static FileFn* dopy_FileFn ( FileFn* ff ) ++{ ++ char *fi2, *fn2; ++ fi2 = strdup(ff->fi_name); ++ if (fi2 == NULL) return NULL; ++ fn2 = strdup(ff->fn_name); ++ if (fn2 == NULL) { ++ free(fi2); ++ return NULL; ++ } ++ return new_FileFn( fi2, fn2 ); ++} ++ ++static Counts* new_Counts ( Int n_counts, /*COPIED*/ULong* counts ) ++{ ++ Int i; ++ Counts* cts = malloc(sizeof(Counts)); ++ if (cts == NULL) ++ return NULL; ++ ++ assert(n_counts >= 0); ++ cts->counts = malloc(n_counts * sizeof(ULong)); ++ if (cts->counts == NULL) { ++ free(cts); ++ return NULL; ++ } ++ ++ cts->n_counts = n_counts; ++ for (i = 0; i < n_counts; i++) ++ cts->counts[i] = counts[i]; ++ ++ return cts; ++} ++ ++static Counts* new_Counts_Zeroed ( Int n_counts ) ++{ ++ Int i; ++ Counts* cts = malloc(sizeof(Counts)); ++ if (cts == NULL) ++ return NULL; ++ ++ assert(n_counts >= 0); ++ cts->counts = malloc(n_counts * sizeof(ULong)); ++ if (cts->counts == NULL) { ++ free(cts); ++ return NULL; ++ } ++ ++ cts->n_counts = n_counts; ++ for (i = 0; i < n_counts; i++) ++ cts->counts[i] = 0; ++ ++ return cts; ++} ++ ++static void sdel_Counts ( Counts* cts ) ++{ ++ memset(cts, 0, sizeof(Counts)); ++ free(cts); ++} ++ ++static void ddel_Counts ( Counts* cts ) ++{ ++ if (cts->counts) ++ free(cts->counts); ++ memset(cts, 0, sizeof(Counts)); ++ free(cts); ++} ++ ++static Counts* dopy_Counts ( Counts* cts ) ++{ ++ return new_Counts( cts->n_counts, cts->counts ); ++} ++ ++static ++CacheProfFile* new_CacheProfFile ( char** desc_lines, ++ char* cmd_line, ++ char* events_line, ++ Int n_events, ++ char* summary_line, ++ WordFM* outerMap, ++ Counts* summary ) ++{ ++ CacheProfFile* cpf = malloc(sizeof(CacheProfFile)); ++ if (cpf == NULL) ++ return NULL; ++ cpf->desc_lines = desc_lines; ++ cpf->cmd_line = cmd_line; ++ cpf->events_line = events_line; ++ cpf->n_events = n_events; ++ cpf->summary_line = summary_line; ++ cpf->outerMap = outerMap; ++ cpf->summary = summary; ++ return cpf; ++} ++ ++static WordFM* dopy_InnerMap ( WordFM* innerMap ) ++{ ++ return dopyFM ( innerMap, NULL, ++ (Word(*)(Word))dopy_Counts ); ++} ++ ++static void ddel_InnerMap ( WordFM* innerMap ) ++{ ++ deleteFM( innerMap, 
NULL, (void(*)(Word))ddel_Counts ); ++} ++ ++static void ddel_CacheProfFile ( CacheProfFile* cpf ) ++{ ++ char** p; ++ if (cpf->desc_lines) { ++ for (p = cpf->desc_lines; *p; p++) ++ free(*p); ++ free(cpf->desc_lines); ++ } ++ if (cpf->cmd_line) ++ free(cpf->cmd_line); ++ if (cpf->events_line) ++ free(cpf->events_line); ++ if (cpf->summary_line) ++ free(cpf->summary_line); ++ if (cpf->outerMap) ++ deleteFM( cpf->outerMap, (void(*)(Word))ddel_FileFn, ++ (void(*)(Word))ddel_InnerMap ); ++ if (cpf->summary) ++ ddel_Counts(cpf->summary); ++ ++ memset(cpf, 0, sizeof(CacheProfFile)); ++ free(cpf); ++} ++ ++static void showCounts ( FILE* f, Counts* c ) ++{ ++ Int i; ++ for (i = 0; i < c->n_counts; i++) { ++ fprintf(f, "%lld ", c->counts[i]); ++ } ++} ++ ++static void show_CacheProfFile ( FILE* f, CacheProfFile* cpf ) ++{ ++ Int i; ++ char** d; ++ FileFn* topKey; ++ WordFM* topVal; ++ UWord subKey; ++ Counts* subVal; ++ ++ for (d = cpf->desc_lines; *d; d++) ++ fprintf(f, "%s\n", *d); ++ fprintf(f, "%s\n", cpf->cmd_line); ++ fprintf(f, "%s\n", cpf->events_line); ++ ++ initIterFM( cpf->outerMap ); ++ while (nextIterFM( cpf->outerMap, (Word*)(&topKey), (Word*)(&topVal) )) { ++ fprintf(f, "fl=%s\nfn=%s\n", ++ topKey->fi_name, topKey->fn_name ); ++ initIterFM( topVal ); ++ while (nextIterFM( topVal, (Word*)(&subKey), (Word*)(&subVal) )) { ++ fprintf(f, "%ld ", subKey ); ++ showCounts( f, subVal ); ++ fprintf(f, "\n"); ++ } ++ doneIterFM( topVal ); ++ } ++ doneIterFM( cpf->outerMap ); ++ ++ //fprintf(f, "%s\n", cpf->summary_line); ++ fprintf(f, "summary:"); ++ for (i = 0; i < cpf->summary->n_counts; i++) ++ fprintf(f, " %lld", cpf->summary->counts[i]); ++ fprintf(f, "\n"); ++} ++ ++//////////////////////////////////////////////////////////////// ++ ++static Word cmp_FileFn ( Word s1, Word s2 ) ++{ ++ FileFn* ff1 = (FileFn*)s1; ++ FileFn* ff2 = (FileFn*)s2; ++ Word r = strcmp(ff1->fi_name, ff2->fi_name); ++ if (r == 0) ++ r = strcmp(ff1->fn_name, ff2->fn_name); ++ return r; ++} ++ ++static Word cmp_unboxed_UWord ( Word s1, Word s2 ) ++{ ++ UWord u1 = (UWord)s1; ++ UWord u2 = (UWord)s2; ++ if (u1 < u2) return -1; ++ if (u1 > u2) return 1; ++ return 0; ++} ++ ++//////////////////////////////////////////////////////////////// ++ ++static Bool parse_ULong ( /*OUT*/ULong* res, /*INOUT*/const char** pptr) ++{ ++ ULong u64; ++ const char* ptr = *pptr; ++ while (isspace(*ptr)) ptr++; ++ if (!isdigit(*ptr)) { ++ *pptr = ptr; ++ return False; /* end of string, or junk */ ++ } ++ u64 = 0; ++ while (isdigit(*ptr)) { ++ u64 = (u64 * 10) + (ULong)(*ptr - '0'); ++ ptr++; ++ } ++ *res = u64; ++ *pptr = ptr; ++ return True; ++} ++ ++// str is a line of integers, starting with a line number. Parse it, ++// returning the first number in *lnno and the rest in a newly ++// allocated Counts struct. If lnno is non-NULL, treat the first ++// number as a line number and assign it to *lnno instead of ++// incorporating it in the counts array. ++static ++Counts* splitUpCountsLine ( SOURCE* s, /*OUT*/UWord* lnno, const char* str ) ++{ ++ Bool ok; ++ Counts* counts; ++ ULong *tmpC = NULL; ++ UInt n_tmpC = 0, tmpCsize = 0; ++ while (1) { ++ if (n_tmpC >= tmpCsize) { ++ tmpCsize += 50; ++ tmpC = realloc(tmpC, tmpCsize * sizeof *tmpC); ++ if (tmpC == NULL) ++ mallocFail(s, "splitUpCountsLine:"); ++ } ++ ok = parse_ULong( &tmpC[n_tmpC], &str ); ++ if (!ok) ++ break; ++ n_tmpC++; ++ } ++ if (*str != 0) ++ parseError(s, "garbage in counts line"); ++ if (lnno ? 
(n_tmpC < 2) : (n_tmpC < 1)) ++ parseError(s, "too few counts in count line"); ++ ++ if (lnno) { ++ *lnno = (UWord)tmpC[0]; ++ counts = new_Counts( n_tmpC-1, /*COPIED*/&tmpC[1] ); ++ } else { ++ counts = new_Counts( n_tmpC, /*COPIED*/&tmpC[0] ); ++ } ++ free(tmpC); ++ ++ return counts; ++} ++ ++static void addCounts ( SOURCE* s, /*OUT*/Counts* counts1, Counts* counts2 ) ++{ ++ Int i; ++ if (counts1->n_counts != counts2->n_counts) ++ parseError(s, "addCounts: inconsistent number of counts"); ++ for (i = 0; i < counts1->n_counts; i++) ++ counts1->counts[i] += counts2->counts[i]; ++} ++ ++static Bool addCountsToMap ( SOURCE* s, ++ WordFM* counts_map, ++ UWord lnno, Counts* newCounts ) ++{ ++ Counts* oldCounts; ++ // look up lnno in the map. If none present, add a binding ++ // lnno->counts. If present, add counts to the existing entry. ++ if (lookupFM( counts_map, (Word*)(&oldCounts), (Word)lnno )) { ++ // merge with existing binding ++ addCounts( s, oldCounts, newCounts ); ++ return True; ++ } else { ++ // create new binding ++ addToFM( counts_map, (Word)lnno, (Word)newCounts ); ++ return False; ++ } ++} ++ ++static ++void handle_counts ( SOURCE* s, ++ CacheProfFile* cpf, ++ const char* fi, const char* fn, const char* newCountsStr ) ++{ ++ WordFM* countsMap; ++ Bool freeNewCounts; ++ UWord lnno; ++ Counts* newCounts; ++ FileFn* topKey; ++ ++ if (0) printf("%s %s %s\n", fi, fn, newCountsStr ); ++ ++ // parse the numbers ++ newCounts = splitUpCountsLine( s, &lnno, newCountsStr ); ++ ++ // Did we get the right number? ++ if (newCounts->n_counts != cpf->n_events) ++ goto oom; ++ ++ // allocate the key ++ topKey = malloc(sizeof(FileFn)); ++ if (topKey) { ++ topKey->fi_name = strdup(fi); ++ topKey->fn_name = strdup(fn); ++ } ++ if (! (topKey && topKey->fi_name && topKey->fn_name)) ++ mallocFail(s, "handle_counts:"); ++ ++ // search for it ++ if (lookupFM( cpf->outerMap, (Word*)(&countsMap), (Word)topKey )) { ++ // found it. Merge in new counts ++ freeNewCounts = addCountsToMap( s, countsMap, lnno, newCounts ); ++ ddel_FileFn(topKey); ++ } else { ++ // not found in the top map. Create new entry ++ countsMap = newFM( malloc, free, cmp_unboxed_UWord ); ++ if (!countsMap) ++ goto oom; ++ addToFM( cpf->outerMap, (Word)topKey, (Word)countsMap ); ++ freeNewCounts = addCountsToMap( s, countsMap, lnno, newCounts ); ++ } ++ ++ // also add to running summary total ++ addCounts( s, cpf->summary, newCounts ); ++ ++ // if safe to do so, free up the count vector ++ if (freeNewCounts) ++ ddel_Counts(newCounts); ++ ++ return; ++ ++ oom: ++ parseError(s, "# counts doesn't match # events"); ++} ++ ++ ++/* Parse a complete file from the stream in 's'. If a parse error ++ happens, do not return; instead exit via parseError(). If an ++ out-of-memory condition happens, do not return; instead exit via ++ mallocError(). 
++*/ ++static CacheProfFile* parse_CacheProfFile ( SOURCE* s ) ++{ ++ Int i; ++ char** tmp_desclines = NULL; ++ unsigned tmp_desclines_size = 0; ++ char* p; ++ int n_tmp_desclines = 0; ++ CacheProfFile* cpf; ++ Counts* summaryRead; ++ char* curr_fn = strdup("???"); ++ char* curr_fl = strdup("???"); ++ const char* line; ++ ++ cpf = new_CacheProfFile( NULL, NULL, NULL, 0, NULL, NULL, NULL ); ++ if (cpf == NULL) ++ mallocFail(s, "parse_CacheProfFile(1)"); ++ ++ // Parse "desc:" lines ++ while (1) { ++ line = readline(s); ++ if (!line) ++ break; ++ if (!streqn(line, "desc: ", 6)) ++ break; ++ if (n_tmp_desclines >= tmp_desclines_size) { ++ tmp_desclines_size += 100; ++ tmp_desclines = realloc(tmp_desclines, ++ tmp_desclines_size * sizeof *tmp_desclines); ++ if (tmp_desclines == NULL) ++ mallocFail(s, "parse_CacheProfFile(1)"); ++ } ++ tmp_desclines[n_tmp_desclines++] = strdup(line); ++ } ++ ++ if (n_tmp_desclines == 0) ++ parseError(s, "parse_CacheProfFile: no DESC lines present"); ++ ++ cpf->desc_lines = malloc( (1+n_tmp_desclines) * sizeof(char*) ); ++ if (cpf->desc_lines == NULL) ++ mallocFail(s, "parse_CacheProfFile(2)"); ++ ++ cpf->desc_lines[n_tmp_desclines] = NULL; ++ for (i = 0; i < n_tmp_desclines; i++) ++ cpf->desc_lines[i] = tmp_desclines[i]; ++ ++ // Parse "cmd:" line ++ if (!streqn(line, "cmd: ", 5)) ++ parseError(s, "parse_CacheProfFile: no CMD line present"); ++ ++ cpf->cmd_line = strdup(line); ++ if (cpf->cmd_line == NULL) ++ mallocFail(s, "parse_CacheProfFile(3)"); ++ ++ // Parse "events:" line and figure out how many events there are ++ line = readline(s); ++ if (!line) ++ parseError(s, "parse_CacheProfFile: eof before EVENTS line"); ++ if (!streqn(line, "events: ", 8)) ++ parseError(s, "parse_CacheProfFile: no EVENTS line present"); ++ ++ // figure out how many events there are by counting the number ++ // of space-alphanum transitions in the events_line ++ cpf->events_line = strdup(line); ++ if (cpf->events_line == NULL) ++ mallocFail(s, "parse_CacheProfFile(3)"); ++ ++ cpf->n_events = 0; ++ assert(cpf->events_line[6] == ':'); ++ for (p = &cpf->events_line[6]; *p; p++) { ++ if (p[0] == ' ' && isalpha(p[1])) ++ cpf->n_events++; ++ } ++ ++ // create the running cross-check summary ++ cpf->summary = new_Counts_Zeroed( cpf->n_events ); ++ if (cpf->summary == NULL) ++ mallocFail(s, "parse_CacheProfFile(4)"); ++ ++ // create the outer map (file+fn name --> inner map) ++ cpf->outerMap = newFM ( malloc, free, cmp_FileFn ); ++ if (cpf->outerMap == NULL) ++ mallocFail(s, "parse_CacheProfFile(5)"); ++ ++ // process count lines ++ while (1) { ++ line = readline(s); ++ if (!line) ++ parseError(s, "parse_CacheProfFile: eof before SUMMARY line"); ++ ++ if (isdigit(line[0])) { ++ handle_counts(s, cpf, curr_fl, curr_fn, line); ++ continue; ++ } ++ else ++ if (streqn(line, "fn=", 3)) { ++ free(curr_fn); ++ curr_fn = strdup(line+3); ++ continue; ++ } ++ else ++ if (streqn(line, "fl=", 3)) { ++ free(curr_fl); ++ curr_fl = strdup(line+3); ++ continue; ++ } ++ else ++ if (streqn(line, "summary: ", 9)) { ++ break; ++ } ++ else ++ parseError(s, "parse_CacheProfFile: unexpected line in main data"); ++ } ++ ++ // finally, the "summary:" line ++ if (!streqn(line, "summary: ", 9)) ++ parseError(s, "parse_CacheProfFile: missing SUMMARY line"); ++ ++ cpf->summary_line = strdup(line); ++ if (cpf->summary_line == NULL) ++ mallocFail(s, "parse_CacheProfFile(6)"); ++ ++ // there should be nothing more ++ line = readline(s); ++ if (line) ++ parseError(s, "parse_CacheProfFile: " ++ "extraneous content after 
SUMMARY line"); ++ ++ // check the summary counts are as expected ++ summaryRead = splitUpCountsLine( s, NULL, &cpf->summary_line[8] ); ++ if (summaryRead == NULL) ++ mallocFail(s, "parse_CacheProfFile(7)"); ++ if (summaryRead->n_counts != cpf->n_events) ++ parseError(s, "parse_CacheProfFile: wrong # counts in SUMMARY line"); ++ for (i = 0; i < summaryRead->n_counts; i++) { ++ if (summaryRead->counts[i] != cpf->summary->counts[i]) { ++ parseError(s, "parse_CacheProfFile: " ++ "computed vs stated SUMMARY counts mismatch"); ++ } ++ } ++ free(summaryRead->counts); ++ sdel_Counts(summaryRead); ++ ++ // since the summary counts are OK, free up the summary_line text ++ // which contains the same info. ++ free(cpf->summary_line); ++ cpf->summary_line = NULL; ++ ++ free(tmp_desclines); ++ free(curr_fn); ++ free(curr_fl); ++ ++ // All looks OK ++ return cpf; ++} ++ ++ ++static void merge_CacheProfInfo ( SOURCE* s, ++ /*MOD*/CacheProfFile* dst, ++ CacheProfFile* src ) ++{ ++ /* For each (filefn, innerMap) in src ++ if filefn not in dst ++ add binding dopy(filefn)->dopy(innerMap) in src ++ else ++ // merge src->innerMap with dst->innerMap ++ for each (lineno, counts) in src->innerMap ++ if lineno not in dst->innerMap ++ add binding lineno->dopy(counts) to dst->innerMap ++ else ++ add counts into dst->innerMap[lineno] ++ */ ++ /* Outer iterator: FileFn* -> WordFM* (inner iterator) ++ Inner iterator: UWord -> Counts* ++ */ ++ FileFn* soKey; ++ WordFM* soVal; ++ WordFM* doVal; ++ UWord siKey; ++ Counts* siVal; ++ Counts* diVal; ++ ++ /* First check mundane things: that the events: lines are ++ identical. */ ++ if (!streq( dst->events_line, src->events_line )) ++ barf(s, "\"events:\" line of most recent file does " ++ "not match those previously processed"); ++ ++ initIterFM( src->outerMap ); ++ ++ // for (filefn, innerMap) in src ++ while (nextIterFM( src->outerMap, (Word*)&soKey, (Word*)&soVal )) { ++ ++ // is filefn in dst? ++ if (! lookupFM( dst->outerMap, (Word*)&doVal, (Word)soKey )) { ++ ++ // no .. add dopy(filefn) -> dopy(innerMap) to src ++ FileFn* c_soKey = dopy_FileFn(soKey); ++ WordFM* c_soVal = dopy_InnerMap(soVal); ++ if ((!c_soKey) || (!c_soVal)) goto oom; ++ addToFM( dst->outerMap, (Word)c_soKey, (Word)c_soVal ); ++ ++ } else { ++ ++ // yes .. merge the two innermaps ++ initIterFM( soVal ); ++ ++ // for (lno, counts) in soVal (source inner map) ++ while (nextIterFM( soVal, (Word*)&siKey, (Word*)&siVal )) { ++ ++ // is lno in the corresponding dst inner map? ++ if (! lookupFM( doVal, (Word*)&diVal, siKey )) { ++ ++ // no .. add lineno->dopy(counts) to dst inner map ++ Counts* c_siVal = dopy_Counts( siVal ); ++ if (!c_siVal) goto oom; ++ addToFM( doVal, siKey, (Word)c_siVal ); ++ ++ } else { ++ ++ // yes .. 
merge counts into dst inner map val ++ addCounts( s, diVal, siVal ); ++ ++ } ++ } ++ ++ } ++ ++ } ++ ++ // add the summaries too ++ addCounts(s, dst->summary, src->summary ); ++ ++ return; ++ ++ oom: ++ mallocFail(s, "merge_CacheProfInfo"); ++} ++ ++static void usage ( void ) ++{ ++ fprintf(stderr, "%s: Merges multiple cachegrind output files into one\n", ++ argv0); ++ fprintf(stderr, "%s: usage: %s [-o outfile] [files-to-merge]\n", ++ argv0, argv0); ++ exit(1); ++} ++ ++int main ( int argc, char** argv ) ++{ ++ Int i; ++ SOURCE src; ++ CacheProfFile *cpf, *cpfTmp; ++ ++ FILE* outfile = NULL; ++ char* outfilename = NULL; ++ Int outfileix = 0; ++ ++ if (argv[0]) ++ argv0 = argv[0]; ++ ++ if (argc < 2) ++ usage(); ++ ++ for (i = 1; i < argc; i++) { ++ if (streq(argv[i], "-h") || streq(argv[i], "--help")) ++ usage(); ++ } ++ ++ /* Scan args, looking for '-o outfilename'. */ ++ for (i = 1; i < argc; i++) { ++ if (streq(argv[i], "-o")) { ++ if (i+1 < argc) { ++ outfilename = argv[i+1]; ++ outfileix = i; ++ break; ++ } else { ++ usage(); ++ } ++ } ++ } ++ ++ cpf = NULL; ++ ++ for (i = 1; i < argc; i++) { ++ ++ if (i == outfileix) { ++ /* Skip '-o' and whatever follows it */ ++ i += 1; ++ continue; ++ } ++ ++ fprintf(stderr, "%s: parsing %s\n", argv0, argv[i]); ++ src.lno = 1; ++ src.filename = argv[i]; ++ src.fp = fopen(src.filename, "r"); ++ if (!src.fp) { ++ perror(argv0); ++ barf(&src, "Cannot open input file"); ++ } ++ assert(src.fp); ++ cpfTmp = parse_CacheProfFile( &src ); ++ fclose(src.fp); ++ ++ /* If this isn't the first file, merge */ ++ if (cpf == NULL) { ++ /* this is the first file */ ++ cpf = cpfTmp; ++ } else { ++ /* not the first file; merge */ ++ fprintf(stderr, "%s: merging %s\n", argv0, argv[i]); ++ merge_CacheProfInfo( &src, cpf, cpfTmp ); ++ ddel_CacheProfFile( cpfTmp ); ++ } ++ ++ } ++ ++ /* Now create the output file. */ ++ ++ if (cpf) { ++ ++ fprintf(stderr, "%s: writing %s\n", ++ argv0, outfilename ? outfilename : "(stdout)" ); ++ ++ /* Write the output. */ ++ if (outfilename) { ++ outfile = fopen(outfilename, "w"); ++ if (!outfile) { ++ fprintf(stderr, "%s: can't create output file %s\n", ++ argv0, outfilename); ++ perror(argv0); ++ exit(1); ++ } ++ } else { ++ outfile = stdout; ++ } ++ ++ show_CacheProfFile( outfile, cpf ); ++ if (ferror(outfile)) { ++ fprintf(stderr, "%s: error writing output file %s\n", ++ argv0, outfilename ? 
outfilename : "(stdout)" ); ++ perror(argv0); ++ if (outfile != stdout) ++ fclose(outfile); ++ exit(1); ++ } ++ ++ fflush(outfile); ++ if (outfile != stdout) ++ fclose( outfile ); ++ ++ ddel_CacheProfFile( cpf ); ++ } ++ ++ return 0; ++} ++ ++ ++//------------------------------------------------------------------// ++//--- WordFM ---// ++//--- Implementation ---// ++//------------------------------------------------------------------// ++ ++/* ------------ Implementation ------------ */ ++ ++/* One element of the AVL tree */ ++typedef ++ struct _AvlNode { ++ Word key; ++ Word val; ++ struct _AvlNode* left; ++ struct _AvlNode* right; ++ Char balance; ++ } ++ AvlNode; ++ ++typedef ++ struct { ++ Word w; ++ Bool b; ++ } ++ MaybeWord; ++ ++#define WFM_STKMAX 32 // At most 2**32 entries can be iterated over ++ ++struct _WordFM { ++ AvlNode* root; ++ void* (*alloc_nofail)( SizeT ); ++ void (*dealloc)(void*); ++ Word (*kCmp)(Word,Word); ++ AvlNode* nodeStack[WFM_STKMAX]; // Iterator node stack ++ Int numStack[WFM_STKMAX]; // Iterator num stack ++ Int stackTop; // Iterator stack pointer, one past end ++}; ++ ++/* forward */ ++static Bool avl_removeroot_wrk(AvlNode** t, Word(*kCmp)(Word,Word)); ++ ++/* Swing to the left. Warning: no balance maintenance. */ ++static void avl_swl ( AvlNode** root ) ++{ ++ AvlNode* a = *root; ++ AvlNode* b = a->right; ++ *root = b; ++ a->right = b->left; ++ b->left = a; ++} ++ ++/* Swing to the right. Warning: no balance maintenance. */ ++static void avl_swr ( AvlNode** root ) ++{ ++ AvlNode* a = *root; ++ AvlNode* b = a->left; ++ *root = b; ++ a->left = b->right; ++ b->right = a; ++} ++ ++/* Balance maintenance after especially nasty swings. */ ++static void avl_nasty ( AvlNode* root ) ++{ ++ switch (root->balance) { ++ case -1: ++ root->left->balance = 0; ++ root->right->balance = 1; ++ break; ++ case 1: ++ root->left->balance = -1; ++ root->right->balance = 0; ++ break; ++ case 0: ++ root->left->balance = 0; ++ root->right->balance = 0; ++ break; ++ default: ++ assert(0); ++ } ++ root->balance=0; ++} ++ ++/* Find size of a non-NULL tree. */ ++static Word size_avl_nonNull ( AvlNode* nd ) ++{ ++ return 1 + (nd->left ? size_avl_nonNull(nd->left) : 0) ++ + (nd->right ? size_avl_nonNull(nd->right) : 0); ++} ++ ++/* Insert element a into the AVL tree t. Returns True if the depth of ++ the tree has grown. If element with that key is already present, ++ just copy a->val to existing node, first returning old ->val field ++ of existing node in *oldV, so that the caller can finalize it ++ however it wants. ++*/ ++static ++Bool avl_insert_wrk ( AvlNode** rootp, ++ /*OUT*/MaybeWord* oldV, ++ AvlNode* a, ++ Word (*kCmp)(Word,Word) ) ++{ ++ Word cmpres; ++ ++ /* initialize */ ++ a->left = 0; ++ a->right = 0; ++ a->balance = 0; ++ oldV->b = False; ++ ++ /* insert into an empty tree? 
*/ ++ if (!(*rootp)) { ++ (*rootp) = a; ++ return True; ++ } ++ ++ cmpres = kCmp( (*rootp)->key, a->key ); ++ ++ if (cmpres > 0) { ++ /* insert into the left subtree */ ++ if ((*rootp)->left) { ++ AvlNode* left_subtree = (*rootp)->left; ++ if (avl_insert_wrk(&left_subtree, oldV, a, kCmp)) { ++ switch ((*rootp)->balance--) { ++ case 1: return False; ++ case 0: return True; ++ case -1: break; ++ default: assert(0); ++ } ++ if ((*rootp)->left->balance < 0) { ++ avl_swr( rootp ); ++ (*rootp)->balance = 0; ++ (*rootp)->right->balance = 0; ++ } else { ++ avl_swl( &((*rootp)->left) ); ++ avl_swr( rootp ); ++ avl_nasty( *rootp ); ++ } ++ } else { ++ (*rootp)->left = left_subtree; ++ } ++ return False; ++ } else { ++ (*rootp)->left = a; ++ if ((*rootp)->balance--) ++ return False; ++ return True; ++ } ++ assert(0);/*NOTREACHED*/ ++ } ++ else ++ if (cmpres < 0) { ++ /* insert into the right subtree */ ++ if ((*rootp)->right) { ++ AvlNode* right_subtree = (*rootp)->right; ++ if (avl_insert_wrk(&right_subtree, oldV, a, kCmp)) { ++ switch((*rootp)->balance++){ ++ case -1: return False; ++ case 0: return True; ++ case 1: break; ++ default: assert(0); ++ } ++ if ((*rootp)->right->balance > 0) { ++ avl_swl( rootp ); ++ (*rootp)->balance = 0; ++ (*rootp)->left->balance = 0; ++ } else { ++ avl_swr( &((*rootp)->right) ); ++ avl_swl( rootp ); ++ avl_nasty( *rootp ); ++ } ++ } else { ++ (*rootp)->right = right_subtree; ++ } ++ return False; ++ } else { ++ (*rootp)->right = a; ++ if ((*rootp)->balance++) ++ return False; ++ return True; ++ } ++ assert(0);/*NOTREACHED*/ ++ } ++ else { ++ /* cmpres == 0, a duplicate - replace the val, but don't ++ incorporate the node in the tree */ ++ oldV->b = True; ++ oldV->w = (*rootp)->val; ++ (*rootp)->val = a->val; ++ return False; ++ } ++} ++ ++/* Remove an element a from the AVL tree t. a must be part of ++ the tree. Returns True if the depth of the tree has shrunk. 
++*/ ++static ++Bool avl_remove_wrk ( AvlNode** rootp, ++ AvlNode* a, ++ Word(*kCmp)(Word,Word) ) ++{ ++ Bool ch; ++ Word cmpres = kCmp( (*rootp)->key, a->key ); ++ ++ if (cmpres > 0){ ++ /* remove from the left subtree */ ++ AvlNode* left_subtree = (*rootp)->left; ++ assert(left_subtree); ++ ch = avl_remove_wrk(&left_subtree, a, kCmp); ++ (*rootp)->left=left_subtree; ++ if (ch) { ++ switch ((*rootp)->balance++) { ++ case -1: return True; ++ case 0: return False; ++ case 1: break; ++ default: assert(0); ++ } ++ switch ((*rootp)->right->balance) { ++ case 0: ++ avl_swl( rootp ); ++ (*rootp)->balance = -1; ++ (*rootp)->left->balance = 1; ++ return False; ++ case 1: ++ avl_swl( rootp ); ++ (*rootp)->balance = 0; ++ (*rootp)->left->balance = 0; ++ return -1; ++ case -1: ++ break; ++ default: ++ assert(0); ++ } ++ avl_swr( &((*rootp)->right) ); ++ avl_swl( rootp ); ++ avl_nasty( *rootp ); ++ return True; ++ } ++ } ++ else ++ if (cmpres < 0) { ++ /* remove from the right subtree */ ++ AvlNode* right_subtree = (*rootp)->right; ++ assert(right_subtree); ++ ch = avl_remove_wrk(&right_subtree, a, kCmp); ++ (*rootp)->right = right_subtree; ++ if (ch) { ++ switch ((*rootp)->balance--) { ++ case 1: return True; ++ case 0: return False; ++ case -1: break; ++ default: assert(0); ++ } ++ switch ((*rootp)->left->balance) { ++ case 0: ++ avl_swr( rootp ); ++ (*rootp)->balance = 1; ++ (*rootp)->right->balance = -1; ++ return False; ++ case -1: ++ avl_swr( rootp ); ++ (*rootp)->balance = 0; ++ (*rootp)->right->balance = 0; ++ return True; ++ case 1: ++ break; ++ default: ++ assert(0); ++ } ++ avl_swl( &((*rootp)->left) ); ++ avl_swr( rootp ); ++ avl_nasty( *rootp ); ++ return True; ++ } ++ } ++ else { ++ assert(cmpres == 0); ++ assert((*rootp)==a); ++ return avl_removeroot_wrk(rootp, kCmp); ++ } ++ return 0; ++} ++ ++/* Remove the root of the AVL tree *rootp. ++ * Warning: dumps core if *rootp is empty ++ */ ++static ++Bool avl_removeroot_wrk ( AvlNode** rootp, ++ Word(*kCmp)(Word,Word) ) ++{ ++ Bool ch; ++ AvlNode* a; ++ if (!(*rootp)->left) { ++ if (!(*rootp)->right) { ++ (*rootp) = 0; ++ return True; ++ } ++ (*rootp) = (*rootp)->right; ++ return True; ++ } ++ if (!(*rootp)->right) { ++ (*rootp) = (*rootp)->left; ++ return True; ++ } ++ if ((*rootp)->balance < 0) { ++ /* remove from the left subtree */ ++ a = (*rootp)->left; ++ while (a->right) a = a->right; ++ } else { ++ /* remove from the right subtree */ ++ a = (*rootp)->right; ++ while (a->left) a = a->left; ++ } ++ ch = avl_remove_wrk(rootp, a, kCmp); ++ a->left = (*rootp)->left; ++ a->right = (*rootp)->right; ++ a->balance = (*rootp)->balance; ++ (*rootp) = a; ++ if(a->balance == 0) return ch; ++ return False; ++} ++ ++static ++AvlNode* avl_find_node ( AvlNode* t, Word k, Word(*kCmp)(Word,Word) ) ++{ ++ Word cmpres; ++ while (True) { ++ if (t == NULL) return NULL; ++ cmpres = kCmp(t->key, k); ++ if (cmpres > 0) t = t->left; else ++ if (cmpres < 0) t = t->right; else ++ return t; ++ } ++} ++ ++// Clear the iterator stack. ++static void stackClear(WordFM* fm) ++{ ++ Int i; ++ assert(fm); ++ for (i = 0; i < WFM_STKMAX; i++) { ++ fm->nodeStack[i] = NULL; ++ fm->numStack[i] = 0; ++ } ++ fm->stackTop = 0; ++} ++ ++// Push onto the iterator stack. ++static inline void stackPush(WordFM* fm, AvlNode* n, Int i) ++{ ++ assert(fm->stackTop < WFM_STKMAX); ++ assert(1 <= i && i <= 3); ++ fm->nodeStack[fm->stackTop] = n; ++ fm-> numStack[fm->stackTop] = i; ++ fm->stackTop++; ++} ++ ++// Pop from the iterator stack. 
++static inline Bool stackPop(WordFM* fm, AvlNode** n, Int* i) ++{ ++ assert(fm->stackTop <= WFM_STKMAX); ++ ++ if (fm->stackTop > 0) { ++ fm->stackTop--; ++ *n = fm->nodeStack[fm->stackTop]; ++ *i = fm-> numStack[fm->stackTop]; ++ assert(1 <= *i && *i <= 3); ++ fm->nodeStack[fm->stackTop] = NULL; ++ fm-> numStack[fm->stackTop] = 0; ++ return True; ++ } else { ++ return False; ++ } ++} ++ ++static ++AvlNode* avl_dopy ( AvlNode* nd, ++ Word(*dopyK)(Word), ++ Word(*dopyV)(Word), ++ void*(alloc_nofail)(SizeT) ) ++{ ++ AvlNode* nyu; ++ if (! nd) ++ return NULL; ++ nyu = alloc_nofail(sizeof(AvlNode)); ++ assert(nyu); ++ ++ nyu->left = nd->left; ++ nyu->right = nd->right; ++ nyu->balance = nd->balance; ++ ++ /* Copy key */ ++ if (dopyK) { ++ nyu->key = dopyK( nd->key ); ++ if (nd->key != 0 && nyu->key == 0) ++ return NULL; /* oom in key dcopy */ ++ } else { ++ /* copying assumedly unboxed keys */ ++ nyu->key = nd->key; ++ } ++ ++ /* Copy val */ ++ if (dopyV) { ++ nyu->val = dopyV( nd->val ); ++ if (nd->val != 0 && nyu->val == 0) ++ return NULL; /* oom in val dcopy */ ++ } else { ++ /* copying assumedly unboxed vals */ ++ nyu->val = nd->val; ++ } ++ ++ /* Copy subtrees */ ++ if (nyu->left) { ++ nyu->left = avl_dopy( nyu->left, dopyK, dopyV, alloc_nofail ); ++ if (! nyu->left) ++ return NULL; ++ } ++ if (nyu->right) { ++ nyu->right = avl_dopy( nyu->right, dopyK, dopyV, alloc_nofail ); ++ if (! nyu->right) ++ return NULL; ++ } ++ ++ return nyu; ++} ++ ++/* --- Public interface functions --- */ ++ ++/* Initialise a WordFM. */ ++void initFM ( WordFM* fm, ++ void* (*alloc_nofail)( SizeT ), ++ void (*dealloc)(void*), ++ Word (*kCmp)(Word,Word) ) ++{ ++ fm->root = 0; ++ fm->kCmp = kCmp; ++ fm->alloc_nofail = alloc_nofail; ++ fm->dealloc = dealloc; ++ fm->stackTop = 0; ++} ++ ++/* Allocate and Initialise a WordFM. */ ++WordFM* newFM( void* (*alloc_nofail)( SizeT ), ++ void (*dealloc)(void*), ++ Word (*kCmp)(Word,Word) ) ++{ ++ WordFM* fm = alloc_nofail(sizeof(WordFM)); ++ assert(fm); ++ initFM(fm, alloc_nofail, dealloc, kCmp); ++ return fm; ++} ++ ++static void avl_free ( AvlNode* nd, ++ void(*kFin)(Word), ++ void(*vFin)(Word), ++ void(*dealloc)(void*) ) ++{ ++ if (!nd) ++ return; ++ if (nd->left) ++ avl_free(nd->left, kFin, vFin, dealloc); ++ if (nd->right) ++ avl_free(nd->right, kFin, vFin, dealloc); ++ if (kFin) ++ kFin( nd->key ); ++ if (vFin) ++ vFin( nd->val ); ++ memset(nd, 0, sizeof(AvlNode)); ++ dealloc(nd); ++} ++ ++/* Free up the FM. If kFin is non-NULL, it is applied to keys ++ before the FM is deleted; ditto with vFin for vals. */ ++void deleteFM ( WordFM* fm, void(*kFin)(Word), void(*vFin)(Word) ) ++{ ++ void(*dealloc)(void*) = fm->dealloc; ++ avl_free( fm->root, kFin, vFin, dealloc ); ++ memset(fm, 0, sizeof(WordFM) ); ++ dealloc(fm); ++} ++ ++/* Add (k,v) to fm. 
*/ ++void addToFM ( WordFM* fm, Word k, Word v ) ++{ ++ MaybeWord oldV; ++ AvlNode* node; ++ node = fm->alloc_nofail( sizeof(struct _AvlNode) ); ++ node->key = k; ++ node->val = v; ++ oldV.b = False; ++ oldV.w = 0; ++ avl_insert_wrk( &fm->root, &oldV, node, fm->kCmp ); ++ //if (oldV.b && fm->vFin) ++ // fm->vFin( oldV.w ); ++ if (oldV.b) ++ free(node); ++} ++ ++// Delete key from fm, returning associated val if found ++Bool delFromFM ( WordFM* fm, /*OUT*/Word* oldV, Word key ) ++{ ++ AvlNode* node = avl_find_node( fm->root, key, fm->kCmp ); ++ if (node) { ++ avl_remove_wrk( &fm->root, node, fm->kCmp ); ++ if (oldV) ++ *oldV = node->val; ++ fm->dealloc(node); ++ return True; ++ } else { ++ return False; ++ } ++} ++ ++// Look up in fm, assigning found val at spec'd address ++Bool lookupFM ( WordFM* fm, /*OUT*/Word* valP, Word key ) ++{ ++ AvlNode* node = avl_find_node( fm->root, key, fm->kCmp ); ++ if (node) { ++ if (valP) ++ *valP = node->val; ++ return True; ++ } else { ++ return False; ++ } ++} ++ ++Word sizeFM ( WordFM* fm ) ++{ ++ // Hmm, this is a bad way to do this ++ return fm->root ? size_avl_nonNull( fm->root ) : 0; ++} ++ ++// set up FM for iteration ++void initIterFM ( WordFM* fm ) ++{ ++ assert(fm); ++ stackClear(fm); ++ if (fm->root) ++ stackPush(fm, fm->root, 1); ++} ++ ++// get next key/val pair. Will assert if fm has been modified ++// or looked up in since initIterFM was called. ++Bool nextIterFM ( WordFM* fm, /*OUT*/Word* pKey, /*OUT*/Word* pVal ) ++{ ++ Int i = 0; ++ AvlNode* n = NULL; ++ ++ assert(fm); ++ ++ // This in-order traversal requires each node to be pushed and popped ++ // three times. These could be avoided by updating nodes in-situ on the ++ // top of the stack, but the push/pop cost is so small that it's worth ++ // keeping this loop in this simpler form. ++ while (stackPop(fm, &n, &i)) { ++ switch (i) { ++ case 1: ++ stackPush(fm, n, 2); ++ if (n->left) stackPush(fm, n->left, 1); ++ break; ++ case 2: ++ stackPush(fm, n, 3); ++ if (pKey) *pKey = n->key; ++ if (pVal) *pVal = n->val; ++ return True; ++ case 3: ++ if (n->right) stackPush(fm, n->right, 1); ++ break; ++ default: ++ assert(0); ++ } ++ } ++ ++ // Stack empty, iterator is exhausted, return NULL ++ return False; ++} ++ ++// clear the I'm iterating flag ++void doneIterFM ( WordFM* fm ) ++{ ++} ++ ++WordFM* dopyFM ( WordFM* fm, Word(*dopyK)(Word), Word(*dopyV)(Word) ) ++{ ++ WordFM* nyu; ++ ++ /* can't clone the fm whilst iterating on it */ ++ assert(fm->stackTop == 0); ++ ++ nyu = fm->alloc_nofail( sizeof(WordFM) ); ++ assert(nyu); ++ ++ *nyu = *fm; ++ ++ fm->stackTop = 0; ++ memset(fm->nodeStack, 0, sizeof(fm->nodeStack)); ++ memset(fm->numStack, 0, sizeof(fm->numStack)); ++ ++ if (nyu->root) { ++ nyu->root = avl_dopy( nyu->root, dopyK, dopyV, fm->alloc_nofail ); ++ if (! nyu->root) ++ return NULL; ++ } ++ ++ return nyu; ++} ++ ++//------------------------------------------------------------------// ++//--- end WordFM ---// ++//--- Implementation ---// ++//------------------------------------------------------------------// ++ ++/*--------------------------------------------------------------------*/ ++/*--- end cg_merge.c ---*/ ++/*--------------------------------------------------------------------*/ +diff '--color=auto' -ru --new-file valgrind-3.21.0/cachegrind/cg_merge.in valgrind-riscv64/cachegrind/cg_merge.in +--- valgrind-3.21.0/cachegrind/cg_merge.in 2023-04-21 21:20:47.000000000 +0800 ++++ valgrind-riscv64/cachegrind/cg_merge.in 1970-01-01 08:00:00.000000000 +0800 +@@ -1,314 +0,0 @@ +-#! 
/usr/bin/env python3 +-# pyright: strict +- +-# -------------------------------------------------------------------- +-# --- Cachegrind's merger. cg_merge.in --- +-# -------------------------------------------------------------------- +- +-# This file is part of Cachegrind, a Valgrind tool for cache +-# profiling programs. +-# +-# Copyright (C) 2002-2023 Nicholas Nethercote +-# njn@valgrind.org +-# +-# This program is free software; you can redistribute it and/or +-# modify it under the terms of the GNU General Public License as +-# published by the Free Software Foundation; either version 2 of the +-# License, or (at your option) any later version. +-# +-# This program is distributed in the hope that it will be useful, but +-# WITHOUT ANY WARRANTY; without even the implied warranty of +-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +-# General Public License for more details. +-# +-# You should have received a copy of the GNU General Public License +-# along with this program; if not, see . +-# +-# The GNU General Public License is contained in the file COPYING. +- +-# This script merges Cachegrind output files. +-# +-# Use `make pymerge` to "build" this script every time it is changed. This runs +-# the formatters, type-checkers, and linters on `cg_merge.in` and then +-# generates `cg_merge`. +-# +-# This is a cut-down version of `cg_annotate.in`. +- +-from __future__ import annotations +- +-import re +-import sys +-from argparse import ArgumentParser, Namespace +-from collections import defaultdict +-from typing import DefaultDict, NoReturn, TextIO +- +- +-# A typed wrapper for parsed args. +-class Args(Namespace): +- # None of these fields are modified after arg parsing finishes. +- output: str +- cgout_filename: list[str] +- +- @staticmethod +- def parse() -> Args: +- desc = ( +- "Merge multiple Cachegrind output files. Deprecated; use " +- "`cg_annotate` with multiple Cachegrind output files instead." +- ) +- p = ArgumentParser(description=desc) +- +- p.add_argument("--version", action="version", version="%(prog)s-@VERSION@") +- +- p.add_argument( +- "-o", +- dest="output", +- type=str, +- metavar="FILE", +- help="output file (default: stdout)", +- ) +- +- p.add_argument( +- "cgout_filename", +- nargs="+", +- metavar="cachegrind-out-file", +- help="file produced by Cachegrind", +- ) +- +- return p.parse_args(namespace=Args()) +- +- +-# Args are stored in a global for easy access. +-args = Args.parse() +- +-# A single instance of this class is constructed, from `args` and the `events:` +-# line in the cgout file. +-class Events: +- # The event names. +- events: list[str] +- +- def __init__(self, text: str) -> None: +- self.events = text.split() +- self.num_events = len(self.events) +- +- # Raises a `ValueError` exception on syntax error. +- def mk_cc(self, str_counts: list[str]) -> Cc: +- # This is slightly faster than a list comprehension. +- counts = list(map(int, str_counts)) +- +- if len(counts) == self.num_events: +- pass +- elif len(counts) < self.num_events: +- # Add zeroes at the end for any missing numbers. +- counts.extend([0] * (self.num_events - len(counts))) +- else: +- raise ValueError +- +- return counts +- +- def mk_empty_cc(self) -> Cc: +- # This is much faster than a list comprehension. +- return [0] * self.num_events +- +- +-# A "cost centre", which is a dumb container for counts. Always the same length +-# as `Events.events`, but it doesn't even know event names. `Events.mk_cc` and +-# `Events.mk_empty_cc` are used for construction. 
+-# +-# This used to be a class with a single field `counts: list[int]`, but this +-# type is very hot and just using a type alias is much faster. +-Cc = list[int] +- +- +-# Add the counts in `a_cc` to `b_cc`. +-def add_cc_to_cc(a_cc: Cc, b_cc: Cc) -> None: +- for i, a_count in enumerate(a_cc): +- b_cc[i] += a_count +- +- +-# Per-line CCs, organised by filename, function name, and line number. +-DictLineCc = DefaultDict[int, Cc] +-DictFnDictLineCc = DefaultDict[str, DictLineCc] +-DictFlDictFnDictLineCc = DefaultDict[str, DictFnDictLineCc] +- +- +-def die(msg: str) -> NoReturn: +- print("cg_merge: error:", msg, file=sys.stderr) +- sys.exit(1) +- +- +-def read_cgout_file( +- cgout_filename: str, +- is_first_file: bool, +- cumul_dict_fl_dict_fn_dict_line_cc: DictFlDictFnDictLineCc, +- cumul_summary_cc: Cc, +-) -> tuple[list[str], str, Events]: +- # The file format is described in Cachegrind's manual. +- try: +- cgout_file = open(cgout_filename, "r", encoding="utf-8") +- except OSError as err: +- die(f"{err}") +- +- with cgout_file: +- cgout_line_num = 0 +- +- def parse_die(msg: str) -> NoReturn: +- die(f"{cgout_file.name}:{cgout_line_num}: {msg}") +- +- def readline() -> str: +- nonlocal cgout_line_num +- cgout_line_num += 1 +- return cgout_file.readline() +- +- # Read "desc:" lines. +- desc: list[str] = [] +- while line := readline(): +- if m := re.match(r"desc:\s+(.*)", line): +- desc.append(m.group(1)) +- else: +- break +- +- # Read "cmd:" line. (`line` is already set from the "desc:" loop.) +- if m := re.match(r"cmd:\s+(.*)", line): +- cmd = m.group(1) +- else: +- parse_die("missing a `command:` line") +- +- # Read "events:" line. +- line = readline() +- if m := re.match(r"events:\s+(.*)", line): +- events = Events(m.group(1)) +- else: +- parse_die("missing an `events:` line") +- +- def mk_empty_dict_line_cc() -> DictLineCc: +- return defaultdict(events.mk_empty_cc) +- +- def mk_empty_dict_fn_dict_line_cc() -> DictFnDictLineCc: +- return defaultdict(mk_empty_dict_line_cc) +- +- summary_cc_present = False +- +- fl = "" +- fn = "" +- +- # The `cumul_*` values are passed in by reference and are modified by +- # this function. But they can't be properly initialized until the +- # `events:` line of the first file is read and the number of events is +- # known. So we initialize them in an invalid state, and then +- # reinitialize them properly here, before their first use. +- if is_first_file: +- cumul_dict_fl_dict_fn_dict_line_cc.default_factory = ( +- mk_empty_dict_fn_dict_line_cc +- ) +- cumul_summary_cc.extend(events.mk_empty_cc()) +- +- # Line matching is done in order of pattern frequency, for speed. +- while line := readline(): +- if line[0].isdigit(): +- split_line = line.split() +- try: +- line_num = int(split_line[0]) +- cc = events.mk_cc(split_line[1:]) +- except ValueError: +- parse_die("malformed or too many event counts") +- +- # Record this CC at the file/func/line level. +- add_cc_to_cc(cc, cumul_dict_fl_dict_fn_dict_line_cc[fl][fn][line_num]) +- +- elif line.startswith("fn="): +- fn = line[3:-1] +- +- elif line.startswith("fl="): +- fl = line[3:-1] +- # A `fn=` line should follow, overwriting the "???". +- fn = "???" +- +- elif m := re.match(r"summary:\s+(.*)", line): +- summary_cc_present = True +- try: +- add_cc_to_cc(events.mk_cc(m.group(1).split()), cumul_summary_cc) +- except ValueError: +- parse_die("malformed or too many event counts") +- +- elif line == "\n" or line.startswith("#"): +- # Skip empty lines and comment lines. 
+- pass +- +- else: +- parse_die(f"malformed line: {line[:-1]}") +- +- # Check if summary line was present. +- if not summary_cc_present: +- parse_die("missing `summary:` line, aborting") +- +- # In `cg_annotate.in` and `cg_diff.in` we check that the file's summary CC +- # matches the totals of the file's individual CCs, but not here. That's +- # because in this script we don't collect the file's CCs in isolation, +- # instead we just add them to the accumulated CCs, for speed. This makes it +- # difficult to do the per-file checking. +- +- return (desc, cmd, events) +- +- +-def main() -> None: +- desc1: list[str] | None = None +- cmd1 = None +- events1 = None +- +- # Different places where we accumulate CC data. Initialized to invalid +- # states prior to the number of events being known. +- cumul_dict_fl_dict_fn_dict_line_cc: DictFlDictFnDictLineCc = defaultdict(None) +- cumul_summary_cc: Cc = [] +- +- for n, filename in enumerate(args.cgout_filename): +- is_first_file = n == 0 +- (desc_n, cmd_n, events_n) = read_cgout_file( +- filename, +- is_first_file, +- cumul_dict_fl_dict_fn_dict_line_cc, +- cumul_summary_cc, +- ) +- # We reuse the description and command from the first file, like the +- # the old C version of `cg_merge`. +- if is_first_file: +- desc1 = desc_n +- cmd1 = cmd_n +- events1 = events_n +- else: +- assert events1 +- if events1.events != events_n.events: +- die("events in data files don't match") +- +- def write_output(f: TextIO) -> None: +- # These assertions hold because the loop above executes at least twice. +- assert desc1 +- assert events1 +- assert cumul_dict_fl_dict_fn_dict_line_cc is not None +- assert cumul_summary_cc +- +- for desc_line in desc1: +- print("desc:", desc_line, file=f) +- print("cmd:", cmd1, file=f) +- print("events:", *events1.events, sep=" ", file=f) +- +- for fl, dict_fn_dict_line_cc in cumul_dict_fl_dict_fn_dict_line_cc.items(): +- print(f"fl={fl}", file=f) +- for fn, dict_line_cc in dict_fn_dict_line_cc.items(): +- print(f"fn={fn}", file=f) +- for line, cc in dict_line_cc.items(): +- print(line, *cc, file=f) +- +- print("summary:", *cumul_summary_cc, sep=" ", file=f) +- +- if args.output: +- try: +- with open(args.output, "w", encoding="utf-8") as f: +- write_output(f) +- except OSError as err: +- die(f"{err}") +- else: +- write_output(sys.stdout) +- +- +-if __name__ == "__main__": +- main() +diff '--color=auto' -ru --new-file valgrind-3.21.0/cachegrind/docs/cg_annotate-manpage.xml valgrind-riscv64/cachegrind/docs/cg_annotate-manpage.xml +--- valgrind-3.21.0/cachegrind/docs/cg_annotate-manpage.xml 2023-04-21 21:20:47.000000000 +0800 ++++ valgrind-riscv64/cachegrind/docs/cg_annotate-manpage.xml 2022-09-21 06:23:46.000000000 +0800 +@@ -30,9 +30,8 @@ + + Description + +- +-cg_annotate takes one or more Cachegrind output files and +-prints data about the profiled program in an easy-to-read form. ++cg_annotate takes an output file produced by the ++Valgrind tool Cachegrind and prints the information in an easy-to-read form. 
+ + + +diff '--color=auto' -ru --new-file valgrind-3.21.0/cachegrind/docs/cg_diff-manpage.xml valgrind-riscv64/cachegrind/docs/cg_diff-manpage.xml +--- valgrind-3.21.0/cachegrind/docs/cg_diff-manpage.xml 2023-04-21 21:20:47.000000000 +0800 ++++ valgrind-riscv64/cachegrind/docs/cg_diff-manpage.xml 2022-09-21 06:23:46.000000000 +0800 +@@ -14,7 +14,7 @@ + + + cg_diff +- (deprecated) diffs two Cachegrind output files ++ compares two Cachegrind output files + + + +@@ -30,10 +30,9 @@ + + Description + +- +-cg_diff diffs two Cachegrind output files into a single +-Cachegrind output file. It is deprecated because cg_annotate +-can now do much the same thing, but better. ++cg_diff takes two output files produced by the ++Valgrind tool Cachegrind, computes the difference and prints the result ++in the same format that Cachegrinds outputs. + + + +diff '--color=auto' -ru --new-file valgrind-3.21.0/cachegrind/docs/cg-manual.xml valgrind-riscv64/cachegrind/docs/cg-manual.xml +--- valgrind-3.21.0/cachegrind/docs/cg-manual.xml 2023-04-21 21:20:47.000000000 +0800 ++++ valgrind-riscv64/cachegrind/docs/cg-manual.xml 2022-09-21 06:23:46.000000000 +0800 +@@ -5,909 +5,862 @@ + + + +-Cachegrind: a high-precision tracing profiler ++Cachegrind: a cache and branch-prediction profiler + +- +-To use this tool, specify on the Valgrind +-command line. +- ++To use this tool, you must specify ++ on the ++Valgrind command line. + + + Overview + +- +-Cachegrind is a high-precision tracing profiler. It runs slowly, but collects +-precise and reproducible profiling data. It can merge and diff data from +-different runs. To expand on these characteristics: +- ++Cachegrind simulates how your program interacts with a machine's cache ++hierarchy and (optionally) branch predictor. It simulates a machine with ++independent first-level instruction and data caches (I1 and D1), backed by a ++unified second-level cache (L2). This exactly matches the configuration of ++many modern machines. ++ ++However, some modern machines have three or four levels of cache. For these ++machines (in the cases where Cachegrind can auto-detect the cache ++configuration) Cachegrind simulates the first-level and last-level caches. ++The reason for this choice is that the last-level cache has the most influence on ++runtime, as it masks accesses to main memory. Furthermore, the L1 caches ++often have low associativity, so simulating them can detect cases where the ++code interacts badly with this cache (eg. traversing a matrix column-wise ++with the row length being a power of 2). + ++Therefore, Cachegrind always refers to the I1, D1 and LL (last-level) ++caches. ++ ++ ++Cachegrind gathers the following statistics (abbreviations used for each statistic ++is given in parentheses): + + +- +- Precise. Cachegrind measures the exact number of +- instructions executed by your program, not an approximation. Furthermore, +- it presents the gathered data at the file, function, and line level. This +- is different to many other profilers that measure approximate execution +- time, using sampling, and only at the function level. ++ I cache reads (Ir, ++ which equals the number of instructions executed), ++ I1 cache read misses (I1mr) and ++ LL cache instruction read misses (ILmr). ++ ++ ++ ++ D cache reads (Dr, which ++ equals the number of memory reads), ++ D1 cache read misses (D1mr), and ++ LL cache data read misses (DLmr). + + +- + +- +- Reproducible. In general, execution time is a better +- metric than instruction counts because it's what users perceive. 
However, +- execution time often has high variability. When running the exact same +- program on the exact same input multiple times, execution time might vary +- by several percent. Furthermore, small changes in a program can change its +- memory layout and have even larger effects on runtime. In contrast, +- instruction counts are highly reproducible; for some programs they are +- perfectly reproducible. This means the effects of small changes in a +- program can be measured with high precision. ++ D cache writes (Dw, which equals ++ the number of memory writes), ++ D1 cache write misses (D1mw), and ++ LL cache data write misses (DLmw). ++ ++ ++ ++ Conditional branches executed (Bc) and ++ conditional branches mispredicted (Bcm). ++ ++ ++ ++ Indirect branches executed (Bi) and ++ indirect branches mispredicted (Bim). + + + + +- +-For these reasons, Cachegrind is an excellent complement to time-based profilers. +- +- +- +-Cachegrind can annotate programs written in any language, so long as debug info +-is present to map machine code back to the original source code. Cachegrind has +-been used successfully on programs written in C, C++, Rust, and assembly. ++Note that D1 total accesses is given by ++D1mr + ++D1mw, and that LL total ++accesses is given by ILmr + ++DLmr + ++DLmw. + + +- +-Cachegrind can also simulate how your program interacts with a machine's cache +-hierarchy and branch predictor. This simulation was the original motivation for +-the tool, hence its name. However, the simulations are basic and unlikely to +-reflect the behaviour of a modern machine. For this reason they are off by +-default. If you really want cache and branch information, a profiler like +-perf that accesses hardware counters is a +-better choice. +- ++These statistics are presented for the entire program and for each ++function in the program. You can also annotate each line of source code in ++the program with the counts that were caused directly by it. ++ ++On a modern machine, an L1 miss will typically cost ++around 10 cycles, an LL miss can cost as much as 200 ++cycles, and a mispredicted branch costs in the region of 10 ++to 30 cycles. Detailed cache and branch profiling can be very useful ++for understanding how your program interacts with the machine and thus how ++to make it faster. ++ ++Also, since one instruction cache read is performed per ++instruction executed, you can find out how many instructions are ++executed per line, which can be useful for traditional profiling. + + + + +- +-Using Cachegrind and cg_annotate +- +- +-First, as for normal Valgrind use, you should compile with debugging info (the +- option in most compilers). But by contrast with normal +-Valgrind use, you probably do want to turn optimisation on, since you should +-profile your program as it will be normally run. +- + +- +-Second, run Cachegrind itself to gather the profiling data. +- ++ ++Using Cachegrind, cg_annotate and cg_merge + +- +-Third, run cg_annotate to get a detailed presentation of that data. cg_annotate +-can combine the results of multiple Cachegrind output files. It can also +-perform a diff between two Cachegrind output files. +- ++First off, as for normal Valgrind use, you probably want to ++compile with debugging info (the ++ option). But by contrast with ++normal Valgrind use, you probably do want to turn ++optimisation on, since you should profile your program as it will ++be normally run. 
++ ++Then, you need to run Cachegrind itself to gather the profiling ++information, and then run cg_annotate to get a detailed presentation of that ++information. As an optional intermediate step, you can use cg_merge to sum ++together the outputs of multiple Cachegrind runs into a single file which ++you then use as the input for cg_annotate. Alternatively, you can use ++cg_diff to difference the outputs of two Cachegrind runs into a single file ++which you then use as the input for cg_annotate. + + + + Running Cachegrind + +- +-To run Cachegrind on a program prog, run: ++To run Cachegrind on a program prog, run: + +- + +- +-The program will execute (slowly). Upon completion, summary statistics that +-look like this will be printed: +- ++The program will execute (slowly). Upon completion, ++summary statistics that look like this will be printed: + + +- +- +-The I refs number is short for "Instruction +-cache references", which is equivalent to "instructions executed". If you +-enable the cache and/or branch simulation, additional counts will be shown. ++==31751== I refs: 27,742,716 ++==31751== I1 misses: 276 ++==31751== LLi misses: 275 ++==31751== I1 miss rate: 0.0% ++==31751== LLi miss rate: 0.0% ++==31751== ++==31751== D refs: 15,430,290 (10,955,517 rd + 4,474,773 wr) ++==31751== D1 misses: 41,185 ( 21,905 rd + 19,280 wr) ++==31751== LLd misses: 23,085 ( 3,987 rd + 19,098 wr) ++==31751== D1 miss rate: 0.2% ( 0.1% + 0.4%) ++==31751== LLd miss rate: 0.1% ( 0.0% + 0.4%) ++==31751== ++==31751== LL misses: 23,360 ( 4,262 rd + 19,098 wr) ++==31751== LL miss rate: 0.0% ( 0.0% + 0.4%)]]> ++ ++Cache accesses for instruction fetches are summarised ++first, giving the number of fetches made (this is the number of ++instructions executed, which can be useful to know in its own ++right), the number of I1 misses, and the number of LL instruction ++(LLi) misses. ++ ++Cache accesses for data follow. The information is similar ++to that of the instruction fetches, except that the values are ++also shown split between reads and writes (note each row's ++rd and ++wr values add up to the row's ++total). ++ ++Combined instruction and data figures for the LL cache ++follow that. Note that the LL miss rate is computed relative to the total ++number of memory accesses, not the number of L1 misses. I.e. it is ++(ILmr + DLmr + DLmw) / (Ir + Dr + Dw) ++not ++(ILmr + DLmr + DLmw) / (I1mr + D1mr + D1mw) + + ++Branch prediction statistics are not collected by default. ++To do so, add the option . ++ + + + + + Output File + +- +-Cachegrind also writes more detailed profiling data to a file. By default this +-Cachegrind output file is named cachegrind.out.<pid> +-(where <pid> is the program's process ID), but its +-name can be changed with the option. +-This file is human-readable, but is intended to be interpreted by the +-accompanying program cg_annotate, described in the next section. +- ++As well as printing summary information, Cachegrind also writes ++more detailed profiling information to a file. By default this file is named ++cachegrind.out.<pid> (where ++<pid> is the program's process ID), but its name ++can be changed with the option. This ++file is human-readable, but is intended to be interpreted by the ++accompanying program cg_annotate, described in the next section. ++ ++The default .<pid> suffix ++on the output file name serves two purposes. Firstly, it means you ++don't have to rename old log files that you don't want to overwrite. 
++Secondly, and more importantly, it allows correct profiling with the ++ option of ++programs that spawn child processes. + +- +-The default .<pid> suffix on the output +-file name serves two purposes. First, it means existing Cachegrind output files +-aren't immediately overwritten. Second, and more importantly, it allows correct +-profiling with the option of programs +-that spawn child processes. +- ++The output file can be big, many megabytes for large applications ++built with full debugging information. + + + ++ + + + Running cg_annotate + +- +-Before using cg_annotate, it is worth widening your window to be at least 120 +-characters wide if possible, because the output lines can be quite long. +- ++Before using cg_annotate, ++it is worth widening your window to be at least 120-characters ++wide if possible, as the output lines can be quite long. + +- +-Then run: +-cg_annotate <filename> +-on a Cachegrind output file. +- ++To get a function-by-function summary, run: + +- ++cg_annotate <filename> + +- + +- +-The Metadata Section ++ ++The Output Preamble + +- +-The first part of the output looks like this: +- ++The first part of the output looks like this: + + + +- +-It summarizes how Cachegrind and the profiled program were run. +- ++ ++This is a summary of the annotation options: + + ++ + +- +- Invocation: the command line used to produce this output. +- ++ I1 cache, D1 cache, LL cache: cache configuration. So ++ you know the configuration with which these results were ++ obtained. + + + +- +- Command: the command line used to run the profiled program. +- ++ Command: the command line invocation of the program ++ under examination. + + + +- +- Events recorded: which events were recorded. By default, this is +- Ir. More events will be recorded if cache +- and/or branch simulation is enabled. +- ++ Events recorded: which events were recorded. ++ ++ ++ ++ ++ Events shown: the events shown, which is a subset of the events ++ gathered. This can be adjusted with the ++ option. + + + +- +- Events shown: the events shown, which is a subset of the events gathered. +- This can be adjusted with the option. +- ++ Event sort order: the sort order in which functions are ++ shown. For example, in this case the functions are sorted ++ from highest Ir counts to ++ lowest. If two functions have identical ++ Ir counts, they will then be ++ sorted by I1mr counts, and ++ so on. This order can be adjusted with the ++ option. ++ ++ Note that this dictates the order the functions appear. ++ It is not the order in which the columns ++ appear; that is dictated by the "events shown" line (and can ++ be changed with the ++ option). + + + +- +- Event sort order: the sort order used for the subsequent sections. For +- example, in this case those sections are sorted from highest +- Ir counts to lowest. If there are multiple +- events, one will be the primary sort event, and then there can be a +- secondary sort event, tertiary sort event, etc., though more than one is +- rarely needed. This order can be adjusted with the +- option. Note that this does not specify the order in +- which the columns appear. That is specified by the "events shown" line (and +- can be changed with the option). +- ++ Threshold: cg_annotate ++ by default omits functions that cause very low counts ++ to avoid drowning you in information. In this case, ++ cg_annotate shows summaries the functions that account for ++ 99% of the Ir counts; ++ Ir is chosen as the ++ threshold event since it is the primary sort event. 
The ++ threshold can be adjusted with the ++ ++ option. + + + +- +- Threshold: cg_annotate by default omits files and functions with very low +- counts to keep the output size reasonable. By default cg_annotate only +- shows files and functions that account for at least 0.1% of the primary +- sort event. The threshold can be adjusted with the +- option. +- ++ Chosen for annotation: names of files specified ++ manually for annotation; in this case none. + + + +- +- Annotation: whether source file annotation is enabled. Controlled with the +- option. +- ++ Auto-annotation: whether auto-annotation was requested ++ via the ++ option. In this case no. + + + + +- +-If cache simulation is enabled, details of the cache parameters will be shown +-above the "Invocation" line. +- +- + + + + +-Global, File, and Function-level Counts ++ xreflabel="The Global and Function-level Counts"> ++The Global and Function-level Counts + +- +-Next comes the summary for the whole program: +- ++Then follows summary statistics for the whole ++program: + + +- +- +-The Ir column label is suffixed with +-underscores to show the bounds of the columns underneath. +- +- +- +-Then comes file:function counts. Here is the first part of that section: +- +- +- +- +- +-Each entry covers one file, and one or more functions within that file. If +-there is only one significant function within a file, as in the first entry, +-the file and function are shown on the same line separate by a colon. If there +-are multiple significant functions within a file, as in the third entry, each +-function gets its own line. +- +- +- +-This example involves a small C program, and shows a combination of code from +-the program itself (including functions like get_word and +-hash in the file concord.c) as well +-as code from system libraries, such as functions like +-malloc and getc. +- +- +- +-Each entry is preceded with a <, which can +-be useful when navigating through the output in an editor, or grepping through +-results. +- +- +- +-The first percentage in each column indicates the proportion of the total event +-count is covered by this line. The second percentage, which only shows on the +-first line of each entry, shows the cumulative percentage of all the entries up +-to and including this one. The entries shown here account for 96.8% of the +-instructions executed by the program. +- ++27,742,716 276 275 10,955,517 21,905 3,987 4,474,773 19,280 19,098 PROGRAM TOTALS]]> + + +-The name ??? is used if the file name and/or +-function name could not be determined from debugging information. If +-??? filenames dominate, the program probably wasn't +-compiled with . If ??? function names +-dominate, the program may have had symbols stripped. ++These are similar to the summary provided when Cachegrind finishes running. + + +- +-After that comes function:file counts. Here is the first part of that section: +- ++Then comes function-by-function statistics: + + 2,086,303 (25.5%, 25.5%) get_word: +- 1,630,232 (19.9%) /home/njn/grind/ws1/cachegrind/concord.c +- 456,071 (5.6%) /usr/include/ctype.h +- +-> 1,285,938 (15.7%, 41.1%) _int_malloc:./malloc/./malloc/malloc.c +- +-> 1,107,550 (13.5%, 54.7%) getc:./libio/./libio/getc.c +- +-> 630,918 (7.7%, 62.4%) hash:/home/njn/grind/ws1/cachegrind/concord.c +- +-> 551,071 (6.7%, 69.1%) __strcmp_avx2:./string/../sysdeps/x86_64/multiarch/strcmp-avx2.S +- +-> 480,248 (5.9%, 74.9%) malloc: +- 458,225 (5.6%) ./malloc/./malloc/malloc.c +- 22,023 (0.3%) ./malloc/./malloc/arena.c +- +-> 468,151 (5.7%, 80.7%) ???:??? 
+- +-> 461,095 (5.6%, 86.3%) insert:/home/njn/grind/ws1/cachegrind/concord.c +-]]> +- +- +-This is similar to the previous section, but is grouped by functions first and +-files second. Also, the entry markers are > +-instead of <. +- +- +- +-You might wonder why this section is needed, and how it differs from the +-previous section. The answer is inlining. In this example there are two entries +-demonstrating a function whose code is effectively spread across more than one +-file: get_word and malloc. Here is an +-example from profiling the Rust compiler, a much larger program that uses +-inlining more: +- +- +- 30,469,230 (1.3%, 11.1%) ::intern_ty: +- 10,269,220 (0.5%) /home/njn/.cargo/registry/src/github.com-1ecc6299db9ec823/hashbrown-0.12.3/src/raw/mod.rs +- 7,696,827 (0.3%) /home/njn/dev/rust0/compiler/rustc_middle/src/ty/context.rs +- 3,858,099 (0.2%) /home/njn/dev/rust0/library/core/src/cell.rs +-]]> +- +- +-In this case the compiled function intern_ty includes code +-from three different source files, due to inlining. These should be examined +-together. Older versions of cg_annotate presented this entry as three separate +-file:function entries, which would typically be intermixed with all the other +-entries, making it hard to see that they are all really part of the same +-function. +- ++8,821,482 5 5 2,242,702 1,621 73 1,794,230 0 0 getc.c:_IO_getc ++5,222,023 4 4 2,276,334 16 12 875,959 1 1 concord.c:get_word ++2,649,248 2 2 1,344,810 7,326 1,385 . . . vg_main.c:strcmp ++2,521,927 2 2 591,215 0 0 179,398 0 0 concord.c:hash ++2,242,740 2 2 1,046,612 568 22 448,548 0 0 ctype.c:tolower ++1,496,937 4 4 630,874 9,000 1,400 279,388 0 0 concord.c:insert ++ 897,991 51 51 897,831 95 30 62 1 1 ???:??? ++ 598,068 1 1 299,034 0 0 149,517 0 0 ../sysdeps/generic/lockfile.c:__flockfile ++ 598,068 0 0 299,034 0 0 149,517 0 0 ../sysdeps/generic/lockfile.c:__funlockfile ++ 598,024 4 4 213,580 35 16 149,506 0 0 vg_clientmalloc.c:malloc ++ 446,587 1 1 215,973 2,167 430 129,948 14,057 13,957 concord.c:add_existing ++ 341,760 2 2 128,160 0 0 128,160 0 0 vg_clientmalloc.c:vg_trap_here_WRAPPER ++ 320,782 4 4 150,711 276 0 56,027 53 53 concord.c:init_hash_table ++ 298,998 1 1 106,785 0 0 64,071 1 1 concord.c:create ++ 149,518 0 0 149,516 0 0 1 0 0 ???:tolower@@GLIBC_2.0 ++ 149,518 0 0 149,516 0 0 1 0 0 ???:fgetc@@GLIBC_2.0 ++ 95,983 4 4 38,031 0 0 34,409 3,152 3,150 concord.c:new_word_node ++ 85,440 0 0 42,720 0 0 21,360 0 0 vg_clientmalloc.c:vg_bogus_epilogue]]> ++ ++Each function ++is identified by a ++file_name:function_name pair. If ++a column contains only a dot it means the function never performs ++that event (e.g. the third row shows that ++strcmp() contains no ++instructions that write to memory). The name ++??? is used if the file name ++and/or function name could not be determined from debugging ++information. If most of the entries have the form ++???:??? the program probably ++wasn't compiled with . ++ ++It is worth noting that functions will come both from ++the profiled program (e.g. concord.c) ++and from libraries (e.g. getc.c) + + + + +- +-Per-line Counts ++ ++Line-by-line Counts + +- +-By default, a source file is annotated if it contains at least one function +-that meets the significance threshold. This can be disabled with the +- option. +- +- +- +-To continue the previous example, here is part of the annotation of the file +-concord.c: +- ++By default, all source code annotation is also shown. 
(Filenames to be ++annotated can also by specified manually as arguments to cg_annotate, but this ++is rarely needed.) For example, the output from running cg_annotate ++<filename> for our example produces the same output as above ++followed by an annotated version of concord.c, a section ++of which looks like: + + word, data->line, table); +- . +- 2 (0.0%) free(data); +- 2 (0.0%) fclose(file_ptr); +- 6 (0.0%) } +-]]> +- +- +-Each executed line is annotated with its event counts. Other lines are +-annotated with a dot. This may be because they contain no executable code, or +-they contain executable code but were never executed. +- +- +- +-You can easily tell if a function is inlined from this output. If it is not +-inlined, it will have event counts on the lines containing the opening and +-closing braces. If it is inlined, it will not have event counts on those lines. +-In the example above, init_hash_table does have counts, +-so you can tell it is not inlined. +- +- +- +-Note again that inlining can lead to surprising results. If a function +-f is always inlined, in the file:function and +-function:file sections counts will be attributed to the functions it is inlined +-into, rather than itself. However, if you look at the line-by-line annotations +-for f you'll see the counts that belong to +-f. So it's worth looking for large counts/percentages in the +-line-by-line annotations. +- ++ . . . . . . . . . void init_hash_table(char *file_name, Word_Node *table[]) ++ 3 1 1 . . . 1 0 0 { ++ . . . . . . . . . FILE *file_ptr; ++ . . . . . . . . . Word_Info *data; ++ 1 0 0 . . . 1 1 1 int line = 1, i; ++ . . . . . . . . . ++ 5 0 0 . . . 3 0 0 data = (Word_Info *) create(sizeof(Word_Info)); ++ . . . . . . . . . ++ 4,991 0 0 1,995 0 0 998 0 0 for (i = 0; i < TABLE_SIZE; i++) ++ 3,988 1 1 1,994 0 0 997 53 52 table[i] = NULL; ++ . . . . . . . . . ++ . . . . . . . . . /* Open file, check it. */ ++ 6 0 0 1 0 0 4 0 0 file_ptr = fopen(file_name, "r"); ++ 2 0 0 1 0 0 . . . if (!(file_ptr)) { ++ . . . . . . . . . fprintf(stderr, "Couldn't open '%s'.\n", file_name); ++ 1 1 1 . . . . . . exit(EXIT_FAILURE); ++ . . . . . . . . . } ++ . . . . . . . . . ++ 165,062 1 1 73,360 0 0 91,700 0 0 while ((line = get_word(data, line, file_ptr)) != EOF) ++ 146,712 0 0 73,356 0 0 73,356 0 0 insert(data->;word, data->line, table); ++ . . . . . . . . . ++ 4 0 0 1 0 0 2 0 0 free(data); ++ 4 0 0 1 0 0 2 0 0 fclose(file_ptr); ++ 3 0 0 2 0 0 . . . }]]> + +- +-Sometimes only a small section of a source file is executed. To minimise +-uninteresting output, Cachegrind only shows annotated lines and lines within a +-small distance of annotated lines. Gaps are marked with line numbers, for +-example: +- +- +- +- +- +-The number of lines of context shown around annotated lines is controlled by +-the option. +- +- +- +-Any significant source files that could not be found are shown like this: +- ++(Although column widths are automatically minimised, a wide ++terminal is clearly useful.) ++ ++Each source file is clearly marked ++(User-annotated source) as ++having been chosen manually for annotation. If the file was ++found in one of the directories specified with the ++/ option, the directory ++and file are both given. ++ ++Each line is annotated with its event counts. Events not ++applicable for a line are represented by a dot. This is useful ++for distinguishing between an event which cannot happen, and one ++which can but did not. ++ ++Sometimes only a small section of a source file is ++executed. 
To minimise uninteresting output, Cachegrind only shows ++annotated lines and lines within a small distance of annotated ++lines. Gaps are marked with the line numbers so you know which ++part of a file the shown code comes from, eg: + + +- +- +-This is common for library files, because libraries are usually compiled with +-debugging information but the source files are rarely present on a system. +- +- +- +-Cachegrind relies heavily on accurate debug info. Sometimes compilers do not +-map a particular compiled instruction to line number 0, where the 0 represents +-"unknown" or "none". This is annoying but does happen in practice. cg_annotate +-prints these in the following way: +- ++(figures and code for line 704) ++-- line 704 ---------------------------------------- ++-- line 878 ---------------------------------------- ++(figures and code for line 878)]]> ++ ++The amount of context to show around annotated lines is ++controlled by the ++option. ++ ++Automatic annotation is enabled by default. ++cg_annotate will automatically annotate every source file it can ++find that is mentioned in the function-by-function summary. ++Therefore, the files chosen for auto-annotation are affected by ++the and ++ options. Each ++source file is clearly marked (Auto-annotated ++source) as being chosen automatically. Any ++files that could not be found are mentioned at the end of the ++output, eg: + + ++ ++This is quite common for library files, since libraries are ++usually compiled with debugging information, but the source files ++are often not present on a system. If a file is chosen for ++annotation both manually and automatically, it ++is marked as User-annotated ++source. Use the ++/ option to tell Valgrind where ++to look for source files if the filenames found from the debugging ++information aren't specific enough. ++ ++Beware that cg_annotate can take some time to digest large ++cachegrind.out.<pid> files, ++e.g. 30 seconds or more. Also beware that auto-annotation can ++produce a lot of output if your program is large! + +-1,046,746 (0.0%) +-]]> ++ + +- +-Finally, when annotation is performed, the output ends with a summary of how +-many counts were annotated and unannotated, and why. For example: +- + +- ++Annotating Assembly Code Programs + +-3,534,817 (43.1%) annotated: files known & above threshold & readable, line numbers known +- 0 annotated: files known & above threshold & readable, line numbers unknown +- 0 unannotated: files known & above threshold & two or more non-identical +-4,132,126 (50.4%) unannotated: files known & above threshold & unreadable +- 59,950 (0.7%) unannotated: files known & below threshold +- 468,163 (5.7%) unannotated: files unknown +-]]> ++Valgrind can annotate assembly code programs too, or annotate ++the assembly code generated for your C program. Sometimes this is ++useful for understanding what is really happening when an ++interesting line of C code is translated into multiple ++instructions. ++ ++To do this, you just need to assemble your ++.s files with assembly-level debug ++information. You can use compile with the to compile C/C++ ++programs to assembly code, and then assemble the assembly code files with ++ to achieve this. You can then profile and annotate the ++assembly code source files in the same way as C/C++ source files. + + + +- + + Forking Programs ++If your program forks, the child will inherit all the profiling data that ++has been gathered for the parent. 
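++
++As the next paragraph explains, including a %p component in the output
++file name keeps the parent's and each child's profile apart. A minimal
++illustration (the program name ./forker here is only a placeholder, not
++part of the manual):
++
++  # one cachegrind.out.<pid> file per process, parent and children alike
++  valgrind --tool=cachegrind --cachegrind-out-file=cachegrind.out.%p ./forker
++
++Each resulting file can then be annotated separately, or combined with
++cg_merge if a single aggregate view is wanted.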
+ +- +-If your program forks, the child will inherit all the profiling data that +-has been gathered for the parent. +- +- +- +-If the output file name (controlled by ) +-does not contain , then the outputs from the parent and +-child will be intermingled in a single output file, which will almost certainly +-make it unreadable by cg_annotate. +- +- ++If the output file format string (controlled by ++) does not contain , ++then the outputs from the parent and child will be intermingled in a single ++output file, which will almost certainly make it unreadable by ++cg_annotate. + + + + + cg_annotate Warnings + +- +-There are two situations in which cg_annotate prints warnings. +- ++There are a couple of situations in which ++cg_annotate issues warnings. + + + +- +- If a source file is more recent than the Cachegrind output file. This is +- because the information in the Cachegrind output file is only recorded with +- line numbers, so if the line numbers change at all in the source (e.g. +- lines added, deleted, swapped), any annotations will be incorrect. +- +- +- +- +- If information is recorded about line numbers past the end of a file. This +- can be caused by the above problem, e.g. shortening the source file while +- using an old Cachegrind output file. If this happens, the figures for the +- bogus lines are printed anyway (and clearly marked as bogus) in case they +- are important. +- ++ If a source file is more recent than the ++ cachegrind.out.<pid> file. ++ This is because the information in ++ cachegrind.out.<pid> is only ++ recorded with line numbers, so if the line numbers change at ++ all in the source (e.g. lines added, deleted, swapped), any ++ annotations will be incorrect. ++ ++ ++ If information is recorded about line numbers past the ++ end of a file. This can be caused by the above problem, ++ i.e. shortening the source file while using an old ++ cachegrind.out.<pid> file. If ++ this happens, the figures for the bogus lines are printed ++ anyway (clearly marked as bogus) in case they are ++ important. + + + + + + +- +-Merging Cachegrind Output Files + +- +-cg_annotate can merge data from multiple Cachegrind output files in a single +-run. (There is also a program called cg_merge that can merge multiple +-Cachegrind output files into a single Cachegrind output file, but it is now +-deprecated because cg_annotate's merging does a better job.) +- ++ ++Unusual Annotation Cases + +- +-Use it as follows: +- ++Some odd things that can occur during annotation: + ++ ++ ++ If annotating at the assembler level, you might see ++ something like this: + ++ 1 0 0 . . . . . . leal -12(%ebp),%eax ++ 1 0 0 . . . 1 0 0 movl %eax,84(%ebx) ++ 2 0 0 0 0 0 1 0 0 movl $1,-20(%ebp) ++ . . . . . . . . . .align 4,0x90 ++ 1 0 0 . . . . . . movl $.LnrB,%eax ++ 1 0 0 . . . 1 0 0 movl %eax,-16(%ebp)]]> ++ ++ How can the third instruction be executed twice when ++ the others are executed only once? As it turns out, it ++ isn't. Here's a dump of the executable, using ++ objdump -d: ++ ++ ++ Notice the extra mov ++ %esi,%esi instruction. Where did this come ++ from? The GNU assembler inserted it to serve as the two ++ bytes of padding needed to align the movl ++ $.LnrB,%eax instruction on a four-byte ++ boundary, but pretended it didn't exist when adding debug ++ information. Thus when Valgrind reads the debug info it ++ thinks that the movl ++ $0x1,0xffffffec(%ebp) instruction covers the ++ address range 0x8048f2b--0x804833 by itself, and attributes ++ the counts for the mov ++ %esi,%esi to it. 
++ ++ ++ ++ ++ ++ Sometimes, the same filename might be represented with ++ a relative name and with an absolute name in different parts ++ of the debug info, eg: ++ /home/user/proj/proj.h and ++ ../proj.h. In this case, if you use ++ auto-annotation, the file will be annotated twice with the ++ counts split between the two. ++ ++ ++ ++ If you compile some files with ++ and some without, some ++ events that take place in a file without debug info could be ++ attributed to the last line of a file with debug info ++ (whichever one gets placed before the non-debug-info file in ++ the executable). ++ + +- +-cg_annotate computes the sum of these files (effectively +-file1 + file2 + +-file3), and then produces output as usual that shows the +-summed counts. +- ++ + +- +-The most common merging scenario is if you want to aggregate costs over +-multiple runs of the same program, possibly on different inputs. +- ++This list looks long, but these cases should be fairly ++rare. + + + + +- +-Differencing Cachegrind output files ++ ++Merging Profiles with cg_merge + + +-cg_annotate can diff data from two Cachegrind output files in a single run. +-(There is also a program called cg_diff that can diff two Cachegrind output +-files into a single Cachegrind output file, but it is now deprecated because +-cg_annotate's differencing does a better job.) +- ++cg_merge is a simple program which ++reads multiple profile files, as created by Cachegrind, merges them ++together, and writes the results into another file in the same format. ++You can then examine the merged results using ++cg_annotate <filename>, as ++described above. The merging functionality might be useful if you ++want to aggregate costs over multiple runs of the same program, or ++from a single parallel run with multiple instances of the same ++program. + + +-Use it as follows: ++cg_merge is invoked as follows: + + + +- +- +-cg_annotate computes the difference between these two files (effectively +-file2 - file1), and then +-produces output as usual that shows the count differences. Note that many of +-the counts may be negative; this indicates that the counts for the relevant +-file/function/line are smaller in the second version than those in the first +-version. +- ++cg_merge -o outputfile file1 file2 file3 ...]]> + + +-The simplest common scenario is comparing two Cachegrind output files that came +-from the same program, but on different inputs. cg_annotate will do a good job +-on this without assistance. +- +- +- +-A more complex scenario is if you want to compare Cachegrind output files from +-two slightly different versions of a program that you have sitting +-side-by-side, running on the same input. For example, you might have +-version1/prog.c and version2/prog.c. +-A straight comparison of the two would not be useful. Because functions are +-always paired with filenames, a function f would be listed +-as version1/prog.c:f for the first version but +-version2/prog.c:f for the second version. +- +- +- +-In this case, use the option. Its argument is a +-search-and-replace expression that will be applied to all the filenames in both +-Cachegrind output files. It can be used to remove minor differences in +-filenames. For example, the option +- will suffice for the +-above example. +- +- +- +-Similarly, sometimes compilers auto-generate certain functions and give them +-randomized names like T.1234 where the suffixes vary from +-build to build. You can use the option to +-remove small differences like these; it works in the same way as +-. 
+- +- +- +-When is used to compare two different versions +-of the same program, cg_annotate will not annotate any file that is different +-between the two versions, because the per-line counts are not reliable in such +-a case. For example, imagine if version2/prog.c is the +-same as version1/prog.c except with an extra blank line at +-the top of the file. Every single per-line count will have changed. In +-comparison, the per-file and per-function counts have not changed, and are +-still very useful for determining differences between programs. You might think +-that this means every interesting file will be left unannotated, but again +-inlining means that files that are identical in the two versions can have +-different counts on many lines. +- +- ++It reads and checks file1, then read ++and checks file2 and merges it into ++the running totals, then the same with ++file3, etc. The final results are ++written to outputfile, or to standard ++out if no output file is specified. ++ ++ ++Costs are summed on a per-function, per-line and per-instruction ++basis. Because of this, the order in which the input files does not ++matter, although you should take care to only mention each file once, ++since any file mentioned twice will be added in twice. ++ ++ ++cg_merge does not attempt to check ++that the input files come from runs of the same executable. It will ++happily merge together profile files from completely unrelated ++programs. It does however check that the ++Events: lines of all the inputs are ++identical, so as to ensure that the addition of costs makes sense. ++For example, it would be nonsensical for it to add a number indicating ++D1 read references to a number from a different file indicating LL ++write misses. ++ ++ ++A number of other syntax and sanity checks are done whilst reading the ++inputs. cg_merge will stop and ++attempt to print a helpful error message if any of the input files ++fail these checks. + + + +- +-Cache and Branch Simulation +- +- +-Cachegrind can simulate how your program interacts with a machine's cache +-hierarchy and/or branch predictor. + +-The cache simulation models a machine with independent first-level instruction +-and data caches (I1 and D1), backed by a unified second-level cache (L2). For +-these machines (in the cases where Cachegrind can auto-detect the cache +-configuration) Cachegrind simulates the first-level and last-level caches. +-Therefore, Cachegrind always refers to the I1, D1 and LL (last-level) caches. +- +- +- +-When simulating the cache, with , Cachegrind +-gathers the following statistics: +- +- +- +- +- +- I cache reads (Ir, which equals the number +- of instructions executed), I1 cache read misses +- (I1mr) and LL cache instruction read +- misses (ILmr). +- +- +- +- +- D cache reads (Dr, which equals the number +- of memory reads), D1 cache read misses +- (D1mr), and LL cache data read misses +- (DLmr). +- +- +- +- +- D cache writes (Dw, which equals the +- number of memory writes), D1 cache write misses +- (D1mw), and LL cache data write misses +- (DLmw). +- +- +- ++ ++Differencing Profiles with cg_diff + + +-Note that D1 total accesses is given by D1mr + +-D1mw, and that LL total accesses is given by +-ILmr + DLmr + +-DLmw. ++cg_diff is a simple program which ++reads two profile files, as created by Cachegrind, finds the difference ++between them, and writes the results into another file in the same format. ++You can then examine the merged results using ++cg_annotate <filename>, as ++described above. 
This is very useful if you want to measure how a change to ++a program affected its performance. + + + +-When simulating the branch predictor, with , +-Cachegrind gathers the following statistics: ++cg_diff is invoked as follows: + + +- +- +- +- Conditional branches executed (Bc) and +- conditional branches mispredicted (Bcm). +- +- +- +- +- Indirect branches executed (Bi) and +- indirect branches mispredicted (Bim). +- +- +- ++ + + +-When cache and/or branch simulation is enabled, cg_annotate will print multiple +-counts per line of output. For example: +- +- +-file1, then read ++and checks file2, then computes the ++difference (effectively file1 - ++file2). The final results are written to ++standard output. ++ ++ ++Costs are summed on a per-function basis. Per-line costs are not summed, ++because doing so is too difficult. For example, consider differencing two ++profiles, one from a single-file program A, and one from the same program A ++where a single blank line was inserted at the top of the file. Every single ++per-line count has changed. In comparison, the per-function counts have not ++changed. The per-function count differences are still very useful for ++determining differences between programs. Note that because the result is ++the difference of two profiles, many of the counts will be negative; this ++indicates that the counts for the relevant function are fewer in the second ++version than those in the first version. ++ ++ ++cg_diff does not attempt to check ++that the input files come from runs of the same executable. It will ++happily merge together profile files from completely unrelated ++programs. It does however check that the ++Events: lines of all the inputs are ++identical, so as to ensure that the addition of costs makes sense. ++For example, it would be nonsensical for it to add a number indicating ++D1 read references to a number from a different file indicating LL ++write misses. ++ ++ ++A number of other syntax and sanity checks are done whilst reading the ++inputs. cg_diff will stop and ++attempt to print a helpful error message if any of the input files ++fail these checks. ++ ++ ++Sometimes you will want to compare Cachegrind profiles of two versions of a ++program that you have sitting side-by-side. For example, you might have ++version1/prog.c and ++version2/prog.c, where the second is ++slightly different to the first. A straight comparison of the two will not ++be useful -- because functions are qualified with filenames, a function ++f will be listed as ++version1/prog.c:f for the first version but ++version2/prog.c:f for the second ++version. ++ ++ ++When this happens, you can use the option. ++Its argument is a Perl search-and-replace expression that will be applied ++to all the filenames in both Cachegrind output files. It can be used to ++remove minor differences in filenames. For example, the option ++ will suffice for ++this case. + +-> 8,547 (0.1%, 99.4%) 936 (0.1%, 99.1%) 177 (0.3%, 96.7%) 59 (0.0%, 99.9%) 38 (19.4%, 66.3%) strcmp: +- 8,503 (0.1%) 928 (0.1%) 175 (0.3%) 59 (0.0%) 38 (19.4%) ./string/../sysdeps/x86_64/multiarch/../multiarch/strcmp-sse2.S +-]]> ++ ++Similarly, sometimes compilers auto-generate certain functions and give them ++randomized names. For example, GCC sometimes auto-generates functions with ++names like T.1234, and the suffixes vary from build to ++build. You can use the option to remove ++small differences like these; it works in the same way as ++. 
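++
++As an illustration (the directory and output file names below are assumed
++for the example, not prescribed by cg_diff), a side-by-side comparison of
++two versions of the same program might be run as follows:
++
++  # profile each version into its own output file
++  valgrind --tool=cachegrind --cachegrind-out-file=cg.out.v1 version1/prog
++  valgrind --tool=cachegrind --cachegrind-out-file=cg.out.v2 version2/prog
++  # strip the differing directory component, difference the two profiles,
++  # and annotate the result
++  cg_diff --mod-filename='s/version[0-9]/versionN/' cg.out.v1 cg.out.v2 > cg.out.diff
++  cg_annotate cg.out.diff
++
++The search-and-replace expression removes the difference in the directory
++names so that each function is matched up across the two profiles and its
++counts can be differenced.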
+ + + ++ + + + ++ + + Cachegrind Command-line Options + + +- +-Cachegrind-specific options are: +- ++Cachegrind-specific options are: + + + +- ++ + +- ++ + + +- +- Write the Cachegrind output file to file rather than +- to the default output file, +- cachegrind.out.<pid>. The +- and format specifiers can be used to embed the +- process ID and/or the contents of an environment variable in the name, as +- is the case for the core option +- . +- ++ Specify the size, associativity and line size of the level 1 ++ instruction cache. + + + +- ++ + +- ++ + + +- +- Enables or disables collection of cache access and miss counts. +- ++ Specify the size, associativity and line size of the level 1 ++ data cache. + + + +- ++ + +- ++ + + +- +- Enables or disables collection of branch instruction and +- misprediction counts. +- ++ Specify the size, associativity and line size of the last-level ++ cache. + + + +- ++ + +- ++ + + +- +- Specify the size, associativity and line size of the level 1 instruction +- cache. Only useful with . +- ++ Enables or disables collection of cache access and miss ++ counts. + + + +- ++ + +- ++ + + +- +- Specify the size, associativity and line size of the level 1 data cache. +- Only useful with . +- ++ Enables or disables collection of branch instruction and ++ misprediction counts. By default this is disabled as it ++ slows Cachegrind down by approximately 25%. Note that you ++ cannot specify ++ and ++ together, as that would leave Cachegrind with no ++ information to collect. + + + +- ++ + +- ++ + + +- +- Specify the size, associativity and line size of the last-level cache. +- Only useful with . ++ Write the profile data to ++ file rather than to the default ++ output file, ++ cachegrind.out.<pid>. The ++ and format specifiers ++ can be used to embed the process ID and/or the contents of an ++ environment variable in the name, as is the case for the core ++ option . + + + +@@ -945,114 +898,94 @@ + + + +- +- +- +- Diff two Cachegrind output files. +- +- +- +- +- +- +- +- +- +- Specifies an search-and-replace expression +- that is applied to all filenames. Useful when differencing, for removing +- minor differences in paths between two different versions of a program +- that are sitting in different directories. An suffix +- makes the regex case-insensitive, and a suffix makes +- it match multiple times. +- +- +- +- +- +- +- ++ + + +- +- Like , but for filenames. Useful for +- removing minor differences in randomized names of auto-generated +- functions generated by some compilers. +- ++ Specifies which events to show (and the column ++ order). Default is to use all present in the ++ cachegrind.out.<pid> file (and ++ use the order in the file). Useful if you want to concentrate on, for ++ example, I cache misses (), or data ++ read misses (), or LL data misses ++ (). Best used in conjunction with ++ . + + + + + +- ++ + + +- +- Specifies which events to show (and the column order). Default is to use +- all present in the Cachegrind output file (and use the order in the +- file). Best used in conjunction with . +- ++ Specifies the events upon which the sorting of the ++ function-by-function entries will be based. + + + + + +- ++ + + +- +- Specifies the events upon which the sorting of the file:function and +- function:file entries will be based. +- ++ Sets the threshold for the function-by-function ++ summary. A function is shown if it accounts for more than X% ++ of the counts for the primary sort event. If auto-annotating, also ++ affects which files are annotated. 
++ ++ Note: thresholds can be set for more than one of the ++ events by appending any events for the ++ option with a colon ++ and a number (no spaces, though). E.g. if you want to see ++ each function that covers more than 1% of LL read misses or 1% of LL ++ write misses, use this option: ++ + + + + + +- ++ + + +- +- Sets the significance threshold for the file:function and function:files +- sections. A file or function is shown if it accounts for more than X% of +- the counts for the primary sort event. If annotating source files, this +- also affects which files are annotated. ++ When enabled, a percentage is printed next to all event counts. ++ This helps gauge the relative importance of each function and line. + + + + + + +- ++ + + +- +- When enabled, a percentage is printed next to all event counts. This +- helps gauge the relative importance of each function and line. +- ++ When enabled, automatically annotates every file that ++ is mentioned in the function-by-function summary that can be ++ found. Also gives a list of those that couldn't be found. + + + + + +- ++ + + +- +- Enables or disables source file annotation. +- ++ Print N lines of context before and after each ++ annotated line. Avoids printing large sections of source ++ files that were not executed. Use a large number ++ (e.g. 100000) to show all source lines. + + + + + +- ++
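++
++Putting several of these options together, a fairly typical invocation
++(the output file name below is just an example) might be:
++
++  cg_annotate --auto=yes --show=Ir,I1mr,ILmr --sort=Ir \
++              --threshold=0.5 --context=4 cachegrind.out.12345
++
++and, following the note above, something like --sort=DLmr:1,DLmw:1 shows
++every function accounting for more than 1% of LL read misses or more than
++1% of LL write misses.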