diff --git a/CleanSpec.mk b/CleanSpec.mk
new file mode 100644
index 0000000000000000000000000000000000000000..a97c71a49e02ef529e9ce288adad78f337ce00e9
--- /dev/null
+++ b/CleanSpec.mk
@@ -0,0 +1,383 @@
+# Copyright (C) 2007 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# If you don't need to do a full clean build but would like to touch
+# a file or delete some intermediate files, add a clean step to the end
+# of the list. These steps will only be run once, if they haven't been
+# run before.
+#
+# E.g.:
+# $(call add-clean-step, touch -c external/sqlite/sqlite3.h)
+# $(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/libz_intermediates)
+#
+# Always use "touch -c" and "rm -f" or "rm -rf" to gracefully deal with
+# files that are missing or have been moved.
+#
+# Use $(PRODUCT_OUT) to get to the "out/target/product/blah/" directory.
+# Use $(OUT_DIR) to refer to the "out" directory.
+#
+# If you need to re-do something that's already mentioned, just copy
+# the command and add it to the bottom of the list. E.g., if a change
+# that you made last week required touching a file and a change you
+# made today requires touching the same file, just copy the old
+# touch step and add it to the end of the list.
+#
+# ************************************************
+# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
+# ************************************************
+
+# For example:
+#$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/APPS/AndroidTests_intermediates)
+#$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/JAVA_LIBRARIES/core_intermediates)
+#$(call add-clean-step, find $(OUT_DIR) -type f -name "IGTalkSession*" -print0 | xargs -0 rm -f)
+#$(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/*)
+
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/APPS)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system)
+
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libmediaplayerservice_intermediates)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libmedia_jni_intermediates)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libstagefright_omx_intermediates)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/root/default.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/recovery/root/default.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/vendor)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/android-info.txt)
+$(call add-clean-step, find $(PRODUCT_OUT) -name "*.apk" -print0 | xargs -0 rm -f)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/*/LINKED)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/lib/*.so)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/*.so)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/symbols/system/lib/*.so)
+$(call add-clean-step, rm -rf $(HOST_OUT_EXECUTABLES)/iself)
+$(call add-clean-step, rm -rf $(HOST_OUT_EXECUTABLES)/lsd)
+$(call add-clean-step, rm -rf $(HOST_OUT_EXECUTABLES)/apriori)
+$(call add-clean-step, rm -rf $(HOST_OUT_EXECUTABLES)/isprelinked)
+$(call add-clean-step, rm -rf $(HOST_OUT_EXECUTABLES)/soslim)
+
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/lib/*.so)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/*.so)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/symbols/system/lib/*.so)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/YouTube*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libstagefright_intermediates)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libstagefright_omx_intermediates)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/librtp_jni_intermediates)
+
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/android-info.txt)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/JAVA_LIBRARIES/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/framework/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libbcinfo_intermediates)
+
+# ICS MR2!!!!!!!!!!!!
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libbcinfo_intermediates)
+
+# WAIT, I MEAN JELLY BEAN!!!!!!!!!!!!
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+
+# Changing where ro.carrier value is instantiated for system/build.prop
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+
+# Now we switched to build against Mac OS X SDK 10.6
+$(call add-clean-step, rm -rf $(OUT_DIR)/host/darwin-x86/obj)
+
+$(call add-clean-step, rm -f $(OUT_DIR)/versions_checked.mk)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/EXECUTABLES)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/lib/*.o)
+
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/EXECUTABLES)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/lib/*.o)
+
+# JB MR2!!!!!!! AND *NO*, THIS WILL NOT BE K-WHATEVER.
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+
+# Start of "K" development!
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+
+# GCC 4.7
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/EXECUTABLES)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/lib/*.o)
+
+# Wait, back to some JB development!
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+
+# And on to KLP...
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+
+# KLP now based off API 18.
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+
+# Clean up around the /system/app -> /system/priv-app migration
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+
+# Clean up old location of generated Java files from aidl
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/JAVA_LIBRARIES/framework_intermediates/src)
+
+# Clean up ApplicationsProvider which is being removed.
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/APPS/ApplicationsProvider_intermediates)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/priv-app/ApplicationsProvider.apk)
+
+# Clean up Moto OMA DM client which isn't ready yet.
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/JAVA_LIBRARIES/com.android.omadm.plugin.dev_intermediates)
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/JAVA_LIBRARIES/com.android.omadm.plugin.diagmon_intermediates)
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/JAVA_LIBRARIES/com.android.omadm.pluginhelper_intermediates)
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/JAVA_LIBRARIES/com.android.omadm.plugin_intermediates)
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/JAVA_LIBRARIES/com.android.omadm.service.api_intermediates)
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/APPS/DMService_intermediates)
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/APPS/SprintDM_intermediates)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/priv-app/DMService.apk)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/SprintDM.apk)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/etc/omadm)
+
+# GCC 4.8
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/EXECUTABLES)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/lib/*.o)
+
+# KLP I mean KitKat now API 19.
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+
+# 4.4.1
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+
+# 4.4.2
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+
+# "L" and beyond.
+# Make libart the default runtime
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+
+# Rename persist.sys.dalvik.vm.lib to allow new default
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+
+# KKWT development
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+
+# L development
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+
+# L development
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+
+# Add ro.product.cpu.abilist{32,64} to build.prop.
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+
+# Unset TARGET_PREFER_32_BIT_APPS for 64 bit targets.
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+
+# Adding dalvik.vm.dex2oat-flags to eng builds
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+
+# Unset TARGET_PREFER_32_BIT_APPS for 64 bit targets.
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+
+# Switching the x86 emulator over to a 64 bit primary zygote.
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/root/default.prop)
+
+# Rename persist.sys.dalvik.vm.lib.1 to allow new default
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+
+# Switching PRODUCT_RUNTIMES default for some devices
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+
+# Switching to 32-bit-by-default host multilib build
+$(call add-clean-step, rm -rf $(HOST_OUT_INTERMEDIATES))
+
+# KKWT has become API 20
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+
+# ims-common.jar added to BOOTCLASSPATH
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/ETC/init.environ.rc_intermediates)
+
+# Change ro.zygote for core_64_bit.mk from zygote32_64 to zygote64_32
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/root/default.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/recovery/root/default.prop)
+
+# Adding dalvik.vm.dex2oat-Xms, dalvik.vm.dex2oat-Xmx
+# dalvik.vm.image-dex2oat-Xms, and dalvik.vm.image-dex2oat-Xmx
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/root/default.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/recovery/root/default.prop)
+
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system)
+
+# Switch host builds to Clang by default
+$(call add-clean-step, rm -rf $(OUT_DIR)/host)
+
+# Adding dalvik.vm.dex2oat-filter
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/root/default.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/recovery/root/default.prop)
+
+# API 21?
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+
+# API 21!
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+
+# API 22!
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+
+# Move to libc++ as the default STL.
+$(call add-clean-step, rm -rf $(OUT_DIR))
+
+# dex2oat instruction-set changes
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/root/default.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/recovery/root/default.prop)
+
+# Make GNU++11 the default standard version. This requires a cleanspec because
+# char16_t/char32_t will be real types now instead of typedefs, which means
+# an ABI change since the names will mangle differently.
+$(call add-clean-step, rm -rf $(OUT_DIR))
+
+# 5.1!
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+
+# Remove ro.product.locale.language/country and add ro.product.locale
+# instead.
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+
+# On to MNC
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+
+# Adding dalvik.vm.usejit
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/root/default.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/recovery/root/default.prop)
+
+# Rename dalvik.vm.usejit to debug.dalvik.vm.usejit
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/root/default.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/recovery/root/default.prop)
+
+# Revert rename dalvik.vm.usejit to debug.dalvik.vm.usejit
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/root/default.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/recovery/root/default.prop)
+
+# Change from interpret-only to verify-at-runtime.
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/root/default.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/recovery/root/default.prop)
+
+# New York, New York!
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+
+# 23 is becoming alive!!!
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+
+# Change PLATFORM_VERSION from NYC to N
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+
+# $(PRODUCT_OUT)/recovery/root/sdcard goes from symlink to folder.
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/recovery/root/sdcard)
+
+# Add BOARD_USES_SYSTEM_OTHER_ODEX
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/priv-app/*)
+
+# ************************************************
+# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
+# ************************************************
diff --git a/android_build.spec b/android_build.spec
new file mode 100644
index 0000000000000000000000000000000000000000..ea2760ee7f56bda8c3ae2898af30a4b821b3b8ba
--- /dev/null
+++ b/android_build.spec
@@ -0,0 +1,10 @@
+Name: android_build
+Version: android7.1.3
+Release: 1
+Summary: platform frameworks build module
+License: Apache-2.0
+URL: https://gitee.com/src-openeuler/platform_build
+
+
+%description
+This package provides the Android platform frameworks build module (platform_build).
diff --git a/blueprint/.gitignore b/blueprint/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..de998546cac306aa185ad5e941a58d73f808504f
--- /dev/null
+++ b/blueprint/.gitignore
@@ -0,0 +1 @@
+out.test
diff --git a/blueprint/.travis.fix-fork.sh b/blueprint/.travis.fix-fork.sh
new file mode 100755
index 0000000000000000000000000000000000000000..af26716fd4a6162b9cc37219c249eed1be90ef06
--- /dev/null
+++ b/blueprint/.travis.fix-fork.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+if echo "$TRAVIS_BUILD_DIR" | grep -vq "github.com/google/blueprint$" ; then
+ cd ../..
+ mkdir -p google
+ mv "$TRAVIS_BUILD_DIR" google/blueprint
+ cd google/blueprint
+ export TRAVIS_BUILD_DIR=$PWD
+fi
diff --git a/blueprint/.travis.install-ninja.sh b/blueprint/.travis.install-ninja.sh
new file mode 100755
index 0000000000000000000000000000000000000000..fef0e5b0582e4f0f1be4e8a1ce00df804ed90a88
--- /dev/null
+++ b/blueprint/.travis.install-ninja.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+# Version of ninja to build -- can be any git revision
+VERSION="v1.6.0"
+
+set -ev
+
+SCRIPT_HASH=$(sha1sum ${BASH_SOURCE[0]} | awk '{print $1}')
+
+cd ~
+if [[ -d ninjabin && "$SCRIPT_HASH" == "$(cat ninjabin/script_hash)" ]]; then
+ exit 0
+fi
+
+git clone https://github.com/martine/ninja
+cd ninja
+./configure.py --bootstrap
+
+mkdir -p ../ninjabin
+rm -f ../ninjabin/ninja
+echo -n $SCRIPT_HASH >../ninjabin/script_hash
+mv ninja ../ninjabin/
diff --git a/blueprint/.travis.yml b/blueprint/.travis.yml
new file mode 100644
index 0000000000000000000000000000000000000000..94a09202d8cbc45fe8b3842ff421eb6931d55ae4
--- /dev/null
+++ b/blueprint/.travis.yml
@@ -0,0 +1,24 @@
+language: go
+
+go:
+ - 1.5.1
+
+cache:
+ directories:
+ - $HOME/ninjabin
+
+install:
+ - ./.travis.install-ninja.sh
+ - export PATH=$PATH:~/ninjabin
+
+before_script:
+ - source .travis.fix-fork.sh
+
+script:
+ - go test ./...
+ - mkdir stage
+ - cd stage
+ - ../bootstrap.bash
+ - ./blueprint.bash
+ - diff -us ../build.ninja.in .bootstrap/bootstrap.ninja.in
+ - ../tests/test.sh
diff --git a/blueprint/Blueprints b/blueprint/Blueprints
new file mode 100644
index 0000000000000000000000000000000000000000..84345a3e0a715eb68dce1bdb9ca8763fc270a5db
--- /dev/null
+++ b/blueprint/Blueprints
@@ -0,0 +1,150 @@
+bootstrap_go_package(
+ name = "blueprint",
+ deps = [
+ "blueprint-parser",
+ "blueprint-pathtools",
+ "blueprint-proptools",
+ ],
+ pkgPath = "github.com/google/blueprint",
+ srcs = [
+ "context.go",
+ "live_tracker.go",
+ "mangle.go",
+ "module_ctx.go",
+ "ninja_defs.go",
+ "ninja_strings.go",
+ "ninja_writer.go",
+ "package_ctx.go",
+ "scope.go",
+ "singleton_ctx.go",
+ "unpack.go",
+ ],
+ testSrcs = [
+ "context_test.go",
+ "ninja_strings_test.go",
+ "ninja_writer_test.go",
+ "splice_modules_test.go",
+ "unpack_test.go",
+ ],
+)
+
+bootstrap_go_package(
+ name = "blueprint-parser",
+ pkgPath = "github.com/google/blueprint/parser",
+ srcs = [
+ "parser/modify.go",
+ "parser/parser.go",
+ "parser/printer.go",
+ "parser/sort.go",
+ ],
+ testSrcs = [
+ "parser/parser_test.go",
+ "parser/printer_test.go",
+ ],
+)
+
+bootstrap_go_package(
+ name = "blueprint-deptools",
+ pkgPath = "github.com/google/blueprint/deptools",
+ srcs = ["deptools/depfile.go"],
+)
+
+bootstrap_go_package(
+ name = "blueprint-pathtools",
+ pkgPath = "github.com/google/blueprint/pathtools",
+ srcs = [
+ "pathtools/lists.go",
+ "pathtools/glob.go",
+ ],
+ testSrcs = [
+ "pathtools/glob_test.go",
+ ],
+)
+
+bootstrap_go_package(
+ name = "blueprint-proptools",
+ pkgPath = "github.com/google/blueprint/proptools",
+ srcs = [
+ "proptools/clone.go",
+ "proptools/extend.go",
+ "proptools/proptools.go",
+ "proptools/typeequal.go",
+ ],
+ testSrcs = [
+ "proptools/clone_test.go",
+ "proptools/extend_test.go",
+ "proptools/typeequal_test.go",
+ ],
+)
+
+bootstrap_go_package(
+ name = "blueprint-bootstrap",
+ deps = [
+ "blueprint",
+ "blueprint-deptools",
+ "blueprint-pathtools",
+ "blueprint-bootstrap-bpdoc",
+ ],
+ pkgPath = "github.com/google/blueprint/bootstrap",
+ srcs = [
+ "bootstrap/bootstrap.go",
+ "bootstrap/cleanup.go",
+ "bootstrap/command.go",
+ "bootstrap/config.go",
+ "bootstrap/doc.go",
+ "bootstrap/writedocs.go",
+ ],
+)
+
+bootstrap_go_package(
+ name = "blueprint-bootstrap-bpdoc",
+ deps = [
+ "blueprint",
+ "blueprint-proptools",
+ ],
+ pkgPath = "github.com/google/blueprint/bootstrap/bpdoc",
+ srcs = [
+ "bootstrap/bpdoc/bpdoc.go",
+ ],
+)
+
+bootstrap_core_go_binary(
+ name = "minibp",
+ deps = [
+ "blueprint",
+ "blueprint-bootstrap",
+ ],
+ srcs = ["bootstrap/minibp/main.go"],
+)
+
+bootstrap_go_binary(
+ name = "bpfmt",
+ deps = ["blueprint-parser"],
+ srcs = ["bpfmt/bpfmt.go"],
+)
+
+bootstrap_go_binary(
+ name = "bpmodify",
+ deps = ["blueprint-parser"],
+ srcs = ["bpmodify/bpmodify.go"],
+)
+
+bootstrap_core_go_binary(
+ name = "gotestmain",
+ srcs = ["gotestmain/gotestmain.go"],
+)
+
+bootstrap_core_go_binary(
+ name = "gotestrunner",
+ srcs = ["gotestrunner/gotestrunner.go"],
+)
+
+bootstrap_core_go_binary(
+ name = "choosestage",
+ srcs = ["choosestage/choosestage.go"],
+)
+
+bootstrap_go_binary(
+ name = "loadplugins",
+ srcs = ["loadplugins/loadplugins.go"],
+)
diff --git a/blueprint/CONTRIBUTING.md b/blueprint/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..1ba853922fb804fd5265e877f3e8a4e1d6615448
--- /dev/null
+++ b/blueprint/CONTRIBUTING.md
@@ -0,0 +1,24 @@
+Want to contribute? Great! First, read this page (including the small print at the end).
+
+### Before you contribute
+Before we can use your code, you must sign the
+[Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual?csw=1)
+(CLA), which you can do online. The CLA is necessary mainly because you own the
+copyright to your changes, even after your contribution becomes part of our
+codebase, so we need your permission to use and distribute your code. We also
+need to be sure of various other things—for instance that you'll tell us if you
+know that your code infringes on other people's patents. You don't have to sign
+the CLA until after you've submitted your code for review and a member has
+approved it, but you must do it before we can put your code into our codebase.
+Before you start working on a larger contribution, you should get in touch with
+us first through the issue tracker with your idea so that we can help out and
+possibly guide you. Coordinating up front makes it much easier to avoid
+frustration later on.
+
+### Code reviews
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose.
+
+### The small print
+Contributions made by corporations are covered by a different agreement than
+the one above, the Software Grant and Corporate Contributor License Agreement.
diff --git a/blueprint/LICENSE b/blueprint/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7
--- /dev/null
+++ b/blueprint/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/blueprint/README.md b/blueprint/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..b51731aec995429e9a951af9cd3db1cbe4e1efa6
--- /dev/null
+++ b/blueprint/README.md
@@ -0,0 +1,14 @@
+Blueprint Build System
+======================
+[![Build Status](https://travis-ci.org/google/blueprint.svg?branch=master)](https://travis-ci.org/google/blueprint)
+
+Blueprint is a meta-build system that reads in Blueprints files that describe
+modules that need to be built, and produces a
+[Ninja](http://martine.github.io/ninja/) manifest describing the commands that
+need to be run and their dependencies. Where most build systems use built-in
+rules or a domain-specific language to describe the logic for converting module
+descriptions to build rules, Blueprint delegates this to per-project build
+logic written in Go. For large, heterogeneous projects this allows the inherent
+complexity of the build logic to be maintained in a high-level language, while
+still allowing simple changes to individual modules by modifying easy to
+understand Blueprints files.
diff --git a/blueprint/blueprint.bash b/blueprint/blueprint.bash
new file mode 100755
index 0000000000000000000000000000000000000000..7b6b17a727923365152730989a79c27b08ca1d00
--- /dev/null
+++ b/blueprint/blueprint.bash
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+# This script is intended to wrap the execution of ninja so that we
+# can do some checks before each ninja run.
+#
+# It can either be run with a standalone Blueprint checkout to generate
+# the minibp binary, or can be used by another script as part of a custom
+# Blueprint-based build system. When used by another script, the following
+# environment variables can be set to configure this script, which are
+# documented below:
+#
+# BUILDDIR
+# SKIP_NINJA
+#
+# When run in a standalone Blueprint checkout, bootstrap.bash will install
+# this script into the $BUILDDIR, where it may be executed.
+#
+# For embedding into a custom build system, the current directory when this
+# script executes should be the same directory that $BOOTSTRAP should be
+# called from.
+
+set -e
+
+# BUILDDIR should be set to the path to store build results. By default,
+# this is the directory containing this script, but can be set explicitly
+# if the custom build system only wants to install their own wrapper.
+[ -z "$BUILDDIR" ] && BUILDDIR=`dirname "${BASH_SOURCE[0]}"`
+
+# .blueprint.bootstrap provides saved values from the bootstrap.bash script:
+#
+# BOOTSTRAP
+# BOOTSTRAP_MANIFEST
+#
+# If it doesn't exist, we probably just need to re-run bootstrap.bash, which
+# ninja will do when switching stages. So just skip to ninja.
+if [ -f "${BUILDDIR}/.blueprint.bootstrap" ]; then
+ source "${BUILDDIR}/.blueprint.bootstrap"
+
+ # Pick the newer of .bootstrap/bootstrap.ninja.in or $BOOTSTRAP_MANIFEST,
+ # and copy it to .bootstrap/build.ninja.in
+ GEN_BOOTSTRAP_MANIFEST="${BUILDDIR}/.bootstrap/bootstrap.ninja.in"
+ if [ -f "${GEN_BOOTSTRAP_MANIFEST}" ]; then
+ if [ "${GEN_BOOTSTRAP_MANIFEST}" -nt "${BOOTSTRAP_MANIFEST}" ]; then
+ BOOTSTRAP_MANIFEST="${GEN_BOOTSTRAP_MANIFEST}"
+ fi
+ fi
+
+ # Copy the selected manifest to $BUILDDIR/.bootstrap/build.ninja.in
+ mkdir -p "${BUILDDIR}/.bootstrap"
+ cp "${BOOTSTRAP_MANIFEST}" "${BUILDDIR}/.bootstrap/build.ninja.in"
+
+ # Bootstrap it to $BUILDDIR/build.ninja
+ "${BOOTSTRAP}" -i "${BUILDDIR}/.bootstrap/build.ninja.in"
+fi
+
+# SKIP_NINJA can be used by wrappers that wish to run ninja themselves.
+if [ -z "$SKIP_NINJA" ]; then
+ ninja -C "${BUILDDIR}" "$@"
+else
+ exit 0
+fi
diff --git a/blueprint/bootstrap.bash b/blueprint/bootstrap.bash
new file mode 100755
index 0000000000000000000000000000000000000000..6e5865161f4007359992560d31061a77cf92f988
--- /dev/null
+++ b/blueprint/bootstrap.bash
@@ -0,0 +1,145 @@
+#!/bin/bash
+
+# This script serves two purposes. First, it can bootstrap the standalone
+# Blueprint to generate the minibp binary. To do this simply run the script
+# with no arguments from the desired build directory.
+#
+# It can also be invoked from another script to bootstrap a custom Blueprint-
+# based build system. To do this, the invoking script must first set some or
+# all of the following environment variables, which are documented below where
+# their default values are set:
+#
+# BOOTSTRAP
+# WRAPPER
+# SRCDIR
+# BUILDDIR
+# BOOTSTRAP_MANIFEST
+# GOROOT
+# GOOS
+# GOARCH
+# GOCHAR
+#
+# The invoking script should then run this script, passing along all of its
+# command line arguments.
+
+set -e
+
+EXTRA_ARGS=""
+
+# BOOTSTRAP should be set to the path of the bootstrap script. It can be
+# either an absolute path or one relative to the build directory (which of
+# these is used should probably match what's used for SRCDIR).
+if [ -z "$BOOTSTRAP" ]; then
+ BOOTSTRAP="${BASH_SOURCE[0]}"
+
+ # WRAPPER should only be set if you want a ninja wrapper script to be
+ # installed into the builddir. It is set to blueprint's blueprint.bash
+ # only if BOOTSTRAP and WRAPPER are unset.
+ [ -z "$WRAPPER" ] && WRAPPER="`dirname "${BOOTSTRAP}"`/blueprint.bash"
+fi
+
+# SRCDIR should be set to the path of the root source directory. It can be
+# either an absolute path or a path relative to the build directory. Whether
+# it's an absolute or relative path determines whether the build directory can
+# be moved relative to or along with the source directory without re-running
+# the bootstrap script.
+[ -z "$SRCDIR" ] && SRCDIR=`dirname "${BOOTSTRAP}"`
+
+# BUILDDIR should be set to the path to store build results. By default, this
+# is the current directory, but it may be set to an absolute or relative path.
+[ -z "$BUILDDIR" ] && BUILDDIR=.
+
+# TOPNAME should be set to the name of the top-level Blueprints file
+[ -z "$TOPNAME" ] && TOPNAME="Blueprints"
+
+# BOOTSTRAP_MANIFEST is the path to the bootstrap Ninja file that is part of
+# the source tree. It is used to bootstrap a build output directory from when
+# the script is run manually by a user.
+[ -z "$BOOTSTRAP_MANIFEST" ] && BOOTSTRAP_MANIFEST="${SRCDIR}/build.ninja.in"
+
+# These variables should be set by auto-detecting or knowing a priori the host
+# Go toolchain properties.
+[ -z "$GOROOT" ] && GOROOT=`go env GOROOT`
+[ -z "$GOOS" ] && GOOS=`go env GOHOSTOS`
+[ -z "$GOARCH" ] && GOARCH=`go env GOHOSTARCH`
+[ -z "$GOCHAR" ] && GOCHAR=`go env GOCHAR`
+
+# If RUN_TESTS is set, behave like -t was passed in as an option.
+[ ! -z "$RUN_TESTS" ] && EXTRA_ARGS="$EXTRA_ARGS -t"
+
+GOTOOLDIR="$GOROOT/pkg/tool/${GOOS}_$GOARCH"
+GOCOMPILE="$GOTOOLDIR/${GOCHAR}g"
+GOLINK="$GOTOOLDIR/${GOCHAR}l"
+
+if [ ! -f $GOCOMPILE ]; then
+ GOCOMPILE="$GOTOOLDIR/compile"
+fi
+if [ ! -f $GOLINK ]; then
+ GOLINK="$GOTOOLDIR/link"
+fi
+if [[ ! -f $GOCOMPILE || ! -f $GOLINK ]]; then
+ echo "Cannot find go tools under $GOROOT"
+ exit 1
+fi
+
+usage() {
+ echo "Usage of ${BOOTSTRAP}:"
+ echo " -h: print a help message and exit"
+ echo " -r: regenerate ${BOOTSTRAP_MANIFEST}"
+ echo " -t: include tests when regenerating manifest"
+}
+
+# Parse the command line flags.
+IN="$BOOTSTRAP_MANIFEST"
+REGEN_BOOTSTRAP_MANIFEST=false
+while getopts ":b:hi:rt" opt; do
+ case $opt in
+ b) BUILDDIR="$OPTARG";;
+ h)
+ usage
+ exit 1
+ ;;
+ i) IN="$OPTARG";;
+ r) REGEN_BOOTSTRAP_MANIFEST=true;;
+ t) EXTRA_ARGS="$EXTRA_ARGS -t";;
+ \?)
+ echo "Invalid option: -$OPTARG" >&2
+ usage
+ exit 1
+ ;;
+ :)
+ echo "Option -$OPTARG requires an argument." >&2
+ exit 1
+ ;;
+ esac
+done
+
+if [ $REGEN_BOOTSTRAP_MANIFEST = true ]; then
+ # This assumes that the script is being run from a build output directory
+ # that has been built in the past.
+ if [ -x $BUILDDIR/.bootstrap/bin/minibp ]; then
+ echo "Regenerating $BOOTSTRAP_MANIFEST"
+ $BUILDDIR/.bootstrap/bin/minibp $EXTRA_ARGS -o $BOOTSTRAP_MANIFEST $SRCDIR/$TOPNAME
+ else
+ echo "Executable minibp not found at $BUILDDIR/.bootstrap/bin/minibp" >&2
+ exit 1
+ fi
+fi
+
+mkdir -p $BUILDDIR
+
+sed -e "s|@@SrcDir@@|$SRCDIR|g" \
+ -e "s|@@BuildDir@@|$BUILDDIR|g" \
+ -e "s|@@GoRoot@@|$GOROOT|g" \
+ -e "s|@@GoCompile@@|$GOCOMPILE|g" \
+ -e "s|@@GoLink@@|$GOLINK|g" \
+ -e "s|@@Bootstrap@@|$BOOTSTRAP|g" \
+ -e "s|@@BootstrapManifest@@|$BOOTSTRAP_MANIFEST|g" \
+ $IN > $BUILDDIR/build.ninja
+
+echo "BOOTSTRAP=\"${BOOTSTRAP}\"" > $BUILDDIR/.blueprint.bootstrap
+echo "BOOTSTRAP_MANIFEST=\"${BOOTSTRAP_MANIFEST}\"" >> $BUILDDIR/.blueprint.bootstrap
+
+if [ ! -z "$WRAPPER" ]; then
+ cp $WRAPPER $BUILDDIR/
+fi
diff --git a/blueprint/bootstrap/bootstrap.go b/blueprint/bootstrap/bootstrap.go
new file mode 100644
index 0000000000000000000000000000000000000000..c952c850ff63ba704f0852a602de14c0e7eaa3fd
--- /dev/null
+++ b/blueprint/bootstrap/bootstrap.go
@@ -0,0 +1,1001 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bootstrap
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ "github.com/google/blueprint"
+ "github.com/google/blueprint/pathtools"
+)
+
+const bootstrapSubDir = ".bootstrap"
+const miniBootstrapSubDir = ".minibootstrap"
+
+var (
+ pctx = blueprint.NewPackageContext("github.com/google/blueprint/bootstrap")
+
+ goTestMainCmd = pctx.StaticVariable("goTestMainCmd", filepath.Join(bootstrapDir, "bin", "gotestmain"))
+ goTestRunnerCmd = pctx.StaticVariable("goTestRunnerCmd", filepath.Join(bootstrapDir, "bin", "gotestrunner"))
+ chooseStageCmd = pctx.StaticVariable("chooseStageCmd", filepath.Join(bootstrapDir, "bin", "choosestage"))
+ pluginGenSrcCmd = pctx.StaticVariable("pluginGenSrcCmd", filepath.Join(bootstrapDir, "bin", "loadplugins"))
+
+ compile = pctx.StaticRule("compile",
+ blueprint.RuleParams{
+ Command: "GOROOT='$goRoot' $compileCmd -o $out -p $pkgPath -complete " +
+ "$incFlags -pack $in",
+ CommandDeps: []string{"$compileCmd"},
+ Description: "compile $out",
+ },
+ "pkgPath", "incFlags")
+
+ link = pctx.StaticRule("link",
+ blueprint.RuleParams{
+ Command: "GOROOT='$goRoot' $linkCmd -o $out $libDirFlags $in",
+ CommandDeps: []string{"$linkCmd"},
+ Description: "link $out",
+ },
+ "libDirFlags")
+
+ goTestMain = pctx.StaticRule("gotestmain",
+ blueprint.RuleParams{
+ Command: "$goTestMainCmd -o $out -pkg $pkg $in",
+ CommandDeps: []string{"$goTestMainCmd"},
+ Description: "gotestmain $out",
+ },
+ "pkg")
+
+ pluginGenSrc = pctx.StaticRule("pluginGenSrc",
+ blueprint.RuleParams{
+ Command: "$pluginGenSrcCmd -o $out -p $pkg $plugins",
+ CommandDeps: []string{"$pluginGenSrcCmd"},
+ Description: "create $out",
+ },
+ "pkg", "plugins")
+
+ test = pctx.StaticRule("test",
+ blueprint.RuleParams{
+ Command: "$goTestRunnerCmd -p $pkgSrcDir -f $out -- $in -test.short",
+ CommandDeps: []string{"$goTestRunnerCmd"},
+ Description: "test $pkg",
+ },
+ "pkg", "pkgSrcDir")
+
+ cp = pctx.StaticRule("cp",
+ blueprint.RuleParams{
+ Command: "cp $in $out",
+ Description: "cp $out",
+ },
+ "generator")
+
+ bootstrap = pctx.StaticRule("bootstrap",
+ blueprint.RuleParams{
+ Command: "BUILDDIR=$buildDir $bootstrapCmd -i $in",
+ CommandDeps: []string{"$bootstrapCmd"},
+ Description: "bootstrap $in",
+ Generator: true,
+ })
+
+ chooseStage = pctx.StaticRule("chooseStage",
+ blueprint.RuleParams{
+ Command: "$chooseStageCmd --current $current --bootstrap $bootstrapManifest -o $out $in",
+ CommandDeps: []string{"$chooseStageCmd", "$bootstrapManifest"},
+ Description: "choosing next stage",
+ },
+ "current", "generator")
+
+ touch = pctx.StaticRule("touch",
+ blueprint.RuleParams{
+ Command: "touch $out",
+ Description: "touch $out",
+ },
+ "depfile", "generator")
+
+ // Work around a Ninja issue. See https://github.com/martine/ninja/pull/634
+ phony = pctx.StaticRule("phony",
+ blueprint.RuleParams{
+ Command: "# phony $out",
+ Description: "phony $out",
+ Generator: true,
+ },
+ "depfile")
+
+ binDir = pctx.StaticVariable("BinDir", filepath.Join(bootstrapDir, "bin"))
+ minibpFile = filepath.Join("$BinDir", "minibp")
+
+ docsDir = filepath.Join(bootstrapDir, "docs")
+
+ bootstrapDir = filepath.Join("$buildDir", bootstrapSubDir)
+ miniBootstrapDir = filepath.Join("$buildDir", miniBootstrapSubDir)
+)
+
+type bootstrapGoCore interface {
+ BuildStage() Stage
+ SetBuildStage(Stage)
+}
+
+func propagateStageBootstrap(mctx blueprint.TopDownMutatorContext) {
+ if mod, ok := mctx.Module().(bootstrapGoCore); !ok || mod.BuildStage() != StageBootstrap {
+ return
+ }
+
+ mctx.VisitDirectDeps(func(mod blueprint.Module) {
+ if m, ok := mod.(bootstrapGoCore); ok {
+ m.SetBuildStage(StageBootstrap)
+ }
+ })
+}
+
+func pluginDeps(ctx blueprint.BottomUpMutatorContext) {
+ if pkg, ok := ctx.Module().(*goPackage); ok {
+ for _, plugin := range pkg.properties.PluginFor {
+ ctx.AddReverseDependency(ctx.Module(), plugin)
+ }
+ }
+}
+
+type goPackageProducer interface {
+ GoPkgRoot() string
+ GoPackageTarget() string
+}
+
+func isGoPackageProducer(module blueprint.Module) bool {
+ _, ok := module.(goPackageProducer)
+ return ok
+}
+
+type goTestProducer interface {
+ GoTestTarget() string
+ BuildStage() Stage
+}
+
+func isGoTestProducer(module blueprint.Module) bool {
+ _, ok := module.(goTestProducer)
+ return ok
+}
+
+type goPluginProvider interface {
+ GoPkgPath() string
+ IsPluginFor(string) bool
+}
+
+func isGoPluginFor(name string) func(blueprint.Module) bool {
+ return func(module blueprint.Module) bool {
+ if plugin, ok := module.(goPluginProvider); ok {
+ return plugin.IsPluginFor(name)
+ }
+ return false
+ }
+}
+
+func isBootstrapModule(module blueprint.Module) bool {
+ _, isPackage := module.(*goPackage)
+ _, isBinary := module.(*goBinary)
+ return isPackage || isBinary
+}
+
+func isBootstrapBinaryModule(module blueprint.Module) bool {
+ _, isBinary := module.(*goBinary)
+ return isBinary
+}
+
+// A goPackage is a module for building Go packages.
+type goPackage struct {
+ properties struct {
+ PkgPath string
+ Srcs []string
+ TestSrcs []string
+ PluginFor []string
+ }
+
+ // The root dir in which the package .a file is located. The full .a file
+ // path will be "packageRoot/PkgPath.a"
+ pkgRoot string
+
+ // The path of the .a file that is to be built.
+ archiveFile string
+
+ // The path of the test .a file that is to be built.
+ testArchiveFile string
+
+ // The bootstrap Config
+ config *Config
+
+ // The stage in which this module should be built
+ buildStage Stage
+}
+
+var _ goPackageProducer = (*goPackage)(nil)
+
+func newGoPackageModuleFactory(config *Config) func() (blueprint.Module, []interface{}) {
+ return func() (blueprint.Module, []interface{}) {
+ module := &goPackage{
+ buildStage: StagePrimary,
+ config: config,
+ }
+ return module, []interface{}{&module.properties}
+ }
+}
+
+func (g *goPackage) GoPkgPath() string {
+ return g.properties.PkgPath
+}
+
+func (g *goPackage) GoPkgRoot() string {
+ return g.pkgRoot
+}
+
+func (g *goPackage) GoPackageTarget() string {
+ return g.archiveFile
+}
+
+func (g *goPackage) GoTestTarget() string {
+ return g.testArchiveFile
+}
+
+func (g *goPackage) BuildStage() Stage {
+ return g.buildStage
+}
+
+func (g *goPackage) SetBuildStage(buildStage Stage) {
+ g.buildStage = buildStage
+}
+
+func (g *goPackage) IsPluginFor(name string) bool {
+ for _, plugin := range g.properties.PluginFor {
+ if plugin == name {
+ return true
+ }
+ }
+ return false
+}
+
+func (g *goPackage) GenerateBuildActions(ctx blueprint.ModuleContext) {
+ var (
+ name = ctx.ModuleName()
+ hasPlugins = false
+ pluginSrc = ""
+ genSrcs = []string{}
+ )
+
+ if g.properties.PkgPath == "" {
+ ctx.ModuleErrorf("module %s did not specify a valid pkgPath", name)
+ return
+ }
+
+ g.pkgRoot = packageRoot(ctx)
+ g.archiveFile = filepath.Join(g.pkgRoot,
+ filepath.FromSlash(g.properties.PkgPath)+".a")
+ if len(g.properties.TestSrcs) > 0 && g.config.runGoTests {
+ g.testArchiveFile = filepath.Join(testRoot(ctx),
+ filepath.FromSlash(g.properties.PkgPath)+".a")
+ }
+
+ ctx.VisitDepsDepthFirstIf(isGoPluginFor(name),
+ func(module blueprint.Module) { hasPlugins = true })
+ if hasPlugins {
+ pluginSrc = filepath.Join(moduleGenSrcDir(ctx), "plugin.go")
+ genSrcs = append(genSrcs, pluginSrc)
+ }
+
+ // We only actually want to build the builder modules if we're running as
+ // minibp (i.e. we're generating a bootstrap Ninja file). This is to break
+ // the circular dependence that occurs when the builder requires a new Ninja
+ // file to be built, but building a new ninja file requires the builder to
+ // be built.
+ if g.config.stage == g.BuildStage() {
+ var deps []string
+
+ if hasPlugins && !buildGoPluginLoader(ctx, g.properties.PkgPath, pluginSrc, g.config.stage) {
+ return
+ }
+
+ if g.config.runGoTests {
+ deps = buildGoTest(ctx, testRoot(ctx), g.testArchiveFile,
+ g.properties.PkgPath, g.properties.Srcs, genSrcs,
+ g.properties.TestSrcs)
+ }
+
+ buildGoPackage(ctx, g.pkgRoot, g.properties.PkgPath, g.archiveFile,
+ g.properties.Srcs, genSrcs, deps)
+ } else if g.config.stage != StageBootstrap {
+ if len(g.properties.TestSrcs) > 0 && g.config.runGoTests {
+ phonyGoTarget(ctx, g.testArchiveFile, g.properties.TestSrcs, nil, nil)
+ }
+ phonyGoTarget(ctx, g.archiveFile, g.properties.Srcs, genSrcs, nil)
+ }
+}
+
+// A goBinary is a module for building executable binaries from Go sources.
+type goBinary struct {
+ properties struct {
+ Srcs []string
+ TestSrcs []string
+ PrimaryBuilder bool
+ }
+
+ // The path of the test .a file that is to be built.
+ testArchiveFile string
+
+ // The bootstrap Config
+ config *Config
+
+ // The stage in which this module should be built
+ buildStage Stage
+}
+
+func newGoBinaryModuleFactory(config *Config, buildStage Stage) func() (blueprint.Module, []interface{}) {
+ return func() (blueprint.Module, []interface{}) {
+ module := &goBinary{
+ config: config,
+ buildStage: buildStage,
+ }
+ return module, []interface{}{&module.properties}
+ }
+}
+
+func (g *goBinary) GoTestTarget() string {
+ return g.testArchiveFile
+}
+
+func (g *goBinary) BuildStage() Stage {
+ return g.buildStage
+}
+
+func (g *goBinary) SetBuildStage(buildStage Stage) {
+ g.buildStage = buildStage
+}
+
+func (g *goBinary) GenerateBuildActions(ctx blueprint.ModuleContext) {
+ var (
+ name = ctx.ModuleName()
+ objDir = moduleObjDir(ctx)
+ archiveFile = filepath.Join(objDir, name+".a")
+ aoutFile = filepath.Join(objDir, "a.out")
+ binaryFile = filepath.Join("$BinDir", name)
+ hasPlugins = false
+ pluginSrc = ""
+ genSrcs = []string{}
+ )
+
+ if len(g.properties.TestSrcs) > 0 && g.config.runGoTests {
+ g.testArchiveFile = filepath.Join(testRoot(ctx), name+".a")
+ }
+
+ ctx.VisitDepsDepthFirstIf(isGoPluginFor(name),
+ func(module blueprint.Module) { hasPlugins = true })
+ if hasPlugins {
+ pluginSrc = filepath.Join(moduleGenSrcDir(ctx), "plugin.go")
+ genSrcs = append(genSrcs, pluginSrc)
+ }
+
+ // We only actually want to build the builder modules if we're running as
+ // minibp (i.e. we're generating a bootstrap Ninja file). This is to break
+ // the circular dependence that occurs when the builder requires a new Ninja
+ // file to be built, but building a new ninja file requires the builder to
+ // be built.
+ if g.config.stage == g.BuildStage() {
+ var deps []string
+
+ if hasPlugins && !buildGoPluginLoader(ctx, "main", pluginSrc, g.config.stage) {
+ return
+ }
+
+ if g.config.runGoTests {
+ deps = buildGoTest(ctx, testRoot(ctx), g.testArchiveFile,
+ name, g.properties.Srcs, genSrcs, g.properties.TestSrcs)
+ }
+
+ buildGoPackage(ctx, objDir, name, archiveFile, g.properties.Srcs, genSrcs, deps)
+
+ var libDirFlags []string
+ ctx.VisitDepsDepthFirstIf(isGoPackageProducer,
+ func(module blueprint.Module) {
+ dep := module.(goPackageProducer)
+ libDir := dep.GoPkgRoot()
+ libDirFlags = append(libDirFlags, "-L "+libDir)
+ })
+
+ linkArgs := map[string]string{}
+ if len(libDirFlags) > 0 {
+ linkArgs["libDirFlags"] = strings.Join(libDirFlags, " ")
+ }
+
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: link,
+ Outputs: []string{aoutFile},
+ Inputs: []string{archiveFile},
+ Args: linkArgs,
+ })
+
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: cp,
+ Outputs: []string{binaryFile},
+ Inputs: []string{aoutFile},
+ })
+ } else if g.config.stage != StageBootstrap {
+ if len(g.properties.TestSrcs) > 0 && g.config.runGoTests {
+ phonyGoTarget(ctx, g.testArchiveFile, g.properties.TestSrcs, nil, nil)
+ }
+
+ intermediates := []string{aoutFile, archiveFile}
+ phonyGoTarget(ctx, binaryFile, g.properties.Srcs, genSrcs, intermediates)
+ }
+}
+
+func buildGoPluginLoader(ctx blueprint.ModuleContext, pkgPath, pluginSrc string, stage Stage) bool {
+ ret := true
+ name := ctx.ModuleName()
+
+ var pluginPaths []string
+ ctx.VisitDepsDepthFirstIf(isGoPluginFor(name),
+ func(module blueprint.Module) {
+ plugin := module.(goPluginProvider)
+ pluginPaths = append(pluginPaths, plugin.GoPkgPath())
+ if stage == StageBootstrap {
+ ctx.OtherModuleErrorf(module, "plugin %q may not be included in core module %q",
+ ctx.OtherModuleName(module), name)
+ ret = false
+ }
+ })
+
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: pluginGenSrc,
+ Outputs: []string{pluginSrc},
+ Args: map[string]string{
+ "pkg": pkgPath,
+ "plugins": strings.Join(pluginPaths, " "),
+ },
+ })
+
+ return ret
+}
+
+func buildGoPackage(ctx blueprint.ModuleContext, pkgRoot string,
+ pkgPath string, archiveFile string, srcs []string, genSrcs []string, orderDeps []string) {
+
+ srcDir := moduleSrcDir(ctx)
+ srcFiles := pathtools.PrefixPaths(srcs, srcDir)
+ srcFiles = append(srcFiles, genSrcs...)
+
+ var incFlags []string
+ var deps []string
+ ctx.VisitDepsDepthFirstIf(isGoPackageProducer,
+ func(module blueprint.Module) {
+ dep := module.(goPackageProducer)
+ incDir := dep.GoPkgRoot()
+ target := dep.GoPackageTarget()
+ incFlags = append(incFlags, "-I "+incDir)
+ deps = append(deps, target)
+ })
+
+ compileArgs := map[string]string{
+ "pkgPath": pkgPath,
+ }
+
+ if len(incFlags) > 0 {
+ compileArgs["incFlags"] = strings.Join(incFlags, " ")
+ }
+
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: compile,
+ Outputs: []string{archiveFile},
+ Inputs: srcFiles,
+ OrderOnly: orderDeps,
+ Implicits: deps,
+ Args: compileArgs,
+ })
+}
+
+func buildGoTest(ctx blueprint.ModuleContext, testRoot, testPkgArchive,
+ pkgPath string, srcs, genSrcs, testSrcs []string) []string {
+
+ if len(testSrcs) == 0 {
+ return nil
+ }
+
+ srcDir := moduleSrcDir(ctx)
+ testFiles := pathtools.PrefixPaths(testSrcs, srcDir)
+
+ mainFile := filepath.Join(testRoot, "test.go")
+ testArchive := filepath.Join(testRoot, "test.a")
+ testFile := filepath.Join(testRoot, "test")
+ testPassed := filepath.Join(testRoot, "test.passed")
+
+ buildGoPackage(ctx, testRoot, pkgPath, testPkgArchive,
+ append(srcs, testSrcs...), genSrcs, nil)
+
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: goTestMain,
+ Outputs: []string{mainFile},
+ Inputs: testFiles,
+ Args: map[string]string{
+ "pkg": pkgPath,
+ },
+ })
+
+ libDirFlags := []string{"-L " + testRoot}
+ ctx.VisitDepsDepthFirstIf(isGoPackageProducer,
+ func(module blueprint.Module) {
+ dep := module.(goPackageProducer)
+ libDir := dep.GoPkgRoot()
+ libDirFlags = append(libDirFlags, "-L "+libDir)
+ })
+
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: compile,
+ Outputs: []string{testArchive},
+ Inputs: []string{mainFile},
+ Implicits: []string{testPkgArchive},
+ Args: map[string]string{
+ "pkgPath": "main",
+ "incFlags": "-I " + testRoot,
+ },
+ })
+
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: link,
+ Outputs: []string{testFile},
+ Inputs: []string{testArchive},
+ Args: map[string]string{
+ "libDirFlags": strings.Join(libDirFlags, " "),
+ },
+ })
+
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: test,
+ Outputs: []string{testPassed},
+ Inputs: []string{testFile},
+ Args: map[string]string{
+ "pkg": pkgPath,
+ "pkgSrcDir": filepath.Dir(testFiles[0]),
+ },
+ })
+
+ return []string{testPassed}
+}
+
+func phonyGoTarget(ctx blueprint.ModuleContext, target string, srcs []string,
+ gensrcs []string, intermediates []string) {
+
+ var depTargets []string
+ ctx.VisitDepsDepthFirstIf(isGoPackageProducer,
+ func(module blueprint.Module) {
+ dep := module.(goPackageProducer)
+ target := dep.GoPackageTarget()
+ depTargets = append(depTargets, target)
+ })
+
+ moduleDir := ctx.ModuleDir()
+ srcs = pathtools.PrefixPaths(srcs, filepath.Join("$srcDir", moduleDir))
+ srcs = append(srcs, gensrcs...)
+
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: phony,
+ Outputs: []string{target},
+ Inputs: srcs,
+ Implicits: depTargets,
+ })
+
+ // If one of the source files gets deleted or renamed that will prevent the
+	// re-bootstrapping from happening because it depends on the missing source file.
+ // To get around this we add a build statement using the built-in phony rule
+	// for each source file, which will cause Ninja to treat it as dirty if it's
+ // missing.
+ for _, src := range srcs {
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: blueprint.Phony,
+ Outputs: []string{src},
+ })
+ }
+
+ // If there is no rule to build the intermediate files of a bootstrap go package
+ // the cleanup phase of the primary builder will delete the intermediate files,
+ // forcing an unnecessary rebuild. Add phony rules for all of them.
+ for _, intermediate := range intermediates {
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: blueprint.Phony,
+ Outputs: []string{intermediate},
+ })
+ }
+
+}
+
+type singleton struct {
+ // The bootstrap Config
+ config *Config
+}
+
+func newSingletonFactory(config *Config) func() blueprint.Singleton {
+ return func() blueprint.Singleton {
+ return &singleton{
+ config: config,
+ }
+ }
+}
+
+// GenerateBuildActions implements blueprint.Singleton. It locates the
+// "primary builder" binary, then emits the Ninja rules for whichever of the
+// three bootstrap stages (bootstrap, primary, main) this invocation is
+// generating, including the choosestage plumbing that moves between stages.
+func (s *singleton) GenerateBuildActions(ctx blueprint.SingletonContext) {
+ // Find the module that's marked as the "primary builder", which means it's
+ // creating the binary that we'll use to generate the non-bootstrap
+ // build.ninja file.
+ var primaryBuilders []*goBinary
+ // rebootstrapDeps contains modules that will be built in StageBootstrap
+ var rebootstrapDeps []string
+ // primaryRebootstrapDeps contains modules that will be built in StagePrimary
+ var primaryRebootstrapDeps []string
+ ctx.VisitAllModulesIf(isBootstrapBinaryModule,
+ func(module blueprint.Module) {
+ binaryModule := module.(*goBinary)
+ binaryModuleName := ctx.ModuleName(binaryModule)
+ binaryModulePath := filepath.Join("$BinDir", binaryModuleName)
+
+ // Bucket each binary by the stage that builds it so later stages
+ // can depend on it being up to date.
+ if binaryModule.BuildStage() == StageBootstrap {
+ rebootstrapDeps = append(rebootstrapDeps, binaryModulePath)
+ } else {
+ primaryRebootstrapDeps = append(primaryRebootstrapDeps, binaryModulePath)
+ }
+ if binaryModule.properties.PrimaryBuilder {
+ primaryBuilders = append(primaryBuilders, binaryModule)
+ }
+ })
+
+ var primaryBuilderName, primaryBuilderExtraFlags string
+ switch len(primaryBuilders) {
+ case 0:
+ // If there's no primary builder module then that means we'll use minibp
+ // as the primary builder. We can trigger its primary builder mode with
+ // the -p flag.
+ primaryBuilderName = "minibp"
+ primaryBuilderExtraFlags = "-p"
+
+ case 1:
+ primaryBuilderName = ctx.ModuleName(primaryBuilders[0])
+
+ default:
+ // More than one primary builder is ambiguous; report every candidate.
+ ctx.Errorf("multiple primary builder modules present:")
+ for _, primaryBuilder := range primaryBuilders {
+ ctx.ModuleErrorf(primaryBuilder, "<-- module %s",
+ ctx.ModuleName(primaryBuilder))
+ }
+ return
+ }
+
+ primaryBuilderFile := filepath.Join("$BinDir", primaryBuilderName)
+
+ if s.config.runGoTests {
+ primaryBuilderExtraFlags += " -t"
+ }
+
+ // Get the filename of the top-level Blueprints file to pass to minibp.
+ topLevelBlueprints := filepath.Join("$srcDir",
+ filepath.Base(s.config.topLevelBlueprintsFile))
+
+ rebootstrapDeps = append(rebootstrapDeps, topLevelBlueprints)
+ primaryRebootstrapDeps = append(primaryRebootstrapDeps, topLevelBlueprints)
+
+ // Per-stage generated-Ninja-file names plus the timestamp/depfile pairs
+ // used to detect when a stage must be re-entered.
+ mainNinjaFile := filepath.Join(bootstrapDir, "main.ninja.in")
+ mainNinjaTimestampFile := mainNinjaFile + ".timestamp"
+ mainNinjaTimestampDepFile := mainNinjaTimestampFile + ".d"
+ primaryBuilderNinjaFile := filepath.Join(bootstrapDir, "primary.ninja.in")
+ primaryBuilderNinjaTimestampFile := primaryBuilderNinjaFile + ".timestamp"
+ primaryBuilderNinjaTimestampDepFile := primaryBuilderNinjaTimestampFile + ".d"
+ bootstrapNinjaFile := filepath.Join(bootstrapDir, "bootstrap.ninja.in")
+ docsFile := filepath.Join(docsDir, primaryBuilderName+".html")
+
+ primaryRebootstrapDeps = append(primaryRebootstrapDeps, docsFile)
+
+ // If the tests change, be sure to re-run them. These need to be
+ // dependencies for the ninja file so that it's updated after these
+ // run. Otherwise we'd never leave the bootstrap stage, since the
+ // timestamp file would be newer than the ninja file.
+ ctx.VisitAllModulesIf(isGoTestProducer,
+ func(module blueprint.Module) {
+ testModule := module.(goTestProducer)
+ target := testModule.GoTestTarget()
+ if target != "" {
+ if testModule.BuildStage() == StageBootstrap {
+ rebootstrapDeps = append(rebootstrapDeps, target)
+ } else {
+ primaryRebootstrapDeps = append(primaryRebootstrapDeps, target)
+ }
+ }
+ })
+
+ // Emit the rules for the stage this invocation is generating.
+ switch s.config.stage {
+ case StageBootstrap:
+ // We're generating a bootstrapper Ninja file, so we need to set things
+ // up to rebuild the build.ninja file using the primary builder.
+
+ // BuildDir must be different between the three stages, otherwise the
+ // cleanup process will remove files from the other builds.
+ ctx.SetNinjaBuildDir(pctx, miniBootstrapDir)
+
+ // Generate the Ninja file to build the primary builder. Save the
+ // timestamps and deps, so that we can come back to this stage if
+ // it needs to be regenerated.
+ primarybp := ctx.Rule(pctx, "primarybp",
+ blueprint.RuleParams{
+ Command: fmt.Sprintf("%s --build-primary $runTests -m $bootstrapManifest "+
+ "--timestamp $timestamp --timestampdep $timestampdep "+
+ "-b $buildDir -d $outfile.d -o $outfile $in", minibpFile),
+ Description: "minibp $outfile",
+ Depfile: "$outfile.d",
+ },
+ "runTests", "timestamp", "timestampdep", "outfile")
+
+ args := map[string]string{
+ "outfile": primaryBuilderNinjaFile,
+ "timestamp": primaryBuilderNinjaTimestampFile,
+ "timestampdep": primaryBuilderNinjaTimestampDepFile,
+ }
+
+ if s.config.runGoTests {
+ args["runTests"] = "-t"
+ }
+
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: primarybp,
+ Outputs: []string{primaryBuilderNinjaFile, primaryBuilderNinjaTimestampFile},
+ Inputs: []string{topLevelBlueprints},
+ Implicits: rebootstrapDeps,
+ Args: args,
+ })
+
+ // Rebuild the bootstrap Ninja file using the minibp that we just built.
+ // If this produces a difference, choosestage will retrigger this stage.
+ minibp := ctx.Rule(pctx, "minibp",
+ blueprint.RuleParams{
+ Command: fmt.Sprintf("%s $runTests -m $bootstrapManifest "+
+ "-b $buildDir -d $out.d -o $out $in", minibpFile),
+ // $bootstrapManifest is here so that when it is updated, we
+ // force a rebuild of bootstrap.ninja.in. chooseStage should
+ // have already copied the new version over, but kept the old
+ // timestamps to force this regeneration.
+ CommandDeps: []string{"$bootstrapManifest", minibpFile},
+ Description: "minibp $out",
+ Generator: true,
+ Depfile: "$out.d",
+ },
+ "runTests")
+
+ args = map[string]string{}
+
+ if s.config.runGoTests {
+ args["runTests"] = "-t"
+ }
+
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: minibp,
+ Outputs: []string{bootstrapNinjaFile},
+ Inputs: []string{topLevelBlueprints},
+ Args: args,
+ })
+
+ // When the current build.ninja file is a bootstrapper, we always want
+ // to have it replace itself with a non-bootstrapper build.ninja. To
+ // accomplish that we depend on a file that should never exist and
+ // "build" it using Ninja's built-in phony rule.
+ notAFile := filepath.Join(bootstrapDir, "notAFile")
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: blueprint.Phony,
+ Outputs: []string{notAFile},
+ })
+
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: chooseStage,
+ Outputs: []string{filepath.Join(bootstrapDir, "build.ninja.in")},
+ Inputs: []string{bootstrapNinjaFile, primaryBuilderNinjaFile},
+ Implicits: []string{notAFile},
+ Args: map[string]string{
+ "current": bootstrapNinjaFile,
+ },
+ })
+
+ case StagePrimary:
+ // We're generating the primary builder's Ninja file, so we need to set
+ // things up to rebuild the build.ninja file using the primary builder.
+
+ // BuildDir must be different between the three stages, otherwise the
+ // cleanup process will remove files from the other builds.
+ ctx.SetNinjaBuildDir(pctx, bootstrapDir)
+
+ // We generate the depfile here that includes the dependencies for all
+ // the Blueprints files that contribute to generating the big build
+ // manifest (build.ninja file). This depfile will be used by the non-
+ // bootstrap build manifest to determine whether it should touch the
+ // timestamp file to trigger a re-bootstrap.
+ bigbp := ctx.Rule(pctx, "bigbp",
+ blueprint.RuleParams{
+ Command: fmt.Sprintf("%s %s -m $bootstrapManifest "+
+ "--timestamp $timestamp --timestampdep $timestampdep "+
+ "-b $buildDir -d $outfile.d -o $outfile $in", primaryBuilderFile,
+ primaryBuilderExtraFlags),
+ Description: fmt.Sprintf("%s $outfile", primaryBuilderName),
+ Depfile: "$outfile.d",
+ },
+ "timestamp", "timestampdep", "outfile")
+
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: bigbp,
+ Outputs: []string{mainNinjaFile, mainNinjaTimestampFile},
+ Inputs: []string{topLevelBlueprints},
+ Implicits: primaryRebootstrapDeps,
+ Args: map[string]string{
+ "timestamp": mainNinjaTimestampFile,
+ "timestampdep": mainNinjaTimestampDepFile,
+ "outfile": mainNinjaFile,
+ },
+ })
+
+ // Generate build system docs for the primary builder. Generating docs reads the source
+ // files used to build the primary builder, but that dependency will be picked up through
+ // the dependency on the primary builder itself. There are no dependencies on the
+ // Blueprints files, as any relevant changes to the Blueprints files would have caused
+ // a rebuild of the primary builder.
+ bigbpDocs := ctx.Rule(pctx, "bigbpDocs",
+ blueprint.RuleParams{
+ Command: fmt.Sprintf("%s %s -b $buildDir --docs $out %s", primaryBuilderFile,
+ primaryBuilderExtraFlags, topLevelBlueprints),
+ CommandDeps: []string{primaryBuilderFile},
+ Description: fmt.Sprintf("%s docs $out", primaryBuilderName),
+ })
+
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: bigbpDocs,
+ Outputs: []string{docsFile},
+ })
+
+ // Detect whether we need to rebuild the primary stage by going back to
+ // the bootstrapper. If this is newer than the primaryBuilderNinjaFile,
+ // then chooseStage will trigger a rebuild of primaryBuilderNinjaFile by
+ // returning to the bootstrap stage.
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: touch,
+ Outputs: []string{primaryBuilderNinjaTimestampFile},
+ Implicits: rebootstrapDeps,
+ Args: map[string]string{
+ "depfile": primaryBuilderNinjaTimestampDepFile,
+ "generator": "true",
+ },
+ })
+
+ // When the current build.ninja file is a bootstrapper, we always want
+ // to have it replace itself with a non-bootstrapper build.ninja. To
+ // accomplish that we depend on a file that should never exist and
+ // "build" it using Ninja's built-in phony rule.
+ notAFile := filepath.Join(bootstrapDir, "notAFile")
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: blueprint.Phony,
+ Outputs: []string{notAFile},
+ })
+
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: chooseStage,
+ Outputs: []string{filepath.Join(bootstrapDir, "build.ninja.in")},
+ Inputs: []string{bootstrapNinjaFile, primaryBuilderNinjaFile, mainNinjaFile},
+ Implicits: []string{notAFile, primaryBuilderNinjaTimestampFile},
+ Args: map[string]string{
+ "current": primaryBuilderNinjaFile,
+ },
+ })
+
+ // Create this phony rule so that upgrades don't delete these during
+ // cleanup
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: blueprint.Phony,
+ Outputs: []string{bootstrapNinjaFile},
+ })
+
+ case StageMain:
+ ctx.SetNinjaBuildDir(pctx, "${buildDir}")
+
+ // We're generating a non-bootstrapper Ninja file, so we need to set it
+ // up to re-bootstrap if necessary. We do this by making build.ninja.in
+ // depend on the various Ninja files, the source build.ninja.in, and
+ // on the timestamp files.
+ //
+ // The timestamp files themselves are set up with the same dependencies
+ // as their Ninja files, including their own depfile. If any of the
+ // dependencies need to be updated, we'll touch the timestamp file,
+ // which will tell choosestage to switch to the stage that rebuilds
+ // that Ninja file.
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: touch,
+ Outputs: []string{primaryBuilderNinjaTimestampFile},
+ Implicits: rebootstrapDeps,
+ Args: map[string]string{
+ "depfile": primaryBuilderNinjaTimestampDepFile,
+ "generator": "true",
+ },
+ })
+
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: touch,
+ Outputs: []string{mainNinjaTimestampFile},
+ Implicits: primaryRebootstrapDeps,
+ Args: map[string]string{
+ "depfile": mainNinjaTimestampDepFile,
+ "generator": "true",
+ },
+ })
+
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: chooseStage,
+ Outputs: []string{filepath.Join(bootstrapDir, "build.ninja.in")},
+ Inputs: []string{bootstrapNinjaFile, primaryBuilderNinjaFile, mainNinjaFile},
+ Implicits: []string{primaryBuilderNinjaTimestampFile, mainNinjaTimestampFile},
+ Args: map[string]string{
+ "current": mainNinjaFile,
+ "generator": "true",
+ },
+ })
+
+ // Create this phony rule so that upgrades don't delete these during
+ // cleanup
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: blueprint.Phony,
+ Outputs: []string{mainNinjaFile, docsFile, "$bootstrapManifest"},
+ })
+
+ if primaryBuilderName == "minibp" {
+ // This is a standalone Blueprint build, so we copy the minibp
+ // binary to the "bin" directory to make it easier to find.
+ finalMinibp := filepath.Join("$buildDir", "bin", primaryBuilderName)
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: cp,
+ Inputs: []string{primaryBuilderFile},
+ Outputs: []string{finalMinibp},
+ })
+ }
+ }
+
+ // Regardless of stage, the final build.ninja is produced from the
+ // choosestage-selected build.ninja.in.
+ ctx.Build(pctx, blueprint.BuildParams{
+ Rule: bootstrap,
+ Outputs: []string{"$buildDir/build.ninja"},
+ Inputs: []string{filepath.Join(bootstrapDir, "build.ninja.in")},
+ })
+}
+
+// packageRoot returns the module-specific package root directory path. This
+// directory is where the final package .a files are output and where dependant
+// modules search for this package via -I arguments.
+func packageRoot(ctx blueprint.ModuleContext) string {
+ moduleName := ctx.ModuleName()
+ return filepath.Join(bootstrapDir, moduleName, "pkg")
+}
+
+// testRoot returns the module-specific package root directory path used for
+// building tests. The .a files generated here will include everything from
+// packageRoot, plus the test-only code.
+func testRoot(ctx blueprint.ModuleContext) string {
+ moduleName := ctx.ModuleName()
+ return filepath.Join(bootstrapDir, moduleName, "test")
+}
+
+// moduleSrcDir returns the path of the directory that all source file paths
+// are specified relative to.
+func moduleSrcDir(ctx blueprint.ModuleContext) string {
+ moduleDir := ctx.ModuleDir()
+ return filepath.Join("$srcDir", moduleDir)
+}
+
+// moduleObjDir returns the module-specific object directory path.
+func moduleObjDir(ctx blueprint.ModuleContext) string {
+ moduleName := ctx.ModuleName()
+ return filepath.Join(bootstrapDir, moduleName, "obj")
+}
+
+// moduleGenSrcDir returns the module-specific generated sources path.
+func moduleGenSrcDir(ctx blueprint.ModuleContext) string {
+ moduleName := ctx.ModuleName()
+ return filepath.Join(bootstrapDir, moduleName, "gen")
+}
diff --git a/blueprint/bootstrap/bpdoc/bpdoc.go b/blueprint/bootstrap/bpdoc/bpdoc.go
new file mode 100644
index 0000000000000000000000000000000000000000..f96d37e40d10f8ec89dc70536e58be7b86a70475
--- /dev/null
+++ b/blueprint/bootstrap/bpdoc/bpdoc.go
@@ -0,0 +1,707 @@
+package bpdoc
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/doc"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "text/template"
+
+ "github.com/google/blueprint"
+ "github.com/google/blueprint/proptools"
+)
+
+// DocCollector parses and caches Go documentation for property struct types.
+// It is safe for concurrent use: the two cache maps are guarded by mutex.
+type DocCollector struct {
+ pkgFiles map[string][]string // Map of package name to source files, provided by constructor
+
+ mutex sync.Mutex
+ pkgDocs map[string]*doc.Package // Map of package name to parsed package documentation, protected by mutex
+ docs map[string]*PropertyStructDocs // Map of type name to docs, protected by mutex
+}
+
+// NewDocCollector returns a DocCollector that will parse documentation from
+// the given map of package name to source files.
+func NewDocCollector(pkgFiles map[string][]string) *DocCollector {
+ dc := DocCollector{
+ pkgFiles: pkgFiles,
+ pkgDocs: map[string]*doc.Package{},
+ docs: map[string]*PropertyStructDocs{},
+ }
+ return &dc
+}
+
+// Docs returns the PropertyStructDocs for the property struct type identified
+// by its package path and type name, with Default values filled in from the
+// supplied defaults struct value. Parsed docs are cached on the collector;
+// the cached entry is cloned before defaults are applied, so callers may
+// mutate the result freely.
+func (dc *DocCollector) Docs(pkg, name string, defaults reflect.Value) (*PropertyStructDocs, error) {
+ docs := dc.getDocs(pkg, name)
+
+ if docs == nil {
+ pkgDocs, err := dc.packageDocs(pkg)
+ if err != nil {
+ return nil, err
+ }
+
+ // Scan the package's type docs for the requested struct.
+ for _, t := range pkgDocs.Types {
+ if t.Name == name {
+ docs, err = newDocs(t)
+ if err != nil {
+ return nil, err
+ }
+ // putDocs returns the winning entry if another goroutine
+ // cached this type first.
+ docs = dc.putDocs(pkg, name, docs)
+ }
+ }
+ }
+
+ if docs == nil {
+ return nil, fmt.Errorf("package %q type %q not found", pkg, name)
+ }
+
+ docs = docs.Clone()
+ docs.SetDefaults(defaults)
+
+ return docs, nil
+}
+
+// getDocs returns the cached docs for pkg.name, or nil if not yet cached.
+func (dc *DocCollector) getDocs(pkg, name string) *PropertyStructDocs {
+ dc.mutex.Lock()
+ defer dc.mutex.Unlock()
+
+ return dc.docs[pkg+"."+name]
+}
+
+// putDocs caches docs under pkg.name. If another goroutine already stored an
+// entry for the same key, the previously stored entry wins and is returned.
+func (dc *DocCollector) putDocs(pkg, name string, docs *PropertyStructDocs) *PropertyStructDocs {
+ dc.mutex.Lock()
+ defer dc.mutex.Unlock()
+
+ key := pkg + "." + name
+ if existing := dc.docs[key]; existing != nil {
+ return existing
+ }
+ dc.docs[key] = docs
+ return docs
+}
+
+// PropertyStructDocs is the documentation for a single property struct type:
+// its name, its doc comment text, and docs for each of its properties.
+type PropertyStructDocs struct {
+ Name string
+ Text string
+ Properties []PropertyDocs
+}
+
+// PropertyDocs is the documentation for a single property (struct field).
+// OtherNames/OtherTexts hold the names and doc texts of sibling properties
+// that were merged into this one because they had identical sub-properties.
+type PropertyDocs struct {
+ Name string
+ OtherNames []string
+ Type string
+ Tag reflect.StructTag
+ Text string
+ OtherTexts []string
+ Properties []PropertyDocs
+ Default string
+}
+
+// Clone returns a deep copy of docs: the Properties slice and all nested
+// property slices are duplicated so the copy can be mutated independently.
+func (docs *PropertyStructDocs) Clone() *PropertyStructDocs {
+ clone := *docs
+ clone.Properties = append([]PropertyDocs(nil), docs.Properties...)
+ for i := range clone.Properties {
+ clone.Properties[i] = clone.Properties[i].Clone()
+ }
+ return &clone
+}
+
+// Clone returns a deep copy of docs, recursively duplicating the nested
+// Properties slice.
+func (docs *PropertyDocs) Clone() PropertyDocs {
+ clone := *docs
+ clone.Properties = append([]PropertyDocs(nil), docs.Properties...)
+ for i := range clone.Properties {
+ clone.Properties[i] = clone.Properties[i].Clone()
+ }
+ return clone
+}
+
+// Equal reports whether two PropertyDocs describe the same property:
+// identical scalar fields, identical alias lists, and recursively equal
+// sub-properties.
+func (docs *PropertyDocs) Equal(other PropertyDocs) bool {
+ if docs.Name != other.Name || docs.Type != other.Type || docs.Tag != other.Tag {
+ return false
+ }
+ if docs.Text != other.Text || docs.Default != other.Default {
+ return false
+ }
+ if !stringArrayEqual(docs.OtherNames, other.OtherNames) {
+ return false
+ }
+ if !stringArrayEqual(docs.OtherTexts, other.OtherTexts) {
+ return false
+ }
+ return docs.SameSubProperties(other)
+}
+
+// SetDefaults fills in the Default value of every property from the matching
+// field of the given struct value. See setDefaults for details.
+func (docs *PropertyStructDocs) SetDefaults(defaults reflect.Value) {
+ setDefaults(docs.Properties, defaults)
+}
+
+// setDefaults fills in each property's Default field from the corresponding
+// field of the defaults struct value, recursing into nested property structs.
+// Panics if a documented property has no matching struct field.
+func setDefaults(properties []PropertyDocs, defaults reflect.Value) {
+ for i := range properties {
+ prop := &properties[i]
+ fieldName := proptools.FieldNameForProperty(prop.Name)
+ f := defaults.FieldByName(fieldName)
+ if (f == reflect.Value{}) {
+ panic(fmt.Errorf("property %q does not exist in %q", fieldName, defaults.Type()))
+ }
+
+ // Zero values are not reported as defaults.
+ if reflect.DeepEqual(f.Interface(), reflect.Zero(f.Type()).Interface()) {
+ continue
+ }
+
+ // Unwrap interface and pointer fields down to the concrete value.
+ if f.Type().Kind() == reflect.Interface {
+ f = f.Elem()
+ }
+
+ if f.Type().Kind() == reflect.Ptr {
+ f = f.Elem()
+ }
+
+ if f.Type().Kind() == reflect.Struct {
+ setDefaults(prop.Properties, f)
+ } else {
+ prop.Default = fmt.Sprintf("%v", f.Interface())
+ }
+ }
+}
+
+// stringArrayEqual reports whether two string slices have the same length and
+// the same elements in the same order.
+func stringArrayEqual(a, b []string) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i, s := range a {
+ if s != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// SameSubProperties reports whether both properties have equal sub-property
+// lists (same length, pairwise Equal).
+func (docs *PropertyDocs) SameSubProperties(other PropertyDocs) bool {
+ mine, theirs := docs.Properties, other.Properties
+ if len(mine) != len(theirs) {
+ return false
+ }
+ for i := range mine {
+ if !mine[i].Equal(theirs[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// GetByName returns the property with the given (possibly dotted) name, or
+// nil if no such property exists.
+func (docs *PropertyStructDocs) GetByName(name string) *PropertyDocs {
+ return getByName(name, "", &docs.Properties)
+}
+
+// getByName recursively searches props for the property whose fully qualified
+// name (prefix + Name, joined with ".") equals name, descending into
+// sub-properties when name extends past a matching component. Returns nil
+// when nothing matches.
+func getByName(name string, prefix string, props *[]PropertyDocs) *PropertyDocs {
+ for i := range *props {
+ if prefix+(*props)[i].Name == name {
+ return &(*props)[i]
+ } else if strings.HasPrefix(name, prefix+(*props)[i].Name+".") {
+ return getByName(name, prefix+(*props)[i].Name+".", &(*props)[i].Properties)
+ }
+ }
+ return nil
+}
+
+// Nest merges the properties of a nested property struct into this property.
+// Only the child properties are appended; the nested struct's own name and
+// doc text are currently discarded.
+func (prop *PropertyDocs) Nest(nested *PropertyStructDocs) {
+ prop.Properties = append(prop.Properties, nested.Properties...)
+}
+
+// newDocs builds a PropertyStructDocs from a go/doc type entry. Returns an
+// error when the named type is not a struct.
+func newDocs(t *doc.Type) (*PropertyStructDocs, error) {
+ typeSpec := t.Decl.Specs[0].(*ast.TypeSpec)
+ docs := PropertyStructDocs{
+ Name: t.Name,
+ Text: t.Doc,
+ }
+
+ structType, ok := typeSpec.Type.(*ast.StructType)
+ if !ok {
+ return nil, fmt.Errorf("type of %q is not a struct", t.Name)
+ }
+
+ var err error
+ docs.Properties, err = structProperties(structType)
+ if err != nil {
+ return nil, err
+ }
+
+ return &docs, nil
+}
+
+// structProperties extracts a PropertyDocs entry for every field of a struct
+// AST node, recursing into anonymous inner structs. Returns an error if a
+// struct tag cannot be unquoted.
+func structProperties(structType *ast.StructType) (props []PropertyDocs, err error) {
+ for _, f := range structType.Fields.List {
+ names := f.Names
+ if names == nil {
+ // Anonymous fields have no name, use the type as the name
+ // TODO: hide the name and make the properties show up in the embedding struct
+ if t, ok := f.Type.(*ast.Ident); ok {
+ names = append(names, t)
+ }
+ }
+ for _, n := range names {
+ var name, typ, tag, text string
+ var innerProps []PropertyDocs
+ if n != nil {
+ name = proptools.PropertyNameForField(n.Name)
+ }
+ if f.Doc != nil {
+ text = f.Doc.Text()
+ }
+ if f.Tag != nil {
+ tag, err = strconv.Unquote(f.Tag.Value)
+ if err != nil {
+ return nil, err
+ }
+ }
+ switch a := f.Type.(type) {
+ case *ast.ArrayType:
+ // NOTE(review): every slice/array field is labeled "list of
+ // strings" regardless of its element type — confirm whether
+ // non-string lists should report their element type instead.
+ typ = "list of strings"
+ case *ast.InterfaceType:
+ typ = "interface"
+ case *ast.Ident:
+ typ = a.Name
+ case *ast.StructType:
+ innerProps, err = structProperties(a)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ // Unknown AST node; fall back to the Go type name of the node.
+ typ = fmt.Sprintf("%T", f.Type)
+ }
+
+ props = append(props, PropertyDocs{
+ Name: name,
+ Type: typ,
+ Tag: reflect.StructTag(tag),
+ Text: text,
+ Properties: innerProps,
+ })
+ }
+ }
+
+ return props, nil
+}
+
+// ExcludeByTag removes every property whose struct tag for key contains
+// value.
+func (docs *PropertyStructDocs) ExcludeByTag(key, value string) {
+ filterPropsByTag(&docs.Properties, key, value, true)
+}
+
+// IncludeByTag keeps only the properties whose struct tag for key contains
+// value.
+func (docs *PropertyStructDocs) IncludeByTag(key, value string) {
+ filterPropsByTag(&docs.Properties, key, value, false)
+}
+
+// filterPropsByTag filters props in place based on a struct tag key/value
+// pair. With exclude=false, only properties whose tag for key contains value
+// (as one of its comma-separated entries) are kept; with exclude=true those
+// properties are removed instead.
+func filterPropsByTag(props *[]PropertyDocs, key, value string, exclude bool) {
+ // Create a slice that shares the storage of props but has 0 length.
+ // Appending up to len(props) times to this slice will overwrite the
+ // original slice contents without allocating.
+ filtered := (*props)[:0]
+ for _, x := range *props {
+ tag := x.Tag.Get(key)
+ // Decide once per property whether any tag entry matches value.
+ matched := false
+ for _, entry := range strings.Split(tag, ",") {
+ if entry == value {
+ matched = true
+ break
+ }
+ }
+ // Append each property at most once. The previous implementation
+ // appended once per non-matching entry, so a property whose tag had
+ // several entries could be duplicated in exclude mode.
+ if matched != exclude {
+ filtered = append(filtered, x)
+ }
+ }
+
+ *props = filtered
+}
+
+// packageDocs returns the go/doc package documentation for pkg, parsing and
+// caching it on first use. Returns an error for packages not present in
+// pkgFiles.
+func (dc *DocCollector) packageDocs(pkg string) (*doc.Package, error) {
+ pkgDocs := dc.getPackageDocs(pkg)
+ if pkgDocs == nil {
+ if files, ok := dc.pkgFiles[pkg]; ok {
+ var err error
+ pkgAST, err := NewPackageAST(files)
+ if err != nil {
+ return nil, err
+ }
+ pkgDocs = doc.New(pkgAST, pkg, doc.AllDecls)
+ // putPackageDocs returns the winning entry if another goroutine
+ // cached this package first.
+ pkgDocs = dc.putPackageDocs(pkg, pkgDocs)
+ } else {
+ return nil, fmt.Errorf("unknown package %q", pkg)
+ }
+ }
+ return pkgDocs, nil
+}
+
+// getPackageDocs returns the cached docs for pkg, or nil if not yet parsed.
+func (dc *DocCollector) getPackageDocs(pkg string) *doc.Package {
+ dc.mutex.Lock()
+ cached := dc.pkgDocs[pkg]
+ dc.mutex.Unlock()
+ return cached
+}
+
+// putPackageDocs caches pkgDocs for pkg. If another goroutine already stored
+// docs for the same package, the previously stored entry wins and is
+// returned.
+func (dc *DocCollector) putPackageDocs(pkg string, pkgDocs *doc.Package) *doc.Package {
+ dc.mutex.Lock()
+ defer dc.mutex.Unlock()
+
+ if existing := dc.pkgDocs[pkg]; existing != nil {
+ return existing
+ }
+ dc.pkgDocs[pkg] = pkgDocs
+ return pkgDocs
+}
+
+// NewPackageAST parses the given Go source files (with comments) and
+// assembles them into a single *ast.Package.
+func NewPackageAST(files []string) (*ast.Package, error) {
+ fset := token.NewFileSet()
+ asts := make(map[string]*ast.File)
+
+ for _, file := range files {
+ parsed, err := parser.ParseFile(fset, file, nil, parser.ParseComments)
+ if err != nil {
+ return nil, err
+ }
+ asts[file] = parsed
+ }
+
+ // No importer is supplied, so NewPackage reports resolution errors; they
+ // are deliberately ignored, as only the AST structure is needed.
+ pkg, _ := ast.NewPackage(fset, asts, nil, nil)
+ return pkg, nil
+}
+
+// Write renders HTML documentation for the given module types and their
+// property structs to filename. pkgFiles maps package paths to the Go source
+// files to parse for doc comments.
+func Write(filename string, pkgFiles map[string][]string,
+ moduleTypePropertyStructs map[string][]interface{}) error {
+
+ docSet := NewDocCollector(pkgFiles)
+
+ var moduleTypeList []*moduleTypeDoc
+ for moduleType, propertyStructs := range moduleTypePropertyStructs {
+ mtDoc, err := getModuleTypeDoc(docSet, moduleType, propertyStructs)
+ if err != nil {
+ return err
+ }
+ // Tidy the docs: drop empty structs, merge duplicate structs,
+ // flatten struct-only nesting levels, and combine identical
+ // properties.
+ removeEmptyPropertyStructs(mtDoc)
+ collapseDuplicatePropertyStructs(mtDoc)
+ collapseNestedPropertyStructs(mtDoc)
+ combineDuplicateProperties(mtDoc)
+ moduleTypeList = append(moduleTypeList, mtDoc)
+ }
+
+ sort.Sort(moduleTypeByName(moduleTypeList))
+
+ buf := &bytes.Buffer{}
+
+ // unique supplies monotonically increasing ids to the template, for
+ // generating distinct HTML element ids.
+ unique := 0
+
+ tmpl, err := template.New("file").Funcs(map[string]interface{}{
+ "unique": func() int {
+ unique++
+ return unique
+ }}).Parse(fileTemplate)
+ if err != nil {
+ return err
+ }
+
+ err = tmpl.Execute(buf, moduleTypeList)
+ if err != nil {
+ return err
+ }
+
+ err = ioutil.WriteFile(filename, buf.Bytes(), 0666)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// getModuleTypeDoc assembles the documentation for one module type from its
+// registered property structs, resolving docs for each struct and splicing
+// nested property structs into their declared nesting points.
+func getModuleTypeDoc(docSet *DocCollector, moduleType string,
+ propertyStructs []interface{}) (*moduleTypeDoc, error) {
+ mtDoc := &moduleTypeDoc{
+ Name: moduleType,
+ //Text: docSet.ModuleTypeDocs(moduleType),
+ }
+
+ for _, s := range propertyStructs {
+ v := reflect.ValueOf(s).Elem()
+ t := v.Type()
+
+ // Ignore property structs with unexported or unnamed types
+ if t.PkgPath() == "" {
+ continue
+ }
+ psDoc, err := docSet.Docs(t.PkgPath(), t.Name(), v)
+ if err != nil {
+ return nil, err
+ }
+ // Properties marked as mutated are internal and never documented.
+ psDoc.ExcludeByTag("blueprint", "mutated")
+
+ for nested, nestedValue := range nestedPropertyStructs(v) {
+ nestedType := nestedValue.Type()
+
+ // Ignore property structs with unexported or unnamed types
+ if nestedType.PkgPath() == "" {
+ continue
+ }
+ nestedDoc, err := docSet.Docs(nestedType.PkgPath(), nestedType.Name(), nestedValue)
+ if err != nil {
+ return nil, err
+ }
+ nestedDoc.ExcludeByTag("blueprint", "mutated")
+ nestPoint := psDoc.GetByName(nested)
+ if nestPoint == nil {
+ return nil, fmt.Errorf("nesting point %q not found", nested)
+ }
+
+ // Honor any blueprint filter tag declared at the nesting point.
+ key, value, err := blueprint.HasFilter(nestPoint.Tag)
+ if err != nil {
+ return nil, err
+ }
+ if key != "" {
+ nestedDoc.IncludeByTag(key, value)
+ }
+
+ nestPoint.Nest(nestedDoc)
+ }
+ mtDoc.PropertyStructs = append(mtDoc.PropertyStructs, psDoc)
+ }
+
+ return mtDoc, nil
+}
+
+// nestedPropertyStructs walks a property struct value and returns a map from
+// dotted property path to the struct value reached through a non-nil pointer
+// or interface field at that path. Panics on field kinds that cannot appear
+// in a property struct.
+func nestedPropertyStructs(s reflect.Value) map[string]reflect.Value {
+ ret := make(map[string]reflect.Value)
+ var walk func(structValue reflect.Value, prefix string)
+ walk = func(structValue reflect.Value, prefix string) {
+ typ := structValue.Type()
+ for i := 0; i < structValue.NumField(); i++ {
+ field := typ.Field(i)
+ if field.PkgPath != "" {
+ // The field is not exported so just skip it.
+ continue
+ }
+
+ fieldValue := structValue.Field(i)
+
+ switch fieldValue.Kind() {
+ case reflect.Bool, reflect.String, reflect.Slice, reflect.Int, reflect.Uint:
+ // Nothing
+ case reflect.Struct:
+ walk(fieldValue, prefix+proptools.PropertyNameForField(field.Name)+".")
+ case reflect.Ptr, reflect.Interface:
+ if !fieldValue.IsNil() {
+ // Follow the pointer (or the pointer held by the
+ // interface) to the struct it refers to.
+ elem := fieldValue.Elem()
+ if fieldValue.Kind() == reflect.Interface {
+ if elem.Kind() != reflect.Ptr {
+ panic(fmt.Errorf("can't get type of field %q: interface "+
+ "refers to a non-pointer", field.Name))
+ }
+ elem = elem.Elem()
+ }
+ if elem.Kind() == reflect.Struct {
+ nestPoint := prefix + proptools.PropertyNameForField(field.Name)
+ ret[nestPoint] = elem
+ walk(elem, nestPoint+".")
+ }
+ }
+ default:
+ panic(fmt.Errorf("unexpected kind for property struct field %q: %s",
+ field.Name, fieldValue.Kind()))
+ }
+ }
+
+ }
+
+ walk(s, "")
+ return ret
+}
+
+// removeEmptyPropertyStructs drops any property structs that have no
+// documented properties, preserving the order of the rest.
+func removeEmptyPropertyStructs(mtDoc *moduleTypeDoc) {
+ kept := mtDoc.PropertyStructs[:0]
+ for _, ps := range mtDoc.PropertyStructs {
+ if len(ps.Properties) != 0 {
+ kept = append(kept, ps)
+ }
+ }
+ mtDoc.PropertyStructs = kept
+}
+
+// collapseDuplicatePropertyStructs squashes duplicates of the same property
+// struct (matched by name) into single entries, recursively merging their
+// property lists.
+func collapseDuplicatePropertyStructs(mtDoc *moduleTypeDoc) {
+ var collapsedDocs []*PropertyStructDocs
+
+propertyStructLoop:
+ for _, from := range mtDoc.PropertyStructs {
+ for _, to := range collapsedDocs {
+ if from.Name == to.Name {
+ collapseDuplicateProperties(&to.Properties, &from.Properties)
+ continue propertyStructLoop
+ }
+ }
+ collapsedDocs = append(collapsedDocs, from)
+ }
+ mtDoc.PropertyStructs = collapsedDocs
+}
+
+// collapseDuplicateProperties merges the properties in from into to: a
+// property whose name already exists in to has its sub-properties merged
+// recursively; otherwise it is appended.
+func collapseDuplicateProperties(to, from *[]PropertyDocs) {
+propertyLoop:
+ for _, f := range *from {
+ for i := range *to {
+ t := &(*to)[i]
+ if f.Name == t.Name {
+ collapseDuplicateProperties(&t.Properties, &f.Properties)
+ continue propertyLoop
+ }
+ }
+ *to = append(*to, f)
+ }
+}
+
+// collapseNestedPropertyStructs finds properties that only contain structs
+// within each property struct and moves their children up one level with a
+// prefixed name.
+func collapseNestedPropertyStructs(mtDoc *moduleTypeDoc) {
+ for _, propStruct := range mtDoc.PropertyStructs {
+ collapseNestedProperties(&propStruct.Properties)
+ }
+}
+
+// collapseNestedProperties flattens properties whose children are all structs:
+// each grandchild is hoisted up one level with its parent's name as a dotted
+// prefix. A parent with at least one leaf property (or no properties at all)
+// is kept as-is, after recursively collapsing its children.
+func collapseNestedProperties(p *[]PropertyDocs) {
+ var n []PropertyDocs
+
+ for _, parent := range *p {
+ var containsProperty bool
+ for j := range parent.Properties {
+ child := &parent.Properties[j]
+ if len(child.Properties) > 0 {
+ collapseNestedProperties(&child.Properties)
+ } else {
+ containsProperty = true
+ }
+ }
+ if containsProperty || len(parent.Properties) == 0 {
+ n = append(n, parent)
+ } else {
+ // Parent holds only struct children: hoist them with a dotted
+ // prefix and drop the parent entry itself.
+ for j := range parent.Properties {
+ child := parent.Properties[j]
+ child.Name = parent.Name + "." + child.Name
+ n = append(n, child)
+ }
+ }
+ }
+ *p = n
+}
+
+// combineDuplicateProperties merges properties with identical sub-property
+// trees within each of the module type's property structs.
+func combineDuplicateProperties(mtDoc *moduleTypeDoc) {
+ for _, propStruct := range mtDoc.PropertyStructs {
+ combineDuplicateSubProperties(&propStruct.Properties)
+ }
+}
+
+// combineDuplicateSubProperties merges sibling properties that have identical
+// sub-property trees into a single entry, recording the extra names and doc
+// texts in OtherNames/OtherTexts. Leaf properties are never merged.
+func combineDuplicateSubProperties(p *[]PropertyDocs) {
+ var n []PropertyDocs
+propertyLoop:
+ for _, child := range *p {
+ if len(child.Properties) > 0 {
+ combineDuplicateSubProperties(&child.Properties)
+ for i := range n {
+ s := &n[i]
+ if s.SameSubProperties(child) {
+ s.OtherNames = append(s.OtherNames, child.Name)
+ s.OtherTexts = append(s.OtherTexts, child.Text)
+ continue propertyLoop
+ }
+ }
+ }
+ n = append(n, child)
+ }
+
+ *p = n
+}
+
+// moduleTypeByName implements sort.Interface to order module type docs
+// alphabetically by name.
+type moduleTypeByName []*moduleTypeDoc
+
+func (l moduleTypeByName) Len() int { return len(l) }
+func (l moduleTypeByName) Less(i, j int) bool { return l[i].Name < l[j].Name }
+func (l moduleTypeByName) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+
+// moduleTypeDoc is the assembled documentation for a single module type: its
+// name, its doc text, and the property structs it exposes.
+type moduleTypeDoc struct {
+ Name string
+ Text string
+ PropertyStructs []*PropertyStructDocs
+}
+
+var (
+ // fileTemplate is the text/template used by Write to render the HTML
+ // documentation page; it expects a []*moduleTypeDoc as its data and a
+ // "unique" helper function in its FuncMap.
+ // NOTE(review): this template body appears truncated/garbled — it
+ // contains an {{end}} with no matching opening action and no {{range}}
+ // over the module list. Verify against the original source before
+ // relying on it.
+ fileTemplate = `
+
+
+Build Docs
+
+
+
+
+
+
+{{end}}
+`
+)
diff --git a/blueprint/bootstrap/cleanup.go b/blueprint/bootstrap/cleanup.go
new file mode 100644
index 0000000000000000000000000000000000000000..69ef06b91c29ecf06326620fa2d4096946f4a404
--- /dev/null
+++ b/blueprint/bootstrap/cleanup.go
@@ -0,0 +1,164 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bootstrap
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/google/blueprint"
+)
+
+// logFileName is the name of Ninja's build log file, relative to the
+// Ninja build directory.
+const logFileName = ".ninja_log"
+
+// removeAbandonedFiles removes any files that appear in the Ninja log that are
+// not currently build targets. Target names and the build dir may contain
+// @@SrcDir@@/@@BuildDir@@/@@BootstrapManifest@@ placeholders, which are
+// substituted before comparison. Empty parent directories of removed files
+// are cleaned up as well (see removeFileAndEmptyDirs).
+func removeAbandonedFiles(ctx *blueprint.Context, config *Config,
+ srcDir, manifestFile string) error {
+
+ ninjaBuildDir, err := ctx.NinjaBuildDir()
+ if err != nil {
+ return err
+ }
+
+ targetRules, err := ctx.AllTargets()
+ if err != nil {
+ return fmt.Errorf("error determining target list: %s", err)
+ }
+
+ // Expand the bootstrap placeholders so log paths and target paths compare
+ // in the same namespace.
+ replacer := strings.NewReplacer(
+ "@@SrcDir@@", srcDir,
+ "@@BuildDir@@", BuildDir,
+ "@@BootstrapManifest@@", manifestFile)
+ ninjaBuildDir = replacer.Replace(ninjaBuildDir)
+ targets := make(map[string]bool)
+ for target := range targetRules {
+ replacedTarget := replacer.Replace(target)
+ targets[filepath.Clean(replacedTarget)] = true
+ }
+
+ filePaths, err := parseNinjaLog(ninjaBuildDir)
+ if err != nil {
+ return err
+ }
+
+ // Anything Ninja previously built that is no longer a target is stale.
+ for _, filePath := range filePaths {
+ isTarget := targets[filePath]
+ if !isTarget {
+ err = removeFileAndEmptyDirs(filePath)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// parseNinjaLog reads the .ninja_log file in ninjaBuildDir and returns the
+// output file paths recorded in it. A missing log file is not an error; it
+// simply yields no paths. Only version 5 of the log format is understood.
+func parseNinjaLog(ninjaBuildDir string) ([]string, error) {
+ logFile, err := os.Open(filepath.Join(ninjaBuildDir, logFileName))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, nil
+ }
+ return nil, err
+ }
+ defer logFile.Close()
+
+ scanner := bufio.NewScanner(logFile)
+
+ // The first line must identify the supported log version.
+ if !scanner.Scan() || scanner.Text() != "# ninja log v5" {
+ return nil, errors.New("unrecognized ninja log format")
+ }
+
+ var filePaths []string
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.HasPrefix(line, "#") {
+ // Comment line; skip it.
+ continue
+ }
+
+ // Each entry has three fields before the output path and one field
+ // after it, all tab-separated. The path itself may contain tabs, so
+ // rejoin everything between the leading and trailing fields.
+ const fieldSeparator = "\t"
+ fields := strings.Split(line, fieldSeparator)
+ if len(fields) < 5 {
+ return nil, fmt.Errorf("log entry has too few fields: %q", line)
+ }
+ path := strings.Join(fields[3:len(fields)-1], fieldSeparator)
+ filePaths = append(filePaths, path)
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return filePaths, nil
+}
+
+// removeFileAndEmptyDirs removes path, then walks up the directory tree
+// removing each parent directory that has become empty, stopping at the
+// current working directory. A path that does not exist, or that cannot be
+// removed because it is a nonempty directory, is silently ignored.
+func removeFileAndEmptyDirs(path string) error {
+ err := os.Remove(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ // os.Remove returns *os.PathError on failure; inspect its errno.
+ pathErr := err.(*os.PathError)
+ switch pathErr.Err {
+ case syscall.ENOTEMPTY, syscall.EEXIST, syscall.ENOTDIR:
+ // Nonempty directory (or a non-directory path component):
+ // nothing to clean up here.
+ return nil
+ }
+ return err
+ }
+ fmt.Printf("removed old ninja-created file %s because it has no rule to generate it\n", path)
+
+ path, err = filepath.Abs(path)
+ if err != nil {
+ return err
+ }
+
+ cwd, err := os.Getwd()
+ if err != nil {
+ return err
+ }
+
+ // Remove now-empty parent directories, walking up toward cwd.
+ for dir := filepath.Dir(path); dir != cwd; dir = filepath.Dir(dir) {
+ err = os.Remove(dir)
+ if err != nil {
+ pathErr := err.(*os.PathError)
+ switch pathErr.Err {
+ case syscall.ENOTEMPTY, syscall.EEXIST:
+ // We've come to a nonempty directory, so we're done.
+ return nil
+ default:
+ return err
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/blueprint/bootstrap/command.go b/blueprint/bootstrap/command.go
new file mode 100644
index 0000000000000000000000000000000000000000..69b22711a64230fc58fd52b86842419c9524ada1
--- /dev/null
+++ b/blueprint/bootstrap/command.go
@@ -0,0 +1,192 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bootstrap
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "runtime/pprof"
+
+ "github.com/google/blueprint"
+ "github.com/google/blueprint/deptools"
+)
+
+var (
+ outFile string
+ depFile string
+ timestampFile string
+ timestampDepFile string
+ manifestFile string
+ docFile string
+ cpuprofile string
+ runGoTests bool
+
+ BuildDir string
+)
+
+func init() {
+ flag.StringVar(&outFile, "o", "build.ninja.in", "the Ninja file to output")
+ flag.StringVar(&BuildDir, "b", ".", "the build output directory")
+ flag.StringVar(&depFile, "d", "", "the dependency file to output")
+ flag.StringVar(×tampFile, "timestamp", "", "file to write before the output file")
+ flag.StringVar(×tampDepFile, "timestampdep", "", "the dependency file for the timestamp file")
+ flag.StringVar(&manifestFile, "m", "", "the bootstrap manifest file")
+ flag.StringVar(&docFile, "docs", "", "build documentation file to output")
+ flag.StringVar(&cpuprofile, "cpuprofile", "", "write cpu profile to file")
+ flag.BoolVar(&runGoTests, "t", false, "build and run go tests during bootstrap")
+}
+
+func Main(ctx *blueprint.Context, config interface{}, extraNinjaFileDeps ...string) {
+ if !flag.Parsed() {
+ flag.Parse()
+ }
+
+ runtime.GOMAXPROCS(runtime.NumCPU())
+
+ if cpuprofile != "" {
+ f, err := os.Create(cpuprofile)
+ if err != nil {
+ fatalf("error opening cpuprofile: %s", err)
+ }
+ pprof.StartCPUProfile(f)
+ defer f.Close()
+ defer pprof.StopCPUProfile()
+ }
+
+ if flag.NArg() != 1 {
+ fatalf("no Blueprints file specified")
+ }
+
+ stage := StageMain
+ if c, ok := config.(ConfigInterface); ok {
+ if c.GeneratingBootstrapper() {
+ stage = StageBootstrap
+ }
+ if c.GeneratingPrimaryBuilder() {
+ stage = StagePrimary
+ }
+ }
+
+ bootstrapConfig := &Config{
+ stage: stage,
+ topLevelBlueprintsFile: flag.Arg(0),
+ runGoTests: runGoTests,
+ }
+
+ ctx.RegisterBottomUpMutator("bootstrap_plugin_deps", pluginDeps)
+ ctx.RegisterModuleType("bootstrap_go_package", newGoPackageModuleFactory(bootstrapConfig))
+ ctx.RegisterModuleType("bootstrap_core_go_binary", newGoBinaryModuleFactory(bootstrapConfig, StageBootstrap))
+ ctx.RegisterModuleType("bootstrap_go_binary", newGoBinaryModuleFactory(bootstrapConfig, StagePrimary))
+ ctx.RegisterTopDownMutator("bootstrap_stage", propagateStageBootstrap)
+ ctx.RegisterSingletonType("bootstrap", newSingletonFactory(bootstrapConfig))
+
+ deps, errs := ctx.ParseBlueprintsFiles(bootstrapConfig.topLevelBlueprintsFile)
+ if len(errs) > 0 {
+ fatalErrors(errs)
+ }
+
+ // Add extra ninja file dependencies
+ deps = append(deps, extraNinjaFileDeps...)
+
+ errs = ctx.ResolveDependencies(config)
+ if len(errs) > 0 {
+ fatalErrors(errs)
+ }
+
+ if docFile != "" {
+ err := writeDocs(ctx, filepath.Dir(bootstrapConfig.topLevelBlueprintsFile), docFile)
+ if err != nil {
+ fatalErrors([]error{err})
+ }
+ return
+ }
+
+ extraDeps, errs := ctx.PrepareBuildActions(config)
+ if len(errs) > 0 {
+ fatalErrors(errs)
+ }
+ deps = append(deps, extraDeps...)
+
+ buf := bytes.NewBuffer(nil)
+ err := ctx.WriteBuildFile(buf)
+ if err != nil {
+ fatalf("error generating Ninja file contents: %s", err)
+ }
+
+ const outFilePermissions = 0666
+ if timestampFile != "" {
+ err := ioutil.WriteFile(timestampFile, []byte{}, outFilePermissions)
+ if err != nil {
+ fatalf("error writing %s: %s", timestampFile, err)
+ }
+
+ if timestampDepFile != "" {
+ err := deptools.WriteDepFile(timestampDepFile, timestampFile, deps)
+ if err != nil {
+ fatalf("error writing depfile: %s", err)
+ }
+ }
+ }
+
+ err = ioutil.WriteFile(outFile, buf.Bytes(), outFilePermissions)
+ if err != nil {
+ fatalf("error writing %s: %s", outFile, err)
+ }
+
+ if depFile != "" {
+ err := deptools.WriteDepFile(depFile, outFile, deps)
+ if err != nil {
+ fatalf("error writing depfile: %s", err)
+ }
+ err = deptools.WriteDepFile(depFile+".timestamp", outFile+".timestamp", deps)
+ if err != nil {
+ fatalf("error writing depfile: %s", err)
+ }
+ }
+
+ if c, ok := config.(ConfigRemoveAbandonedFiles); !ok || c.RemoveAbandonedFiles() {
+ srcDir := filepath.Dir(bootstrapConfig.topLevelBlueprintsFile)
+ err := removeAbandonedFiles(ctx, bootstrapConfig, srcDir, manifestFile)
+ if err != nil {
+ fatalf("error removing abandoned files: %s", err)
+ }
+ }
+}
+
+func fatalf(format string, args ...interface{}) {
+ fmt.Printf(format, args...)
+ fmt.Print("\n")
+ os.Exit(1)
+}
+
+func fatalErrors(errs []error) {
+ red := "\x1b[31m"
+ unred := "\x1b[0m"
+
+ for _, err := range errs {
+ switch err := err.(type) {
+ case *blueprint.Error:
+ fmt.Printf("%serror:%s %s\n", red, unred, err.Error())
+ default:
+ fmt.Printf("%sinternal error:%s %s\n", red, unred, err)
+ }
+ }
+ os.Exit(1)
+}
diff --git a/blueprint/bootstrap/config.go b/blueprint/bootstrap/config.go
new file mode 100644
index 0000000000000000000000000000000000000000..7730ce970392e6fc6aae4575c88c5651746180bb
--- /dev/null
+++ b/blueprint/bootstrap/config.go
@@ -0,0 +1,61 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bootstrap
+
+var (
+	// These variables are the only configuration needed by the bootstrap
+ // modules. They are always set to the variable name enclosed in "@@" so
+ // that their values can be easily replaced in the generated Ninja file.
+ srcDir = pctx.StaticVariable("srcDir", "@@SrcDir@@")
+ buildDir = pctx.StaticVariable("buildDir", "@@BuildDir@@")
+ goRoot = pctx.StaticVariable("goRoot", "@@GoRoot@@")
+ compileCmd = pctx.StaticVariable("compileCmd", "@@GoCompile@@")
+ linkCmd = pctx.StaticVariable("linkCmd", "@@GoLink@@")
+ bootstrapCmd = pctx.StaticVariable("bootstrapCmd", "@@Bootstrap@@")
+ bootstrapManifest = pctx.StaticVariable("bootstrapManifest",
+ "@@BootstrapManifest@@")
+)
+
+type ConfigInterface interface {
+ // GeneratingBootstrapper should return true if this build invocation is
+ // creating a build.ninja.in file to be used in a build bootstrapping
+ // sequence.
+ GeneratingBootstrapper() bool
+ // GeneratingPrimaryBuilder should return true if this build invocation is
+ // creating a build.ninja.in file to be used to build the primary builder
+ GeneratingPrimaryBuilder() bool
+}
+
+type ConfigRemoveAbandonedFiles interface {
+ // RemoveAbandonedFiles should return true if files listed in the
+ // .ninja_log but not the output build.ninja file should be deleted.
+ RemoveAbandonedFiles() bool
+}
+
+type Stage int
+
+const (
+ StageBootstrap Stage = iota
+ StagePrimary
+ StageMain
+)
+
+type Config struct {
+ stage Stage
+
+ topLevelBlueprintsFile string
+
+ runGoTests bool
+}
diff --git a/blueprint/bootstrap/doc.go b/blueprint/bootstrap/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..4394ebe73a17d2e9240ca13348db92e523717334
--- /dev/null
+++ b/blueprint/bootstrap/doc.go
@@ -0,0 +1,169 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The Blueprint bootstrapping mechanism is intended to enable building a source
+// tree using a Blueprint-based build system that is embedded (as source) in
+// that source tree. The only prerequisites for performing such a build are:
+//
+// 1. A Ninja binary
+// 2. A script interpreter (e.g. Bash or Python)
+// 3. A Go toolchain
+//
+// The Primary Builder
+//
+// As part of the bootstrapping process, a binary called the "primary builder"
+// is created. This primary builder is the binary that includes both the core
+// Blueprint library and the build logic specific to the source tree. It is
+// used to generate the Ninja file that describes how to build the entire source
+// tree.
+//
+// The primary builder must be a pure Go (i.e. no cgo) module built with the
+// module type 'bootstrap_go_binary'. It should have the 'primaryBuilder'
+// module property set to true in its Blueprints file. If more than one module
+// sets primaryBuilder to true the build will fail.
+//
+// The primary builder main function should look something like:
+//
+// package main
+//
+// import (
+// "flag"
+// "github.com/google/blueprint"
+// "github.com/google/blueprint/bootstrap"
+// "path/filepath"
+//
+// "my/custom/build/logic"
+// )
+//
+// func main() {
+// // The primary builder should use the global flag set because the
+// // bootstrap package registers its own flags there.
+// flag.Parse()
+//
+// // The top-level Blueprints file is passed as the first argument.
+// srcDir := filepath.Dir(flag.Arg(0))
+//
+// // Create the build context.
+// ctx := blueprint.NewContext()
+//
+// // Register custom module types
+// ctx.RegisterModuleType("foo", logic.FooModule)
+// ctx.RegisterModuleType("bar", logic.BarModule)
+//
+// // Register custom singletons
+// ctx.RegisterSingleton("baz", logic.NewBazSingleton())
+//
+// // Create and initialize the custom Config object.
+// config := logic.NewConfig(srcDir)
+//
+// // This call never returns
+// bootstrap.Main(ctx, config)
+// }
+//
+// Required Source Files
+//
+// There are three files that must be included in the source tree to facilitate
+// the build bootstrapping:
+//
+// 1. The top-level Blueprints file
+// 2. The bootstrap Ninja file template
+// 3. The bootstrap script
+//
+// The top-level Blueprints file describes how the entire source tree should be
+// built. It must have a 'subdirs' assignment that includes both the core
+// Blueprint library and the custom build logic for the source tree. It should
+// also include (either directly or through a subdirs entry) descriptions of
+// all the modules to be built in the source tree.
+//
+// The bootstrap Ninja file template describes the build actions necessary to
+// build the primary builder for the source tree. This template contains a set
+// of placeholder Ninja variable values that get filled in by the bootstrap
+// script to create a usable Ninja file. It can be created by running the
+// minibp binary that gets created as part of the standalone Blueprint build.
+// Passing minibp the path to the top-level Blueprints file will cause it to
+// create a bootstrap Ninja file template named 'build.ninja.in'.
+//
+// The bootstrap script is a small script (or theoretically a compiled binary)
+// that is included in the source tree to begin the bootstrapping process. It
+// is responsible for filling in the bootstrap Ninja file template with some
+// basic information about the Go build environment and the path to the root
+// source directory. It does this by performing a simple string substitution on
+// the template file to produce a usable build.ninja file.
+//
+// The Bootstrapping Process
+//
+// A bootstrap-enabled build directory has two states, each with a corresponding
+// Ninja file. The states are referred to as the "bootstrap" state and the
+// "main" state. Changing the directory to a particular state means replacing
+// the build.ninja file with one that will perform the build actions for the
+// state.
+//
+// The bootstrapping process begins with the user running the bootstrap script
+// to initialize a new build directory. The script is run from the build
+// directory, and when run with no arguments it copies the source bootstrap
+// Ninja file into the build directory as "build.ninja". It also performs a set
+// of string substitutions on the file to configure it for the user's build
+// environment. Specifically, the following strings are substituted in the file:
+//
+// @@SrcDir@@ - The path to the root source directory (either
+// absolute or relative to the build dir)
+// @@GoRoot@@ - The path to the root directory of the Go toolchain
+// @@GoCompile@@ - The path to the Go compiler (6g or compile)
+// @@GoLink@@ - The path to the Go linker (6l or link)
+// @@Bootstrap@@ - The path to the bootstrap script
+// @@BootstrapManifest@@ - The path to the source bootstrap Ninja file
+//
+// Once the script completes the build directory is initialized in the bootstrap
+// build state. In this state, running Ninja may perform the following build
+// actions. Each one but the last can be skipped if its output is determined to
+// be up-to-date.
+//
+// - Build the minibp binary
+// - Run minibp to generate .bootstrap/bootstrap.ninja.in
+// - Build the primary builder binary
+// - Run the primary builder to generate .bootstrap/main.ninja.in
+// - Run the bootstrap script to "copy" .bootstrap/main.ninja.in to build.ninja
+//
+// The last of these build actions results in transitioning the build directory
+// to the main build state.
+//
+// The main state (potentially) performs the following actions:
+// - Copy .bootstrap/bootstrap.ninja.in to the source bootstrap Ninja location
+// - Run the bootstrap script to "copy" the source bootstrap Ninja file to
+// build.ninja
+// - Build all the non-bootstrap modules defined in Blueprints files
+//
+// Updating the Bootstrap Ninja File Template
+//
+// The main purpose of the bootstrap state is to generate the Ninja file for the
+// main state. The one additional thing it does is generate a new bootstrap
+// Ninja file template at .bootstrap/bootstrap.ninja.in. When generating this
+// file, minibp will compare the new bootstrap Ninja file contents with the
+// original (in the source tree). If the contents match, the new file will be
+// created with a timestamp that matches that of the original, indicating that
+// the original file in the source tree is up-to-date.
+//
+// This is done so that in the main state if the bootstrap Ninja file template
+// in the source tree is out of date it can be automatically updated. Note,
+// however, that we can't have the main state generate the new bootstrap Ninja
+// file template contents itself, because it may be using an older minibp.
+// Recall that minibp is only built during the bootstrap state (to break a
+// circular dependence), so if a new bootstrap Ninja file template were
+// generated then it could replace a new file (from an updated source tree) with
+// one generated using an old minibp.
+//
+// This scheme ensures that updates to the source tree are always incorporated
+// into the build process and that changes that require a new bootstrap Ninja
+// file template automatically update the template in the source tree.
+package bootstrap
diff --git a/blueprint/bootstrap/minibp/main.go b/blueprint/bootstrap/minibp/main.go
new file mode 100644
index 0000000000000000000000000000000000000000..4e25bc83ef0ed4383cead9b91c77b074155c4a69
--- /dev/null
+++ b/blueprint/bootstrap/minibp/main.go
@@ -0,0 +1,58 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "flag"
+ "github.com/google/blueprint"
+ "github.com/google/blueprint/bootstrap"
+)
+
+var runAsPrimaryBuilder bool
+var buildPrimaryBuilder bool
+
+func init() {
+ flag.BoolVar(&runAsPrimaryBuilder, "p", false, "run as a primary builder")
+ flag.BoolVar(&buildPrimaryBuilder, "build-primary", false, "build the primary builder")
+}
+
+type Config struct {
+ generatingBootstrapper bool
+ generatingPrimaryBuilder bool
+}
+
+func (c Config) GeneratingBootstrapper() bool {
+ return c.generatingBootstrapper
+}
+
+func (c Config) GeneratingPrimaryBuilder() bool {
+ return c.generatingPrimaryBuilder
+}
+
+func main() {
+ flag.Parse()
+
+ ctx := blueprint.NewContext()
+ if !runAsPrimaryBuilder {
+ ctx.SetIgnoreUnknownModuleTypes(true)
+ }
+
+ config := Config{
+ generatingBootstrapper: !runAsPrimaryBuilder && !buildPrimaryBuilder,
+ generatingPrimaryBuilder: !runAsPrimaryBuilder && buildPrimaryBuilder,
+ }
+
+ bootstrap.Main(ctx, config)
+}
diff --git a/blueprint/bootstrap/writedocs.go b/blueprint/bootstrap/writedocs.go
new file mode 100644
index 0000000000000000000000000000000000000000..868fd65edd9197923efe1e25b17d4b0bafc19fa0
--- /dev/null
+++ b/blueprint/bootstrap/writedocs.go
@@ -0,0 +1,59 @@
+package bootstrap
+
+import (
+ "fmt"
+ "path/filepath"
+
+ "github.com/google/blueprint"
+ "github.com/google/blueprint/bootstrap/bpdoc"
+ "github.com/google/blueprint/pathtools"
+)
+
+func writeDocs(ctx *blueprint.Context, srcDir, filename string) error {
+ // Find the module that's marked as the "primary builder", which means it's
+ // creating the binary that we'll use to generate the non-bootstrap
+ // build.ninja file.
+ var primaryBuilders []*goBinary
+ var minibp *goBinary
+ ctx.VisitAllModulesIf(isBootstrapBinaryModule,
+ func(module blueprint.Module) {
+ binaryModule := module.(*goBinary)
+ if binaryModule.properties.PrimaryBuilder {
+ primaryBuilders = append(primaryBuilders, binaryModule)
+ }
+ if ctx.ModuleName(binaryModule) == "minibp" {
+ minibp = binaryModule
+ }
+ })
+
+ if minibp == nil {
+ panic("missing minibp")
+ }
+
+ var primaryBuilder *goBinary
+ switch len(primaryBuilders) {
+ case 0:
+ // If there's no primary builder module then that means we'll use minibp
+ // as the primary builder.
+ primaryBuilder = minibp
+
+ case 1:
+ primaryBuilder = primaryBuilders[0]
+
+ default:
+ return fmt.Errorf("multiple primary builder modules present")
+ }
+
+ pkgFiles := make(map[string][]string)
+ ctx.VisitDepsDepthFirst(primaryBuilder, func(module blueprint.Module) {
+ switch m := module.(type) {
+ case (*goPackage):
+ pkgFiles[m.properties.PkgPath] = pathtools.PrefixPaths(m.properties.Srcs,
+ filepath.Join(srcDir, ctx.ModuleDir(m)))
+ default:
+ panic(fmt.Errorf("unknown dependency type %T", module))
+ }
+ })
+
+ return bpdoc.Write(filename, pkgFiles, ctx.ModuleTypePropertyStructs())
+}
diff --git a/blueprint/bpfmt/bpfmt.go b/blueprint/bpfmt/bpfmt.go
new file mode 100644
index 0000000000000000000000000000000000000000..96e0b1e991a95754912397488c97098ba5d66567
--- /dev/null
+++ b/blueprint/bpfmt/bpfmt.go
@@ -0,0 +1,176 @@
+// Mostly copied from Go's src/cmd/gofmt:
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "github.com/google/blueprint/parser"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+)
+
+var (
+ // main operation modes
+ list = flag.Bool("l", false, "list files whose formatting differs from bpfmt's")
+ write = flag.Bool("w", false, "write result to (source) file instead of stdout")
+ doDiff = flag.Bool("d", false, "display diffs instead of rewriting files")
+ sortLists = flag.Bool("s", false, "sort arrays")
+)
+
+var (
+ exitCode = 0
+)
+
+func report(err error) {
+ fmt.Fprintln(os.Stderr, err)
+ exitCode = 2
+}
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "usage: bpfmt [flags] [path ...]\n")
+ flag.PrintDefaults()
+ os.Exit(2)
+}
+
+// If in == nil, the source is the contents of the file with the given filename.
+func processFile(filename string, in io.Reader, out io.Writer) error {
+ if in == nil {
+ f, err := os.Open(filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ in = f
+ }
+
+ src, err := ioutil.ReadAll(in)
+ if err != nil {
+ return err
+ }
+
+ r := bytes.NewBuffer(src)
+
+ file, errs := parser.Parse(filename, r, parser.NewScope(nil))
+ if len(errs) > 0 {
+ for _, err := range errs {
+ fmt.Fprintln(os.Stderr, err)
+ }
+ return fmt.Errorf("%d parsing errors", len(errs))
+ }
+
+ if *sortLists {
+ parser.SortLists(file)
+ }
+
+ res, err := parser.Print(file)
+ if err != nil {
+ return err
+ }
+
+ if !bytes.Equal(src, res) {
+ // formatting has changed
+ if *list {
+ fmt.Fprintln(out, filename)
+ }
+ if *write {
+ err = ioutil.WriteFile(filename, res, 0644)
+ if err != nil {
+ return err
+ }
+ }
+ if *doDiff {
+ data, err := diff(src, res)
+ if err != nil {
+ return fmt.Errorf("computing diff: %s", err)
+ }
+ fmt.Printf("diff %s bpfmt/%s\n", filename, filename)
+ out.Write(data)
+ }
+ }
+
+ if !*list && !*write && !*doDiff {
+ _, err = out.Write(res)
+ }
+
+ return err
+}
+
+func visitFile(path string, f os.FileInfo, err error) error {
+ if err == nil && f.Name() == "Blueprints" {
+ err = processFile(path, nil, os.Stdout)
+ }
+ if err != nil {
+ report(err)
+ }
+ return nil
+}
+
+func walkDir(path string) {
+ filepath.Walk(path, visitFile)
+}
+
+func main() {
+ flag.Parse()
+
+ if flag.NArg() == 0 {
+ if *write {
+ fmt.Fprintln(os.Stderr, "error: cannot use -w with standard input")
+ exitCode = 2
+ return
+ }
+ if err := processFile("", os.Stdin, os.Stdout); err != nil {
+ report(err)
+ }
+ return
+ }
+
+ for i := 0; i < flag.NArg(); i++ {
+ path := flag.Arg(i)
+ switch dir, err := os.Stat(path); {
+ case err != nil:
+ report(err)
+ case dir.IsDir():
+ walkDir(path)
+ default:
+ if err := processFile(path, nil, os.Stdout); err != nil {
+ report(err)
+ }
+ }
+ }
+}
+
+func diff(b1, b2 []byte) (data []byte, err error) {
+ f1, err := ioutil.TempFile("", "bpfmt")
+ if err != nil {
+ return
+ }
+ defer os.Remove(f1.Name())
+ defer f1.Close()
+
+ f2, err := ioutil.TempFile("", "bpfmt")
+ if err != nil {
+ return
+ }
+ defer os.Remove(f2.Name())
+ defer f2.Close()
+
+ f1.Write(b1)
+ f2.Write(b2)
+
+ data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput()
+ if len(data) > 0 {
+ // diff exits with a non-zero status when the files don't match.
+ // Ignore that failure as long as we get output.
+ err = nil
+ }
+ return
+
+}
diff --git a/blueprint/bpmodify/bpmodify.go b/blueprint/bpmodify/bpmodify.go
new file mode 100644
index 0000000000000000000000000000000000000000..f4216ddb84c4509477779a6996df024fac309de7
--- /dev/null
+++ b/blueprint/bpmodify/bpmodify.go
@@ -0,0 +1,305 @@
+// Mostly copied from Go's src/cmd/gofmt:
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "github.com/google/blueprint/parser"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "unicode"
+)
+
+var (
+ // main operation modes
+ list = flag.Bool("l", false, "list files that would be modified by bpmodify")
+ write = flag.Bool("w", false, "write result to (source) file instead of stdout")
+ doDiff = flag.Bool("d", false, "display diffs instead of rewriting files")
+ sortLists = flag.Bool("s", false, "sort touched lists, even if they were unsorted")
+ parameter = flag.String("parameter", "deps", "name of parameter to modify on each module")
+ targetedModules = new(identSet)
+ addIdents = new(identSet)
+ removeIdents = new(identSet)
+)
+
+func init() {
+ flag.Var(targetedModules, "m", "comma or whitespace separated list of modules on which to operate")
+ flag.Var(addIdents, "a", "comma or whitespace separated list of identifiers to add")
+ flag.Var(removeIdents, "r", "comma or whitespace separated list of identifiers to remove")
+}
+
+var (
+ exitCode = 0
+)
+
+func report(err error) {
+ fmt.Fprintln(os.Stderr, err)
+ exitCode = 2
+}
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "usage: bpmodify [flags] [path ...]\n")
+ flag.PrintDefaults()
+ os.Exit(2)
+}
+
+// If in == nil, the source is the contents of the file with the given filename.
+func processFile(filename string, in io.Reader, out io.Writer) error {
+ if in == nil {
+ f, err := os.Open(filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ in = f
+ }
+
+ src, err := ioutil.ReadAll(in)
+ if err != nil {
+ return err
+ }
+
+ r := bytes.NewBuffer(src)
+
+ file, errs := parser.Parse(filename, r, parser.NewScope(nil))
+ if len(errs) > 0 {
+ for _, err := range errs {
+ fmt.Fprintln(os.Stderr, err)
+ }
+ return fmt.Errorf("%d parsing errors", len(errs))
+ }
+
+ modified, errs := findModules(file)
+ if len(errs) > 0 {
+ for _, err := range errs {
+ fmt.Fprintln(os.Stderr, err)
+ }
+ fmt.Fprintln(os.Stderr, "continuing...")
+ }
+
+ if modified {
+ res, err := parser.Print(file)
+ if err != nil {
+ return err
+ }
+
+ if *list {
+ fmt.Fprintln(out, filename)
+ }
+ if *write {
+ err = ioutil.WriteFile(filename, res, 0644)
+ if err != nil {
+ return err
+ }
+ }
+ if *doDiff {
+ data, err := diff(src, res)
+ if err != nil {
+ return fmt.Errorf("computing diff: %s", err)
+ }
+ fmt.Printf("diff %s bpfmt/%s\n", filename, filename)
+ out.Write(data)
+ }
+
+ if !*list && !*write && !*doDiff {
+ _, err = out.Write(res)
+ }
+ }
+
+ return err
+}
+
+func findModules(file *parser.File) (modified bool, errs []error) {
+
+ for _, def := range file.Defs {
+ if module, ok := def.(*parser.Module); ok {
+ for _, prop := range module.Properties {
+ if prop.Name.Name == "name" && prop.Value.Type == parser.String {
+ if targetedModule(prop.Value.StringValue) {
+ m, newErrs := processModule(module, prop.Name.Name, file)
+ errs = append(errs, newErrs...)
+ modified = modified || m
+ }
+ }
+ }
+ }
+ }
+
+ return modified, errs
+}
+
+func processModule(module *parser.Module, moduleName string,
+ file *parser.File) (modified bool, errs []error) {
+
+ for _, prop := range module.Properties {
+ if prop.Name.Name == *parameter {
+ modified, errs = processParameter(&prop.Value, *parameter, moduleName, file)
+ return
+ }
+ }
+
+ return false, nil
+}
+
+func processParameter(value *parser.Value, paramName, moduleName string,
+ file *parser.File) (modified bool, errs []error) {
+ if value.Type != parser.List {
+ return false, []error{fmt.Errorf("expected parameter %s in module %s to be list, found %s",
+ paramName, moduleName, value.Type.String())}
+ }
+
+ if value.Variable != "" {
+ return false, []error{fmt.Errorf("parameter %s in module %s is a variable, unsupported",
+ paramName, moduleName)}
+ }
+
+ if value.Expression != nil {
+ return false, []error{fmt.Errorf("parameter %s in module %s is an expression, unsupported",
+ paramName, moduleName)}
+ }
+
+ wasSorted := parser.ListIsSorted(*value)
+
+ for _, a := range addIdents.idents {
+ m := parser.AddStringToList(value, a)
+ modified = modified || m
+ }
+
+ for _, r := range removeIdents.idents {
+ m := parser.RemoveStringFromList(value, r)
+ modified = modified || m
+ }
+
+ if (wasSorted || *sortLists) && modified {
+ parser.SortList(file, *value)
+ }
+
+ return modified, nil
+}
+
+func targetedModule(name string) bool {
+ if targetedModules.all {
+ return true
+ }
+ for _, m := range targetedModules.idents {
+ if m == name {
+ return true
+ }
+ }
+
+ return false
+}
+
+func visitFile(path string, f os.FileInfo, err error) error {
+ if err == nil && f.Name() == "Blueprints" {
+ err = processFile(path, nil, os.Stdout)
+ }
+ if err != nil {
+ report(err)
+ }
+ return nil
+}
+
+func walkDir(path string) {
+ filepath.Walk(path, visitFile)
+}
+
+func main() {
+ flag.Parse()
+
+ if flag.NArg() == 0 {
+ if *write {
+ fmt.Fprintln(os.Stderr, "error: cannot use -w with standard input")
+ exitCode = 2
+ return
+ }
+ if err := processFile("", os.Stdin, os.Stdout); err != nil {
+ report(err)
+ }
+ return
+ }
+
+ if len(targetedModules.idents) == 0 {
+ report(fmt.Errorf("-m parameter is required"))
+ return
+ }
+
+ if len(addIdents.idents) == 0 && len(removeIdents.idents) == 0 {
+ report(fmt.Errorf("-a or -r parameter is required"))
+ return
+ }
+
+ for i := 0; i < flag.NArg(); i++ {
+ path := flag.Arg(i)
+ switch dir, err := os.Stat(path); {
+ case err != nil:
+ report(err)
+ case dir.IsDir():
+ walkDir(path)
+ default:
+ if err := processFile(path, nil, os.Stdout); err != nil {
+ report(err)
+ }
+ }
+ }
+}
+
+func diff(b1, b2 []byte) (data []byte, err error) {
+ f1, err := ioutil.TempFile("", "bpfmt")
+ if err != nil {
+ return
+ }
+ defer os.Remove(f1.Name())
+ defer f1.Close()
+
+ f2, err := ioutil.TempFile("", "bpfmt")
+ if err != nil {
+ return
+ }
+ defer os.Remove(f2.Name())
+ defer f2.Close()
+
+ f1.Write(b1)
+ f2.Write(b2)
+
+ data, err = exec.Command("diff", "-uw", f1.Name(), f2.Name()).CombinedOutput()
+ if len(data) > 0 {
+ // diff exits with a non-zero status when the files don't match.
+ // Ignore that failure as long as we get output.
+ err = nil
+ }
+ return
+
+}
+
+type identSet struct {
+ idents []string
+ all bool
+}
+
+func (m *identSet) String() string {
+ return strings.Join(m.idents, ",")
+}
+
+func (m *identSet) Set(s string) error {
+ m.idents = strings.FieldsFunc(s, func(c rune) bool {
+ return unicode.IsSpace(c) || c == ','
+ })
+ if len(m.idents) == 1 && m.idents[0] == "*" {
+ m.all = true
+ }
+ return nil
+}
+
+func (m *identSet) Get() interface{} {
+ return m.idents
+}
diff --git a/blueprint/build.ninja.in b/blueprint/build.ninja.in
new file mode 100644
index 0000000000000000000000000000000000000000..9ba88c808c1094da0c1b1a34d81e3ab781f7ea13
--- /dev/null
+++ b/blueprint/build.ninja.in
@@ -0,0 +1,339 @@
+# ******************************************************************************
+# *** This file is generated and should not be edited ***
+# ******************************************************************************
+#
+# This file contains variables, rules, and pools with name prefixes indicating
+# they were generated by the following Go packages:
+#
+# bootstrap [from Go package github.com/google/blueprint/bootstrap]
+#
+ninja_required_version = 1.6.0
+
+g.bootstrap.buildDir = @@BuildDir@@
+
+g.bootstrap.BinDir = ${g.bootstrap.buildDir}/.bootstrap/bin
+
+g.bootstrap.bootstrapCmd = @@Bootstrap@@
+
+g.bootstrap.bootstrapManifest = @@BootstrapManifest@@
+
+g.bootstrap.chooseStageCmd = ${g.bootstrap.buildDir}/.bootstrap/bin/choosestage
+
+g.bootstrap.compileCmd = @@GoCompile@@
+
+g.bootstrap.goRoot = @@GoRoot@@
+
+g.bootstrap.linkCmd = @@GoLink@@
+
+g.bootstrap.srcDir = @@SrcDir@@
+
+builddir = ${g.bootstrap.buildDir}/.minibootstrap
+
+rule g.bootstrap.bootstrap
+ command = BUILDDIR=${g.bootstrap.buildDir} ${g.bootstrap.bootstrapCmd} -i ${in}
+ description = bootstrap ${in}
+ generator = true
+
+rule g.bootstrap.chooseStage
+ command = ${g.bootstrap.chooseStageCmd} --current ${current} --bootstrap ${g.bootstrap.bootstrapManifest} -o ${out} ${in}
+ description = choosing next stage
+
+rule g.bootstrap.compile
+ command = GOROOT='${g.bootstrap.goRoot}' ${g.bootstrap.compileCmd} -o ${out} -p ${pkgPath} -complete ${incFlags} -pack ${in}
+ description = compile ${out}
+
+rule g.bootstrap.cp
+ command = cp ${in} ${out}
+ description = cp ${out}
+
+rule g.bootstrap.link
+ command = GOROOT='${g.bootstrap.goRoot}' ${g.bootstrap.linkCmd} -o ${out} ${libDirFlags} ${in}
+ description = link ${out}
+
+# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
+# Module: blueprint
+# Variant:
+# Type: bootstrap_go_package
+# Factory: github.com/google/blueprint/bootstrap.newGoPackageModuleFactory.func1
+# Defined: Blueprints:1:1
+
+build $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint/pkg/github.com/google/blueprint.a $
+ : g.bootstrap.compile ${g.bootstrap.srcDir}/context.go $
+ ${g.bootstrap.srcDir}/live_tracker.go ${g.bootstrap.srcDir}/mangle.go $
+ ${g.bootstrap.srcDir}/module_ctx.go $
+ ${g.bootstrap.srcDir}/ninja_defs.go $
+ ${g.bootstrap.srcDir}/ninja_strings.go $
+ ${g.bootstrap.srcDir}/ninja_writer.go $
+ ${g.bootstrap.srcDir}/package_ctx.go ${g.bootstrap.srcDir}/scope.go $
+ ${g.bootstrap.srcDir}/singleton_ctx.go ${g.bootstrap.srcDir}/unpack.go $
+ | ${g.bootstrap.compileCmd} $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-parser/pkg/github.com/google/blueprint/parser.a $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-pathtools/pkg/github.com/google/blueprint/pathtools.a $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-proptools/pkg/github.com/google/blueprint/proptools.a
+ incFlags = -I ${g.bootstrap.buildDir}/.bootstrap/blueprint-parser/pkg -I ${g.bootstrap.buildDir}/.bootstrap/blueprint-pathtools/pkg -I ${g.bootstrap.buildDir}/.bootstrap/blueprint-proptools/pkg
+ pkgPath = github.com/google/blueprint
+default $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint/pkg/github.com/google/blueprint.a
+
+# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
+# Module: blueprint-bootstrap
+# Variant:
+# Type: bootstrap_go_package
+# Factory: github.com/google/blueprint/bootstrap.newGoPackageModuleFactory.func1
+# Defined: Blueprints:80:1
+
+build $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-bootstrap/pkg/github.com/google/blueprint/bootstrap.a $
+ : g.bootstrap.compile ${g.bootstrap.srcDir}/bootstrap/bootstrap.go $
+ ${g.bootstrap.srcDir}/bootstrap/cleanup.go $
+ ${g.bootstrap.srcDir}/bootstrap/command.go $
+ ${g.bootstrap.srcDir}/bootstrap/config.go $
+ ${g.bootstrap.srcDir}/bootstrap/doc.go $
+ ${g.bootstrap.srcDir}/bootstrap/writedocs.go | $
+ ${g.bootstrap.compileCmd} $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-parser/pkg/github.com/google/blueprint/parser.a $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-pathtools/pkg/github.com/google/blueprint/pathtools.a $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-proptools/pkg/github.com/google/blueprint/proptools.a $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint/pkg/github.com/google/blueprint.a $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-deptools/pkg/github.com/google/blueprint/deptools.a $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-bootstrap-bpdoc/pkg/github.com/google/blueprint/bootstrap/bpdoc.a
+ incFlags = -I ${g.bootstrap.buildDir}/.bootstrap/blueprint-parser/pkg -I ${g.bootstrap.buildDir}/.bootstrap/blueprint-pathtools/pkg -I ${g.bootstrap.buildDir}/.bootstrap/blueprint-proptools/pkg -I ${g.bootstrap.buildDir}/.bootstrap/blueprint/pkg -I ${g.bootstrap.buildDir}/.bootstrap/blueprint-deptools/pkg -I ${g.bootstrap.buildDir}/.bootstrap/blueprint-bootstrap-bpdoc/pkg
+ pkgPath = github.com/google/blueprint/bootstrap
+default $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-bootstrap/pkg/github.com/google/blueprint/bootstrap.a
+
+# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
+# Module: blueprint-bootstrap-bpdoc
+# Variant:
+# Type: bootstrap_go_package
+# Factory: github.com/google/blueprint/bootstrap.newGoPackageModuleFactory.func1
+# Defined: Blueprints:99:1
+
+build $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-bootstrap-bpdoc/pkg/github.com/google/blueprint/bootstrap/bpdoc.a $
+ : g.bootstrap.compile ${g.bootstrap.srcDir}/bootstrap/bpdoc/bpdoc.go | $
+ ${g.bootstrap.compileCmd} $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-parser/pkg/github.com/google/blueprint/parser.a $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-pathtools/pkg/github.com/google/blueprint/pathtools.a $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-proptools/pkg/github.com/google/blueprint/proptools.a $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint/pkg/github.com/google/blueprint.a
+ incFlags = -I ${g.bootstrap.buildDir}/.bootstrap/blueprint-parser/pkg -I ${g.bootstrap.buildDir}/.bootstrap/blueprint-pathtools/pkg -I ${g.bootstrap.buildDir}/.bootstrap/blueprint-proptools/pkg -I ${g.bootstrap.buildDir}/.bootstrap/blueprint/pkg
+ pkgPath = github.com/google/blueprint/bootstrap/bpdoc
+default $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-bootstrap-bpdoc/pkg/github.com/google/blueprint/bootstrap/bpdoc.a
+
+# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
+# Module: blueprint-deptools
+# Variant:
+# Type: bootstrap_go_package
+# Factory: github.com/google/blueprint/bootstrap.newGoPackageModuleFactory.func1
+# Defined: Blueprints:46:1
+
+build $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-deptools/pkg/github.com/google/blueprint/deptools.a $
+ : g.bootstrap.compile ${g.bootstrap.srcDir}/deptools/depfile.go | $
+ ${g.bootstrap.compileCmd}
+ pkgPath = github.com/google/blueprint/deptools
+default $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-deptools/pkg/github.com/google/blueprint/deptools.a
+
+# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
+# Module: blueprint-parser
+# Variant:
+# Type: bootstrap_go_package
+# Factory: github.com/google/blueprint/bootstrap.newGoPackageModuleFactory.func1
+# Defined: Blueprints:31:1
+
+build $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-parser/pkg/github.com/google/blueprint/parser.a $
+ : g.bootstrap.compile ${g.bootstrap.srcDir}/parser/modify.go $
+ ${g.bootstrap.srcDir}/parser/parser.go $
+ ${g.bootstrap.srcDir}/parser/printer.go $
+ ${g.bootstrap.srcDir}/parser/sort.go | ${g.bootstrap.compileCmd}
+ pkgPath = github.com/google/blueprint/parser
+default $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-parser/pkg/github.com/google/blueprint/parser.a
+
+# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
+# Module: blueprint-pathtools
+# Variant:
+# Type: bootstrap_go_package
+# Factory: github.com/google/blueprint/bootstrap.newGoPackageModuleFactory.func1
+# Defined: Blueprints:52:1
+
+build $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-pathtools/pkg/github.com/google/blueprint/pathtools.a $
+ : g.bootstrap.compile ${g.bootstrap.srcDir}/pathtools/lists.go $
+ ${g.bootstrap.srcDir}/pathtools/glob.go | ${g.bootstrap.compileCmd}
+ pkgPath = github.com/google/blueprint/pathtools
+default $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-pathtools/pkg/github.com/google/blueprint/pathtools.a
+
+# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
+# Module: blueprint-proptools
+# Variant:
+# Type: bootstrap_go_package
+# Factory: github.com/google/blueprint/bootstrap.newGoPackageModuleFactory.func1
+# Defined: Blueprints:64:1
+
+build $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-proptools/pkg/github.com/google/blueprint/proptools.a $
+ : g.bootstrap.compile ${g.bootstrap.srcDir}/proptools/clone.go $
+ ${g.bootstrap.srcDir}/proptools/extend.go $
+ ${g.bootstrap.srcDir}/proptools/proptools.go $
+ ${g.bootstrap.srcDir}/proptools/typeequal.go | $
+ ${g.bootstrap.compileCmd}
+ pkgPath = github.com/google/blueprint/proptools
+default $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-proptools/pkg/github.com/google/blueprint/proptools.a
+
+# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
+# Module: choosestage
+# Variant:
+# Type: bootstrap_core_go_binary
+# Factory: github.com/google/blueprint/bootstrap.newGoBinaryModuleFactory.func1
+# Defined: Blueprints:142:1
+
+build ${g.bootstrap.buildDir}/.bootstrap/choosestage/obj/choosestage.a: $
+ g.bootstrap.compile ${g.bootstrap.srcDir}/choosestage/choosestage.go | $
+ ${g.bootstrap.compileCmd}
+ pkgPath = choosestage
+default ${g.bootstrap.buildDir}/.bootstrap/choosestage/obj/choosestage.a
+
+build ${g.bootstrap.buildDir}/.bootstrap/choosestage/obj/a.out: $
+ g.bootstrap.link $
+ ${g.bootstrap.buildDir}/.bootstrap/choosestage/obj/choosestage.a | $
+ ${g.bootstrap.linkCmd}
+default ${g.bootstrap.buildDir}/.bootstrap/choosestage/obj/a.out
+
+build ${g.bootstrap.BinDir}/choosestage: g.bootstrap.cp $
+ ${g.bootstrap.buildDir}/.bootstrap/choosestage/obj/a.out
+default ${g.bootstrap.BinDir}/choosestage
+
+# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
+# Module: gotestmain
+# Variant:
+# Type: bootstrap_core_go_binary
+# Factory: github.com/google/blueprint/bootstrap.newGoBinaryModuleFactory.func1
+# Defined: Blueprints:132:1
+
+build ${g.bootstrap.buildDir}/.bootstrap/gotestmain/obj/gotestmain.a: $
+ g.bootstrap.compile ${g.bootstrap.srcDir}/gotestmain/gotestmain.go | $
+ ${g.bootstrap.compileCmd}
+ pkgPath = gotestmain
+default ${g.bootstrap.buildDir}/.bootstrap/gotestmain/obj/gotestmain.a
+
+build ${g.bootstrap.buildDir}/.bootstrap/gotestmain/obj/a.out: $
+ g.bootstrap.link $
+ ${g.bootstrap.buildDir}/.bootstrap/gotestmain/obj/gotestmain.a | $
+ ${g.bootstrap.linkCmd}
+default ${g.bootstrap.buildDir}/.bootstrap/gotestmain/obj/a.out
+
+build ${g.bootstrap.BinDir}/gotestmain: g.bootstrap.cp $
+ ${g.bootstrap.buildDir}/.bootstrap/gotestmain/obj/a.out
+default ${g.bootstrap.BinDir}/gotestmain
+
+# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
+# Module: gotestrunner
+# Variant:
+# Type: bootstrap_core_go_binary
+# Factory: github.com/google/blueprint/bootstrap.newGoBinaryModuleFactory.func1
+# Defined: Blueprints:137:1
+
+build ${g.bootstrap.buildDir}/.bootstrap/gotestrunner/obj/gotestrunner.a: $
+ g.bootstrap.compile ${g.bootstrap.srcDir}/gotestrunner/gotestrunner.go $
+ | ${g.bootstrap.compileCmd}
+ pkgPath = gotestrunner
+default ${g.bootstrap.buildDir}/.bootstrap/gotestrunner/obj/gotestrunner.a
+
+build ${g.bootstrap.buildDir}/.bootstrap/gotestrunner/obj/a.out: $
+ g.bootstrap.link $
+ ${g.bootstrap.buildDir}/.bootstrap/gotestrunner/obj/gotestrunner.a | $
+ ${g.bootstrap.linkCmd}
+default ${g.bootstrap.buildDir}/.bootstrap/gotestrunner/obj/a.out
+
+build ${g.bootstrap.BinDir}/gotestrunner: g.bootstrap.cp $
+ ${g.bootstrap.buildDir}/.bootstrap/gotestrunner/obj/a.out
+default ${g.bootstrap.BinDir}/gotestrunner
+
+# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
+# Module: minibp
+# Variant:
+# Type: bootstrap_core_go_binary
+# Factory: github.com/google/blueprint/bootstrap.newGoBinaryModuleFactory.func1
+# Defined: Blueprints:111:1
+
+build ${g.bootstrap.buildDir}/.bootstrap/minibp/obj/minibp.a: $
+ g.bootstrap.compile ${g.bootstrap.srcDir}/bootstrap/minibp/main.go | $
+ ${g.bootstrap.compileCmd} $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-parser/pkg/github.com/google/blueprint/parser.a $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-pathtools/pkg/github.com/google/blueprint/pathtools.a $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-proptools/pkg/github.com/google/blueprint/proptools.a $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint/pkg/github.com/google/blueprint.a $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-deptools/pkg/github.com/google/blueprint/deptools.a $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-bootstrap-bpdoc/pkg/github.com/google/blueprint/bootstrap/bpdoc.a $
+ ${g.bootstrap.buildDir}/.bootstrap/blueprint-bootstrap/pkg/github.com/google/blueprint/bootstrap.a
+ incFlags = -I ${g.bootstrap.buildDir}/.bootstrap/blueprint-parser/pkg -I ${g.bootstrap.buildDir}/.bootstrap/blueprint-pathtools/pkg -I ${g.bootstrap.buildDir}/.bootstrap/blueprint-proptools/pkg -I ${g.bootstrap.buildDir}/.bootstrap/blueprint/pkg -I ${g.bootstrap.buildDir}/.bootstrap/blueprint-deptools/pkg -I ${g.bootstrap.buildDir}/.bootstrap/blueprint-bootstrap-bpdoc/pkg -I ${g.bootstrap.buildDir}/.bootstrap/blueprint-bootstrap/pkg
+ pkgPath = minibp
+default ${g.bootstrap.buildDir}/.bootstrap/minibp/obj/minibp.a
+
+build ${g.bootstrap.buildDir}/.bootstrap/minibp/obj/a.out: g.bootstrap.link $
+ ${g.bootstrap.buildDir}/.bootstrap/minibp/obj/minibp.a | $
+ ${g.bootstrap.linkCmd}
+ libDirFlags = -L ${g.bootstrap.buildDir}/.bootstrap/blueprint-parser/pkg -L ${g.bootstrap.buildDir}/.bootstrap/blueprint-pathtools/pkg -L ${g.bootstrap.buildDir}/.bootstrap/blueprint-proptools/pkg -L ${g.bootstrap.buildDir}/.bootstrap/blueprint/pkg -L ${g.bootstrap.buildDir}/.bootstrap/blueprint-deptools/pkg -L ${g.bootstrap.buildDir}/.bootstrap/blueprint-bootstrap-bpdoc/pkg -L ${g.bootstrap.buildDir}/.bootstrap/blueprint-bootstrap/pkg
+default ${g.bootstrap.buildDir}/.bootstrap/minibp/obj/a.out
+
+build ${g.bootstrap.BinDir}/minibp: g.bootstrap.cp $
+ ${g.bootstrap.buildDir}/.bootstrap/minibp/obj/a.out
+default ${g.bootstrap.BinDir}/minibp
+
+# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
+# Singleton: bootstrap
+# Factory: github.com/google/blueprint/bootstrap.newSingletonFactory.func1
+
+rule s.bootstrap.primarybp
+ command = ${g.bootstrap.BinDir}/minibp --build-primary ${runTests} -m ${g.bootstrap.bootstrapManifest} --timestamp ${timestamp} --timestampdep ${timestampdep} -b ${g.bootstrap.buildDir} -d ${outfile}.d -o ${outfile} ${in}
+ depfile = ${outfile}.d
+ description = minibp ${outfile}
+
+rule s.bootstrap.minibp
+ command = ${g.bootstrap.BinDir}/minibp ${runTests} -m ${g.bootstrap.bootstrapManifest} -b ${g.bootstrap.buildDir} -d ${out}.d -o ${out} ${in}
+ depfile = ${out}.d
+ description = minibp ${out}
+ generator = true
+
+build ${g.bootstrap.buildDir}/.bootstrap/primary.ninja.in $
+ ${g.bootstrap.buildDir}/.bootstrap/primary.ninja.in.timestamp: $
+ s.bootstrap.primarybp ${g.bootstrap.srcDir}/Blueprints | $
+ ${g.bootstrap.BinDir}/choosestage ${g.bootstrap.BinDir}/gotestmain $
+ ${g.bootstrap.BinDir}/gotestrunner ${g.bootstrap.BinDir}/minibp $
+ ${g.bootstrap.srcDir}/Blueprints
+ outfile = ${g.bootstrap.buildDir}/.bootstrap/primary.ninja.in
+ timestamp = ${g.bootstrap.buildDir}/.bootstrap/primary.ninja.in.timestamp
+ timestampdep = ${g.bootstrap.buildDir}/.bootstrap/primary.ninja.in.timestamp.d
+default ${g.bootstrap.buildDir}/.bootstrap/primary.ninja.in $
+ ${g.bootstrap.buildDir}/.bootstrap/primary.ninja.in.timestamp
+
+build ${g.bootstrap.buildDir}/.bootstrap/bootstrap.ninja.in: $
+ s.bootstrap.minibp ${g.bootstrap.srcDir}/Blueprints | $
+ ${g.bootstrap.bootstrapManifest} ${g.bootstrap.BinDir}/minibp
+default ${g.bootstrap.buildDir}/.bootstrap/bootstrap.ninja.in
+
+build ${g.bootstrap.buildDir}/.bootstrap/notAFile: phony
+default ${g.bootstrap.buildDir}/.bootstrap/notAFile
+
+build ${g.bootstrap.buildDir}/.bootstrap/build.ninja.in: $
+ g.bootstrap.chooseStage $
+ ${g.bootstrap.buildDir}/.bootstrap/bootstrap.ninja.in $
+ ${g.bootstrap.buildDir}/.bootstrap/primary.ninja.in | $
+ ${g.bootstrap.chooseStageCmd} ${g.bootstrap.bootstrapManifest} $
+ ${g.bootstrap.buildDir}/.bootstrap/notAFile
+ current = ${g.bootstrap.buildDir}/.bootstrap/bootstrap.ninja.in
+default ${g.bootstrap.buildDir}/.bootstrap/build.ninja.in
+
+build ${g.bootstrap.buildDir}/build.ninja: g.bootstrap.bootstrap $
+ ${g.bootstrap.buildDir}/.bootstrap/build.ninja.in | $
+ ${g.bootstrap.bootstrapCmd}
+default ${g.bootstrap.buildDir}/build.ninja
+
diff --git a/blueprint/choosestage/choosestage.go b/blueprint/choosestage/choosestage.go
new file mode 100644
index 0000000000000000000000000000000000000000..c3d027336178131a873f6ef6c49d71036c604d50
--- /dev/null
+++ b/blueprint/choosestage/choosestage.go
@@ -0,0 +1,194 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Choose which ninja file (stage) to run next
+//
+// In the common case, this program takes a list of ninja files, compares their
+// mtimes against their $file.timestamp mtimes, and picks the last up to date
+// ninja file to output. That stage is expected to rebuild the next file in the
+// list and call this program again. If none of the ninja files are considered
+// dirty, the last stage is output.
+//
+// One exception is if the current stage's ninja file was rewritten, it will be
+// run again.
+//
+// Another exception is if the source bootstrap file has been updated more
+// recently than the first stage, the source file will be copied to the first
+// stage, and output. This would be expected with a new source drop via git.
+// The timestamp of the first file is not updated so that it can be regenerated
+// with any local changes.
+
+package choosestage
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+var (
+ outputFile string // -o: destination the chosen stage's ninja file is copied to
+ currentFile string // -current: ninja file of the stage that is currently running
+ bootstrapFile string // -bootstrap: bootstrap manifest checked into source control
+ verbose bool // -v: print modification-time details while choosing
+)
+
+func init() {
+ flag.StringVar(&outputFile, "o", "", "Output file")
+ flag.StringVar(¤tFile, "current", "", "Current stage's file")
+ flag.StringVar(&bootstrapFile, "bootstrap", "", "Bootstrap file checked into source")
+ flag.BoolVar(&verbose, "v", false, "Verbose mode")
+}
+
+func compareFiles(a, b string) (bool, error) { // report whether files a and b have byte-identical contents
+ aData, err := ioutil.ReadFile(a)
+ if err != nil {
+ return false, err
+ }
+
+ bData, err := ioutil.ReadFile(b)
+ if err != nil {
+ return false, err
+ }
+
+ return bytes.Equal(aData, bData), nil
+}
+
+// If the source bootstrap reference file is newer, then we may have gotten
+// other source updates too. So we need to restart everything with the file
+// that was checked in instead of the bootstrap that we last built.
+func copyBootstrapIfNecessary(bootstrapFile, filename string) (bool, error) { // returns (copied, err); copies only when the checked-in bootstrap is newer
+ if bootstrapFile == "" {
+ return false, nil // no source bootstrap configured; nothing to do
+ }
+
+ bootstrapStat, err := os.Stat(bootstrapFile)
+ if err != nil {
+ return false, err
+ }
+
+ fileStat, err := os.Stat(filename)
+ if err != nil {
+ return false, err
+ }
+
+ time := fileStat.ModTime() // remember the built file's mtime so it can be restored after the copy
+ if !bootstrapStat.ModTime().After(time) {
+ return false, nil // built copy is at least as new as the source; keep it
+ }
+
+ fmt.Printf("Newer source version of %s. Copying to %s\n", filepath.Base(bootstrapFile), filepath.Base(filename))
+ if verbose {
+ fmt.Printf("Source: %s\nBuilt: %s\n", bootstrapStat.ModTime(), time)
+ }
+
+ data, err := ioutil.ReadFile(bootstrapFile)
+ if err != nil {
+ return false, err
+ }
+
+ err = ioutil.WriteFile(filename, data, 0666)
+ if err != nil {
+ return false, err
+ }
+
+ // Restore timestamp to force regeneration of the bootstrap.ninja.in
+ err = os.Chtimes(filename, time, time)
+ return true, err
+}
+
+func main() { // decide which stage's ninja file should run next and copy it to -o
+ flag.Parse()
+
+ if flag.NArg() == 0 { // positional args are the ordered list of stage ninja files
+ fmt.Fprintf(os.Stderr, "Must specify at least one ninja file\n")
+ os.Exit(1)
+ }
+
+ if outputFile == "" {
+ fmt.Fprintf(os.Stderr, "Must specify an output file\n")
+ os.Exit(1)
+ }
+
+ gotoFile := flag.Arg(0) // default to the first (earliest) stage
+ if copied, err := copyBootstrapIfNecessary(bootstrapFile, flag.Arg(0)); err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to copy bootstrap ninja file: %s\n", err)
+ os.Exit(1)
+ } else if !copied { // source bootstrap was not newer: scan stages for the last up-to-date one
+ for _, fileName := range flag.Args() {
+ timestampName := fileName + ".timestamp"
+
+ // If we're currently running this stage, and the build.ninja.in
+ // file differs from the current stage file, then it has been rebuilt.
+ // Restart the stage.
+ if filepath.Clean(currentFile) == filepath.Clean(fileName) {
+ if _, err := os.Stat(outputFile); !os.IsNotExist(err) {
+ if ok, err := compareFiles(fileName, outputFile); err != nil {
+ fmt.Fprintf(os.Stderr, "Failure when comparing files: %s\n", err)
+ os.Exit(1)
+ } else if !ok {
+ fmt.Printf("Stage %s has changed, restarting\n", filepath.Base(fileName))
+ gotoFile = fileName
+ break
+ }
+ }
+ }
+
+ fileStat, err := os.Stat(fileName)
+ if err != nil {
+ // Regenerate this stage on error
+ break
+ }
+
+ timestampStat, err := os.Stat(timestampName)
+ if err != nil {
+ // This file may not exist. There's no point for
+ // the first stage to have one, as it should be
+ // a subset of the second stage dependencies,
+ // and both will return to the first stage.
+ continue
+ }
+
+ if verbose {
+ fmt.Printf("For %s:\n file: %s\n time: %s\n", fileName, fileStat.ModTime(), timestampStat.ModTime())
+ }
+
+ // If the timestamp file has a later modification time, that
+ // means that this stage needs to be regenerated. Break, so
+ // that we run the last found stage.
+ if timestampStat.ModTime().After(fileStat.ModTime()) {
+ break
+ }
+
+ gotoFile = fileName // this stage is up to date; tentatively choose it and keep scanning
+ }
+ }
+
+ fmt.Printf("Choosing %s for next stage\n", filepath.Base(gotoFile))
+
+ data, err := ioutil.ReadFile(gotoFile)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Can't read file: %s", err)
+ os.Exit(1)
+ }
+
+ err = ioutil.WriteFile(outputFile, data, 0666)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Can't write file: %s", err)
+ os.Exit(1)
+ }
+}
diff --git a/blueprint/context.go b/blueprint/context.go
new file mode 100644
index 0000000000000000000000000000000000000000..30dc451d34c70872cbc992db5e8f7cc40a4dbd6a
--- /dev/null
+++ b/blueprint/context.go
@@ -0,0 +1,2952 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package blueprint
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync/atomic"
+ "text/scanner"
+ "text/template"
+
+ "github.com/google/blueprint/parser"
+ "github.com/google/blueprint/pathtools"
+ "github.com/google/blueprint/proptools"
+)
+
+var ErrBuildActionsNotReady = errors.New("build actions are not ready") // sentinel; presumably returned while buildActionsReady is false — confirm against accessor methods
+
+const maxErrors = 10 // NOTE(review): error-count cap; its uses are not visible in this chunk
+
+// A Context contains all the state needed to parse a set of Blueprints files
+// and generate a Ninja file. The process of generating a Ninja file proceeds
+// through a series of four phases. Each phase corresponds with some methods
+// on the Context object
+//
+// Phase Methods
+// ------------ -------------------------------------------
+// 1. Registration RegisterModuleType, RegisterSingletonType
+//
+// 2. Parse ParseBlueprintsFiles, Parse
+//
+// 3. Generate ResolveDependencies, PrepareBuildActions
+//
+// 4. Write WriteBuildFile
+//
+// The registration phase prepares the context to process Blueprints files
+// containing various types of modules. The parse phase reads in one or more
+// Blueprints files and validates their contents against the module types that
+// have been registered. The generate phase then analyzes the parsed Blueprints
+// contents to create an internal representation for the build actions that must
+// be performed. This phase also performs validation of the module dependencies
+// and property values defined in the parsed Blueprints files. Finally, the
+// write phase generates the Ninja manifest text based on the generated build
+// actions.
+type Context struct {
+ // set at instantiation
+ moduleFactories map[string]ModuleFactory // module type name -> factory (see RegisterModuleType)
+ moduleGroups map[string]*moduleGroup
+ moduleInfo map[Module]*moduleInfo // maps each logic Module back to its bookkeeping info
+ modulesSorted []*moduleInfo
+ singletonInfo []*singletonInfo
+ mutatorInfo []*mutatorInfo
+ earlyMutatorInfo []*earlyMutatorInfo
+ variantMutatorNames []string
+ moduleNinjaNames map[string]*moduleGroup // ninja-safe name -> group — TODO confirm keying
+
+ dependenciesReady bool // set to true on a successful ResolveDependencies
+ buildActionsReady bool // set to true on a successful PrepareBuildActions
+
+ // set by SetIgnoreUnknownModuleTypes
+ ignoreUnknownModuleTypes bool
+
+ // set by SetAllowMissingDependencies
+ allowMissingDependencies bool
+
+ // set during PrepareBuildActions
+ pkgNames map[*packageContext]string
+ globalVariables map[Variable]*ninjaString
+ globalPools map[Pool]*poolDef
+ globalRules map[Rule]*ruleDef
+
+ // set during PrepareBuildActions
+ ninjaBuildDir *ninjaString // The builddir special Ninja variable
+ requiredNinjaMajor int // For the ninja_required_version variable
+ requiredNinjaMinor int // For the ninja_required_version variable
+ requiredNinjaMicro int // For the ninja_required_version variable
+
+ // set lazily by sortedModuleNames
+ cachedSortedModuleNames []string
+}
+
+// An Error describes a problem that was encountered that is related to a
+// particular location in a Blueprints file.
+type Error struct {
+ Err error // the error that occurred
+ Pos scanner.Position // the relevant Blueprints file location
+}
+
+type localBuildActions struct { // ninja variables/rules/builds collected for one module or singleton during PrepareBuildActions
+ variables []*localVariable
+ rules []*localRule
+ buildDefs []*buildDef
+}
+
+type moduleGroup struct {
+ name string // module name — presumably the "name" property; confirm
+ ninjaName string // name made safe for use in ninja output — TODO confirm (see mangle.go)
+
+ modules []*moduleInfo // presumably one entry per variant of the module
+}
+
+type moduleInfo struct {
+ // set during Parse
+ typeName string
+ relBlueprintsFile string
+ pos scanner.Position
+ propertyPos map[string]scanner.Position
+ properties struct {
+ Name string
+ Deps []string
+ }
+
+ variantName string // human-readable variant id; used by String()
+ variant variationMap
+ dependencyVariant variationMap
+
+ logicModule Module
+ group *moduleGroup
+ moduleProperties []interface{}
+
+ // set during ResolveDependencies
+ directDeps []*moduleInfo
+ missingDeps []string // unresolved deps — presumably kept when allowMissingDependencies is set; confirm
+
+ // set during updateDependencies
+ reverseDeps []*moduleInfo
+ depsCount int
+
+ // used by parallelVisitAllBottomUp
+ waitingCount int
+
+ // set during each runMutator
+ splitModules []*moduleInfo
+
+ // set during PrepareBuildActions
+ actionDefs localBuildActions
+}
+
+func (module *moduleInfo) String() string { // human-readable description, e.g. `module "foo" variant "arm"`
+ s := fmt.Sprintf("module %q", module.properties.Name)
+ if module.variantName != "" {
+ s += fmt.Sprintf(" variant %q", module.variantName)
+ }
+ return s
+}
+
+// A Variation is a way that a variant of a module differs from other variants of the same module.
+// For example, two variants of the same module might have Variation{"arch","arm"} and
+// Variation{"arch","arm64"}
+type Variation struct {
+ // Mutator is the axis on which this variation applies, i.e. "arch" or "link"
+ Mutator string
+ // Variation is the name of the variation on the axis, i.e. "arm" or "arm64" for arch, or
+ // "shared" or "static" for link.
+ Variation string
+}
+
+// A variationMap stores a map of Mutator to Variation to specify a variant of a module.
+type variationMap map[string]string
+
+func (vm variationMap) clone() variationMap { // copy; the result can be mutated without affecting the original
+ newVm := make(variationMap)
+ for k, v := range vm {
+ newVm[k] = v
+ }
+
+ return newVm
+}
+
+// Compare this variationMap to another one. Returns true if every entry in this map
+// is either the same in the other map or doesn't exist in the other map.
+func (vm variationMap) subset(other variationMap) bool {
+ for k, v1 := range vm {
+ if v2, ok := other[k]; ok && v1 != v2 {
+ return false // same mutator axis present in both, but with conflicting variations
+ }
+ }
+ return true
+}
+
+func (vm variationMap) equal(other variationMap) bool { // true when both maps hold identical key/value pairs
+ return reflect.DeepEqual(vm, other)
+}
+
+type singletonInfo struct {
+ // set during RegisterSingletonType
+ factory SingletonFactory
+ singleton Singleton
+ name string
+
+ // set during PrepareBuildActions
+ actionDefs localBuildActions // ninja variables/rules/builds generated by this singleton
+}
+
+type mutatorInfo struct {
+ // set during RegisterMutator
+ topDownMutator TopDownMutator
+ bottomUpMutator BottomUpMutator
+ name string
+}
+
+type earlyMutatorInfo struct {
+ // set during RegisterEarlyMutator
+ mutator EarlyMutator
+ name string
+}
+
+func (e *Error) Error() string { // implements the error interface
+ // Format as "<position>: <underlying error>".
+ return fmt.Sprintf("%s: %s", e.Pos, e.Err)
+}
+
+// NewContext creates a new Context object. The created context initially has
+// no module or singleton factories registered, so the RegisterModuleType and
+// RegisterSingletonType methods must be called before it can do anything
+// useful.
+func NewContext() *Context {
+ ctx := &Context{
+ moduleFactories: make(map[string]ModuleFactory),
+ moduleGroups: make(map[string]*moduleGroup),
+ moduleInfo: make(map[Module]*moduleInfo),
+ moduleNinjaNames: make(map[string]*moduleGroup),
+ }
+
+ ctx.RegisterBottomUpMutator("blueprint_deps", blueprintDepsMutator) // built-in mutator (defined elsewhere in this package) — presumably handles the "deps" property
+
+ return ctx
+}
+
+// A ModuleFactory function creates a new Module object. See the
+// Context.RegisterModuleType method for details about how a registered
+// ModuleFactory is used by a Context.
+type ModuleFactory func() (m Module, propertyStructs []interface{})
+
+// RegisterModuleType associates a module type name (which can appear in a
+// Blueprints file) with a Module factory function. When the given module type
+// name is encountered in a Blueprints file during parsing, the Module factory
+// is invoked to instantiate a new Module object to handle the build action
+// generation for the module. If a Mutator splits a module into multiple variants,
+// the factory is invoked again to create a new Module for each variant.
+//
+// The module type names given here must be unique for the context. The factory
+// function should be a named function so that its package and name can be
+// included in the generated Ninja file for debugging purposes.
+//
+// The factory function returns two values. The first is the newly created
+// Module object. The second is a slice of pointers to that Module object's
+// properties structs. Each properties struct is examined when parsing a module
+// definition of this type in a Blueprints file. Exported fields of the
+// properties structs are automatically set to the property values specified in
+// the Blueprints file. The properties struct field names determine the name of
+// the Blueprints file properties that are used - the Blueprints property name
+// matches that of the properties struct field name with the first letter
+// converted to lower-case.
+//
+// The fields of the properties struct must be either []string, a string, or
+// bool. The Context will panic if a Module gets instantiated with a properties
// struct containing a field that is not one of these supported types.
+//
+// Any properties that appear in the Blueprints files that are not built-in
+// module properties (such as "name" and "deps") and do not have a corresponding
+// field in the returned module properties struct result in an error during the
+// Context's parse phase.
+//
// As an example, the following code:
+//
+// type myModule struct {
+// properties struct {
+// Foo string
+// Bar []string
+// }
+// }
+//
+// func NewMyModule() (blueprint.Module, []interface{}) {
+// module := new(myModule)
+// properties := &module.properties
+// return module, []interface{}{properties}
+// }
+//
+// func main() {
+// ctx := blueprint.NewContext()
+// ctx.RegisterModuleType("my_module", NewMyModule)
+// // ...
+// }
+//
+// would support parsing a module defined in a Blueprints file as follows:
+//
+// my_module {
+// name: "myName",
+// foo: "my foo string",
+// bar: ["my", "bar", "strings"],
+// }
+//
+// The factory function may be called from multiple goroutines. Any accesses
+// to global variables must be synchronized.
+func (c *Context) RegisterModuleType(name string, factory ModuleFactory) {
+ if _, present := c.moduleFactories[name]; present {
+ panic(errors.New("module type name is already registered"))
+ }
+ c.moduleFactories[name] = factory
+}
+
// A SingletonFactory function creates a new Singleton object. See the
// Context.RegisterSingletonType method for details about how a registered
// SingletonFactory is used by a Context. The factory is invoked once, at
// registration time, to create the singleton instance.
type SingletonFactory func() Singleton
+
+// RegisterSingletonType registers a singleton type that will be invoked to
// generate build actions. Each registered singleton type is instantiated
// and invoked exactly once as part of the generate phase. Each registered
// singleton is invoked in registration order.
+//
+// The singleton type names given here must be unique for the context. The
+// factory function should be a named function so that its package and name can
+// be included in the generated Ninja file for debugging purposes.
+func (c *Context) RegisterSingletonType(name string, factory SingletonFactory) {
+ for _, s := range c.singletonInfo {
+ if s.name == name {
+ panic(errors.New("singleton name is already registered"))
+ }
+ }
+
+ c.singletonInfo = append(c.singletonInfo, &singletonInfo{
+ factory: factory,
+ singleton: factory(),
+ name: name,
+ })
+}
+
+func singletonPkgPath(singleton Singleton) string {
+ typ := reflect.TypeOf(singleton)
+ for typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ }
+ return typ.PkgPath()
+}
+
+func singletonTypeName(singleton Singleton) string {
+ typ := reflect.TypeOf(singleton)
+ for typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ }
+ return typ.PkgPath() + "." + typ.Name()
+}
+
+// RegisterTopDownMutator registers a mutator that will be invoked to propagate
+// dependency info top-down between Modules. Each registered mutator
+// is invoked in registration order (mixing TopDownMutators and BottomUpMutators)
+// once per Module, and is invoked on a module before being invoked on any of its
+// dependencies.
+//
+// The mutator type names given here must be unique to all top down mutators in
+// the Context.
+func (c *Context) RegisterTopDownMutator(name string, mutator TopDownMutator) {
+ for _, m := range c.mutatorInfo {
+ if m.name == name && m.topDownMutator != nil {
+ panic(fmt.Errorf("mutator name %s is already registered", name))
+ }
+ }
+
+ c.mutatorInfo = append(c.mutatorInfo, &mutatorInfo{
+ topDownMutator: mutator,
+ name: name,
+ })
+}
+
+// RegisterBottomUpMutator registers a mutator that will be invoked to split
+// Modules into variants. Each registered mutator is invoked in registration
+// order (mixing TopDownMutators and BottomUpMutators) once per Module, and is
+// invoked on dependencies before being invoked on dependers.
+//
+// The mutator type names given here must be unique to all bottom up or early
+// mutators in the Context.
+func (c *Context) RegisterBottomUpMutator(name string, mutator BottomUpMutator) {
+ for _, m := range c.variantMutatorNames {
+ if m == name {
+ panic(fmt.Errorf("mutator name %s is already registered", name))
+ }
+ }
+
+ c.mutatorInfo = append(c.mutatorInfo, &mutatorInfo{
+ bottomUpMutator: mutator,
+ name: name,
+ })
+
+ c.variantMutatorNames = append(c.variantMutatorNames, name)
+}
+
+// RegisterEarlyMutator registers a mutator that will be invoked to split
+// Modules into multiple variant Modules before any dependencies have been
+// created. Each registered mutator is invoked in registration order once
+// per Module (including each variant from previous early mutators). Module
+// order is unpredictable.
+//
// In order for dependencies to be satisfied in a later pass, all dependencies
+// of a module either must have an identical variant or must have no variations.
+//
+// The mutator type names given here must be unique to all bottom up or early
+// mutators in the Context.
+//
+// Deprecated, use a BottomUpMutator instead. The only difference between
+// EarlyMutator and BottomUpMutator is that EarlyMutator runs before the
+// deprecated DynamicDependencies.
+func (c *Context) RegisterEarlyMutator(name string, mutator EarlyMutator) {
+ for _, m := range c.variantMutatorNames {
+ if m == name {
+ panic(fmt.Errorf("mutator name %s is already registered", name))
+ }
+ }
+
+ c.earlyMutatorInfo = append(c.earlyMutatorInfo, &earlyMutatorInfo{
+ mutator: mutator,
+ name: name,
+ })
+
+ c.variantMutatorNames = append(c.variantMutatorNames, name)
+}
+
// SetIgnoreUnknownModuleTypes sets the behavior of the context in the case
// where it encounters an unknown module type while parsing Blueprints files. By
// default, the context will report unknown module types as an error. If this
// method is called with ignoreUnknownModuleTypes set to true then the context
// will silently ignore unknown module types.
//
// This method should generally not be used. It exists to facilitate the
// bootstrapping process.
func (c *Context) SetIgnoreUnknownModuleTypes(ignoreUnknownModuleTypes bool) {
	c.ignoreUnknownModuleTypes = ignoreUnknownModuleTypes
}
+
// SetAllowMissingDependencies changes the behavior of Blueprint to ignore
// unresolved dependencies. If the module's GenerateBuildActions calls
// ModuleContext.GetMissingDependencies Blueprint will not emit any errors
// for missing dependencies. The flag is consulted in addDependency and
// addVariationDependency when a dependency name cannot be resolved.
func (c *Context) SetAllowMissingDependencies(allowMissingDependencies bool) {
	c.allowMissingDependencies = allowMissingDependencies
}
+
// parse parses a single Blueprints file from r, creating Module objects for
// each of the module definitions encountered. If the Blueprints file contains
// an assignment to the "subdirs" variable, then the subdirectories listed are
// searched for Blueprints files returned in the subBlueprints return value.
// If the Blueprints file contains an assignment to the "build" variable, then
// the files listed are returned in the subBlueprints return value.
//
// rootDir specifies the path to the root directory of the source tree, while
// filename specifies the path to the Blueprints file. These paths are used for
// error reporting and for determining the module's directory.
func (c *Context) parse(rootDir, filename string, r io.Reader,
	scope *parser.Scope) (file *parser.File, subBlueprints []stringAndScope, deps []string,
	errs []error) {

	relBlueprintsFile, err := filepath.Rel(rootDir, filename)
	if err != nil {
		return nil, nil, nil, []error{err}
	}

	// Give this file a child scope so its file-local variables below don't
	// leak into (or inherit stale values from) the parent file's scope.
	scope = parser.NewScope(scope)
	scope.Remove("subdirs")
	scope.Remove("optional_subdirs")
	scope.Remove("build")
	file, errs = parser.ParseAndEval(filename, r, scope)
	if len(errs) > 0 {
		// Re-wrap parser errors in this package's Error type so callers see
		// a uniform error representation with position information.
		for i, err := range errs {
			if parseErr, ok := err.(*parser.ParseError); ok {
				err = &Error{
					Err: parseErr.Err,
					Pos: parseErr.Pos,
				}
				errs[i] = err
			}
		}

		// If there were any parse errors don't bother trying to interpret the
		// result.
		return nil, nil, nil, errs
	}
	file.Name = relBlueprintsFile

	subdirs, subdirsPos, err := getLocalStringListFromScope(scope, "subdirs")
	if err != nil {
		errs = append(errs, err)
	}

	optionalSubdirs, optionalSubdirsPos, err := getLocalStringListFromScope(scope, "optional_subdirs")
	if err != nil {
		errs = append(errs, err)
	}

	build, buildPos, err := getLocalStringListFromScope(scope, "build")
	if err != nil {
		errs = append(errs, err)
	}

	// NOTE(review): unlike the three calls above, the error from
	// getStringFromScope is discarded here — confirm whether a non-string
	// "subname" assignment should be reported instead of silently ignored.
	subBlueprintsName, _, err := getStringFromScope(scope, "subname")

	var blueprints []string

	// Collect sub-Blueprints from the "build", "subdirs", and
	// "optional_subdirs" lists, accumulating file dependencies and errors
	// from each pass.
	newBlueprints, newDeps, newErrs := c.findBuildBlueprints(filepath.Dir(filename), build, buildPos)
	blueprints = append(blueprints, newBlueprints...)
	deps = append(deps, newDeps...)
	errs = append(errs, newErrs...)

	newBlueprints, newDeps, newErrs = c.findSubdirBlueprints(filepath.Dir(filename), subdirs, subdirsPos,
		subBlueprintsName, false)
	blueprints = append(blueprints, newBlueprints...)
	deps = append(deps, newDeps...)
	errs = append(errs, newErrs...)

	newBlueprints, newDeps, newErrs = c.findSubdirBlueprints(filepath.Dir(filename), optionalSubdirs,
		optionalSubdirsPos, subBlueprintsName, true)
	blueprints = append(blueprints, newBlueprints...)
	deps = append(deps, newDeps...)
	errs = append(errs, newErrs...)

	// Every sub-Blueprints file inherits this file's scope.
	subBlueprintsAndScope := make([]stringAndScope, len(blueprints))
	for i, b := range blueprints {
		subBlueprintsAndScope[i] = stringAndScope{b, scope}
	}

	return file, subBlueprintsAndScope, deps, errs
}
+
// stringAndScope pairs a Blueprints file path with the parser scope that the
// file should be evaluated in (inherited from its parent Blueprints file).
type stringAndScope struct {
	string
	*parser.Scope
}
+
// ParseBlueprintsFiles parses a set of Blueprints files starting with the file
// at rootFile. When it encounters a Blueprints file with a set of subdirs
// listed it recursively parses any Blueprints files found in those
// subdirectories.
//
// If no errors are encountered while parsing the files, the list of paths on
// which the future output will depend is returned. This list will include both
// Blueprints file paths as well as directory paths for cases where wildcard
// subdirs are found.
func (c *Context) ParseBlueprintsFiles(rootFile string) (deps []string,
	errs []error) {

	c.dependenciesReady = false

	// Channels used by the worker goroutines spawned below to report parsed
	// modules and errors back to the single-threaded loop at the bottom.
	moduleCh := make(chan *moduleInfo)
	errsCh := make(chan []error)
	doneCh := make(chan struct{})
	// numErrs and numGoroutines are accessed atomically because handler may
	// be invoked concurrently by WalkBlueprintsFiles.
	var numErrs uint32
	var numGoroutines int32

	// handler must be reentrant
	handler := func(file *parser.File) {
		// Stop creating work once too many errors have accumulated.
		if atomic.LoadUint32(&numErrs) > maxErrors {
			return
		}

		atomic.AddInt32(&numGoroutines, 1)
		go func() {
			for _, def := range file.Defs {
				var module *moduleInfo
				var errs []error
				switch def := def.(type) {
				case *parser.Module:
					module, errs = c.processModuleDef(def, file.Name)
				case *parser.Assignment:
					// Already handled via Scope object
				default:
					panic("unknown definition type")
				}

				if len(errs) > 0 {
					atomic.AddUint32(&numErrs, uint32(len(errs)))
					errsCh <- errs
				} else if module != nil {
					moduleCh <- module
				}
			}
			doneCh <- struct{}{}
		}()
	}

	// The file walk itself also counts as a goroutine so the loop below
	// doesn't exit before the walk has finished spawning handlers.
	atomic.AddInt32(&numGoroutines, 1)
	go func() {
		var errs []error
		deps, errs = c.WalkBlueprintsFiles(rootFile, handler)
		if len(errs) > 0 {
			errsCh <- errs
		}
		doneCh <- struct{}{}
	}()

	// Single-threaded consumer: collects errors, registers modules, and
	// exits once every goroutine has signalled completion.
loop:
	for {
		select {
		case newErrs := <-errsCh:
			errs = append(errs, newErrs...)
		case module := <-moduleCh:
			newErrs := c.addModule(module)
			if len(newErrs) > 0 {
				errs = append(errs, newErrs...)
			}
		case <-doneCh:
			n := atomic.AddInt32(&numGoroutines, -1)
			if n == 0 {
				break loop
			}
		}
	}

	return deps, errs
}
+
+type FileHandler func(*parser.File)
+
// Walk a set of Blueprints files starting with the file at rootFile, calling handler on each.
// When it encounters a Blueprints file with a set of subdirs listed it recursively parses any
// Blueprints files found in those subdirectories. handler will be called from a goroutine, so
// it must be reentrant.
//
// If no errors are encountered while parsing the files, the list of paths on
// which the future output will depend is returned. This list will include both
// Blueprints file paths as well as directory paths for cases where wildcard
// subdirs are found.
func (c *Context) WalkBlueprintsFiles(rootFile string, handler FileHandler) (deps []string,
	errs []error) {

	rootDir := filepath.Dir(rootFile)

	// Guards against parsing the same Blueprints file twice when multiple
	// parents list it.
	blueprintsSet := make(map[string]bool)

	// Channels to receive data back from parseBlueprintsFile goroutines
	blueprintsCh := make(chan stringAndScope)
	errsCh := make(chan []error)
	fileCh := make(chan *parser.File)
	depsCh := make(chan string)

	// Channel to notify main loop that a parseBlueprintsFile goroutine has finished
	doneCh := make(chan struct{})

	// Number of outstanding goroutines to wait for
	count := 0

	startParseBlueprintsFile := func(filename string, scope *parser.Scope) {
		count++
		go func() {
			c.parseBlueprintsFile(filename, scope, rootDir,
				errsCh, fileCh, blueprintsCh, depsCh)
			doneCh <- struct{}{}
		}()
	}

	tooManyErrors := false

	startParseBlueprintsFile(rootFile, nil)

	// Single-threaded event loop: drains the channels, fans out new parse
	// goroutines for sub-Blueprints, and exits when all goroutines are done.
loop:
	for {
		if len(errs) > maxErrors {
			tooManyErrors = true
		}

		select {
		case newErrs := <-errsCh:
			errs = append(errs, newErrs...)
		case dep := <-depsCh:
			deps = append(deps, dep)
		case file := <-fileCh:
			handler(file)
		case blueprint := <-blueprintsCh:
			// Stop fanning out once too many errors have accumulated, but
			// keep draining so outstanding goroutines can finish.
			if tooManyErrors {
				continue
			}
			if blueprintsSet[blueprint.string] {
				continue
			}

			blueprintsSet[blueprint.string] = true
			startParseBlueprintsFile(blueprint.string, blueprint.Scope)
		case <-doneCh:
			count--
			if count == 0 {
				break loop
			}
		}
	}

	return
}
+
// parseBlueprintsFile parses a single Blueprints file, returning any errors through
// errsCh, the parsed file through fileCh, any sub-Blueprints files through
// blueprintsCh, and any dependencies on Blueprints files or directories through
// depsCh.
func (c *Context) parseBlueprintsFile(filename string, scope *parser.Scope, rootDir string,
	errsCh chan<- []error, fileCh chan<- *parser.File, blueprintsCh chan<- stringAndScope,
	depsCh chan<- string) {

	f, err := os.Open(filename)
	if err != nil {
		errsCh <- []error{err}
		return
	}
	// A failure to close the file is reported like any other error.
	defer func() {
		err = f.Close()
		if err != nil {
			errsCh <- []error{err}
		}
	}()

	// The file is only delivered to fileCh when it parsed cleanly.
	file, subBlueprints, deps, errs := c.parse(rootDir, filename, f, scope)
	if len(errs) > 0 {
		errsCh <- errs
	} else {
		fileCh <- file
	}

	for _, b := range subBlueprints {
		blueprintsCh <- b
	}

	for _, d := range deps {
		depsCh <- d
	}
}
+
+func (c *Context) findBuildBlueprints(dir string, build []string,
+ buildPos scanner.Position) (blueprints, deps []string, errs []error) {
+
+ for _, file := range build {
+ globPattern := filepath.Join(dir, file)
+ matches, matchedDirs, err := pathtools.Glob(globPattern)
+ if err != nil {
+ errs = append(errs, &Error{
+ Err: fmt.Errorf("%q: %s", globPattern, err.Error()),
+ Pos: buildPos,
+ })
+ continue
+ }
+
+ if len(matches) == 0 {
+ errs = append(errs, &Error{
+ Err: fmt.Errorf("%q: not found", globPattern),
+ Pos: buildPos,
+ })
+ }
+
+ // Depend on all searched directories so we pick up future changes.
+ deps = append(deps, matchedDirs...)
+
+ for _, foundBlueprints := range matches {
+ fileInfo, err := os.Stat(foundBlueprints)
+ if os.IsNotExist(err) {
+ errs = append(errs, &Error{
+ Err: fmt.Errorf("%q not found", foundBlueprints),
+ })
+ continue
+ }
+
+ if fileInfo.IsDir() {
+ errs = append(errs, &Error{
+ Err: fmt.Errorf("%q is a directory", foundBlueprints),
+ })
+ continue
+ }
+
+ blueprints = append(blueprints, foundBlueprints)
+ }
+ }
+
+ return blueprints, deps, errs
+}
+
// findSubdirBlueprints expands the glob patterns in subdirs (the "subdirs" or
// "optional_subdirs" variable of a Blueprints file) relative to dir and looks
// for a Blueprints file inside each matched directory. It returns the found
// Blueprints file paths, the paths to depend on for future changes, and any
// errors. When optional is true, a pattern that matches nothing is not an
// error. subBlueprintsName, if non-empty, is tried before the default
// "Blueprints" file name in each directory.
func (c *Context) findSubdirBlueprints(dir string, subdirs []string, subdirsPos scanner.Position,
	subBlueprintsName string, optional bool) (blueprints, deps []string, errs []error) {

	for _, subdir := range subdirs {
		globPattern := filepath.Join(dir, subdir)
		matches, matchedDirs, err := pathtools.Glob(globPattern)
		if err != nil {
			errs = append(errs, &Error{
				Err: fmt.Errorf("%q: %s", globPattern, err.Error()),
				Pos: subdirsPos,
			})
			continue
		}

		if len(matches) == 0 && !optional {
			errs = append(errs, &Error{
				Err: fmt.Errorf("%q: not found", globPattern),
				Pos: subdirsPos,
			})
		}

		// Depend on all searched directories so we pick up future changes.
		deps = append(deps, matchedDirs...)

		for _, foundSubdir := range matches {
			fileInfo, subdirStatErr := os.Stat(foundSubdir)
			if subdirStatErr != nil {
				errs = append(errs, subdirStatErr)
				continue
			}

			// Skip files
			if !fileInfo.IsDir() {
				continue
			}

			// NOTE(review): err below deliberately carries over from earlier
			// statements — when subBlueprintsName is empty, the os.IsNotExist
			// checks see the (nil) error from pathtools.Glob. A non-NotExist
			// stat error (e.g. permission denied) also falls through to the
			// else branch and is treated as if the file exists — confirm that
			// this is the intended behavior before restructuring.
			var subBlueprints string
			if subBlueprintsName != "" {
				subBlueprints = filepath.Join(foundSubdir, subBlueprintsName)
				_, err = os.Stat(subBlueprints)
			}

			if os.IsNotExist(err) || subBlueprints == "" {
				subBlueprints = filepath.Join(foundSubdir, "Blueprints")
				_, err = os.Stat(subBlueprints)
			}

			if os.IsNotExist(err) {
				// There is no Blueprints file in this subdirectory. We
				// need to add the directory to the list of dependencies
				// so that if someone adds a Blueprints file in the
				// future we'll pick it up.
				deps = append(deps, foundSubdir)
			} else {
				deps = append(deps, subBlueprints)
				blueprints = append(blueprints, subBlueprints)
			}
		}
	}

	return blueprints, deps, errs
}
+
+func getLocalStringListFromScope(scope *parser.Scope, v string) ([]string, scanner.Position, error) {
+ if assignment, local := scope.Get(v); assignment == nil || !local {
+ return nil, scanner.Position{}, nil
+ } else {
+ switch assignment.Value.Type {
+ case parser.List:
+ ret := make([]string, 0, len(assignment.Value.ListValue))
+
+ for _, value := range assignment.Value.ListValue {
+ if value.Type != parser.String {
+ // The parser should not produce this.
+ panic("non-string value found in list")
+ }
+
+ ret = append(ret, value.StringValue)
+ }
+
+ return ret, assignment.Pos, nil
+ case parser.Bool, parser.String:
+ return nil, scanner.Position{}, &Error{
+ Err: fmt.Errorf("%q must be a list of strings", v),
+ Pos: assignment.Pos,
+ }
+ default:
+ panic(fmt.Errorf("unknown value type: %d", assignment.Value.Type))
+ }
+ }
+}
+
+func getStringFromScope(scope *parser.Scope, v string) (string, scanner.Position, error) {
+ if assignment, _ := scope.Get(v); assignment == nil {
+ return "", scanner.Position{}, nil
+ } else {
+ switch assignment.Value.Type {
+ case parser.String:
+ return assignment.Value.StringValue, assignment.Pos, nil
+ case parser.Bool, parser.List:
+ return "", scanner.Position{}, &Error{
+ Err: fmt.Errorf("%q must be a string", v),
+ Pos: assignment.Pos,
+ }
+ default:
+ panic(fmt.Errorf("unknown value type: %d", assignment.Value.Type))
+ }
+ }
+}
+
// createVariations splits origModule into one variant per entry in
// variationNames on behalf of the mutator named mutatorName. The first
// variant reuses origModule's logicModule and property structs; each further
// variant gets a fresh Module from the registered factory with properties
// deep-copied from the original. The original module is invalidated
// (logicModule set to nil) and records its replacements in splitModules.
// Returns the new variants and any errors from re-pointing dependencies.
func (c *Context) createVariations(origModule *moduleInfo, mutatorName string,
	variationNames []string) ([]*moduleInfo, []error) {

	if len(variationNames) == 0 {
		panic(fmt.Errorf("mutator %q passed zero-length variation list for module %q",
			mutatorName, origModule.properties.Name))
	}

	newModules := []*moduleInfo{}

	var errs []error

	for i, variationName := range variationNames {
		typeName := origModule.typeName
		factory, ok := c.moduleFactories[typeName]
		if !ok {
			panic(fmt.Sprintf("unrecognized module type %q during cloning", typeName))
		}

		var newLogicModule Module
		var newProperties []interface{}

		if i == 0 {
			// Reuse the existing module for the first new variant
			// This both saves creating a new module, and causes the insertion in c.moduleInfo below
			// with logicModule as the key to replace the original entry in c.moduleInfo
			newLogicModule = origModule.logicModule
			newProperties = origModule.moduleProperties
		} else {
			// The built-in properties struct is prepended, mirroring the
			// layout produced by processModuleDef.
			props := []interface{}{
				&origModule.properties,
			}
			newLogicModule, newProperties = factory()

			newProperties = append(props, newProperties...)

			if len(newProperties) != len(origModule.moduleProperties) {
				panic("mismatched properties array length in " + origModule.properties.Name)
			}

			// Deep-copy each property struct from the original so the
			// variants can diverge independently.
			for i := range newProperties {
				dst := reflect.ValueOf(newProperties[i]).Elem()
				src := reflect.ValueOf(origModule.moduleProperties[i]).Elem()

				proptools.CopyProperties(dst, src)
			}
		}

		newVariant := origModule.variant.clone()
		newVariant[mutatorName] = variationName

		// Shallow-copy the moduleInfo, then replace the fields that must be
		// per-variant (deps slice, logicModule, variant maps, properties).
		m := *origModule
		newModule := &m
		newModule.directDeps = append([]*moduleInfo(nil), origModule.directDeps...)
		newModule.logicModule = newLogicModule
		newModule.variant = newVariant
		newModule.dependencyVariant = origModule.dependencyVariant.clone()
		newModule.moduleProperties = newProperties

		if newModule.variantName == "" {
			newModule.variantName = variationName
		} else {
			newModule.variantName += "_" + variationName
		}

		newModules = append(newModules, newModule)

		// Insert the new variant into the global module map. If this is the first variant then
		// it reuses logicModule from the original module, which causes this to replace the
		// original module in the global module map.
		c.moduleInfo[newModule.logicModule] = newModule

		newErrs := c.convertDepsToVariation(newModule, mutatorName, variationName)
		if len(newErrs) > 0 {
			errs = append(errs, newErrs...)
		}
	}

	// Mark original variant as invalid. Modules that depend on this module will still
	// depend on origModule, but we'll fix it when the mutator is called on them.
	origModule.logicModule = nil
	origModule.splitModules = newModules

	return newModules, errs
}
+
+func (c *Context) convertDepsToVariation(module *moduleInfo,
+ mutatorName, variationName string) (errs []error) {
+
+ for i, dep := range module.directDeps {
+ if dep.logicModule == nil {
+ var newDep *moduleInfo
+ for _, m := range dep.splitModules {
+ if m.variant[mutatorName] == variationName {
+ newDep = m
+ break
+ }
+ }
+ if newDep == nil {
+ errs = append(errs, &Error{
+ Err: fmt.Errorf("failed to find variation %q for module %q needed by %q",
+ variationName, dep.properties.Name, module.properties.Name),
+ Pos: module.pos,
+ })
+ continue
+ }
+ module.directDeps[i] = newDep
+ }
+ }
+
+ return errs
+}
+
+func (c *Context) prettyPrintVariant(variant variationMap) string {
+ names := make([]string, 0, len(variant))
+ for _, m := range c.variantMutatorNames {
+ if v, ok := variant[m]; ok {
+ names = append(names, m+":"+v)
+ }
+ }
+
+ return strings.Join(names, ", ")
+}
+
// processModuleDef instantiates a module from a parsed module definition:
// it looks up the registered factory for the definition's type name, creates
// the Module, unpacks the Blueprints properties into the module's property
// structs, and records position information for error reporting. An unknown
// module type returns an error unless ignoreUnknownModuleTypes is set, in
// which case it returns (nil, nil).
func (c *Context) processModuleDef(moduleDef *parser.Module,
	relBlueprintsFile string) (*moduleInfo, []error) {

	typeName := moduleDef.Type.Name
	factory, ok := c.moduleFactories[typeName]
	if !ok {
		if c.ignoreUnknownModuleTypes {
			return nil, nil
		}

		return nil, []error{
			&Error{
				Err: fmt.Errorf("unrecognized module type %q", typeName),
				Pos: moduleDef.Type.Pos,
			},
		}
	}

	logicModule, properties := factory()

	module := &moduleInfo{
		logicModule:       logicModule,
		typeName:          typeName,
		relBlueprintsFile: relBlueprintsFile,
	}

	// Prepend the built-in properties struct (name, deps, ...) so that it is
	// unpacked alongside the factory-supplied property structs.
	props := []interface{}{
		&module.properties,
	}
	properties = append(props, properties...)
	module.moduleProperties = properties

	propertyMap, errs := unpackProperties(moduleDef.Properties, properties...)
	if len(errs) > 0 {
		return nil, errs
	}

	// Record positions for each property so later errors can point at the
	// exact Blueprints line.
	module.pos = moduleDef.Type.Pos
	module.propertyPos = make(map[string]scanner.Position)
	for name, propertyDef := range propertyMap {
		module.propertyPos[name] = propertyDef.Pos
	}

	return module, nil
}
+
+func (c *Context) addModule(module *moduleInfo) []error {
+ name := module.properties.Name
+ c.moduleInfo[module.logicModule] = module
+
+ if group, present := c.moduleGroups[name]; present {
+ return []error{
+ &Error{
+ Err: fmt.Errorf("module %q already defined", name),
+ Pos: module.pos,
+ },
+ &Error{
+ Err: fmt.Errorf("<-- previous definition here"),
+ Pos: group.modules[0].pos,
+ },
+ }
+ } else {
+ ninjaName := toNinjaName(module.properties.Name)
+
+ // The sanitizing in toNinjaName can result in collisions, uniquify the name if it
+ // already exists
+ for i := 0; c.moduleNinjaNames[ninjaName] != nil; i++ {
+ ninjaName = toNinjaName(module.properties.Name) + strconv.Itoa(i)
+ }
+
+ group := &moduleGroup{
+ name: module.properties.Name,
+ ninjaName: ninjaName,
+ modules: []*moduleInfo{module},
+ }
+ module.group = group
+ c.moduleGroups[name] = group
+ c.moduleNinjaNames[ninjaName] = group
+ }
+
+ return nil
+}
+
+// ResolveDependencies checks that the dependencies specified by all of the
+// modules defined in the parsed Blueprints files are valid. This means that
+// the modules depended upon are defined and that no circular dependencies
+// exist.
+func (c *Context) ResolveDependencies(config interface{}) []error {
+ errs := c.runMutators(config)
+ if len(errs) > 0 {
+ return errs
+ }
+
+ c.dependenciesReady = true
+ return nil
+}
+
// Default dependencies handling. If the module implements the (deprecated)
// DynamicDependerModule interface then this set consists of the union of those
// module names listed in its "deps" property, those returned by its
// DynamicDependencies method, and those added by calling AddDependencies or
// AddVariationDependencies on DynamicDependencyModuleContext. Otherwise it
// is simply those names listed in its "deps" property.
func blueprintDepsMutator(ctx BottomUpMutatorContext) {
	// Dependencies declared directly in the "deps" property.
	ctx.AddDependency(ctx.Module(), ctx.moduleInfo().properties.Deps...)

	if dynamicDepender, ok := ctx.Module().(DynamicDependerModule); ok {
		// Run the module's DynamicDependencies inside a closure so a panic in
		// user code is converted into a reported error rather than crashing.
		func() {
			defer func() {
				if r := recover(); r != nil {
					ctx.error(newPanicErrorf(r, "DynamicDependencies for %s", ctx.moduleInfo()))
				}
			}()
			dynamicDeps := dynamicDepender.DynamicDependencies(ctx)

			if ctx.Failed() {
				return
			}

			ctx.AddDependency(ctx.Module(), dynamicDeps...)
		}()
	}
}
+
+// findMatchingVariant searches the moduleGroup for a module with the same variant as module,
+// and returns the matching module, or nil if one is not found.
+func (c *Context) findMatchingVariant(module *moduleInfo, group *moduleGroup) *moduleInfo {
+ if len(group.modules) == 1 {
+ return group.modules[0]
+ } else {
+ for _, m := range group.modules {
+ if m.variant.equal(module.dependencyVariant) {
+ return m
+ }
+ }
+ }
+
+ return nil
+}
+
// addDependency adds a dependency from module to the module group named
// depName, selecting the variant that matches module's dependencyVariant.
// Self-dependencies and (unless allowMissingDependencies is set) undefined
// modules are errors; duplicate dependencies on the same group are silently
// ignored.
func (c *Context) addDependency(module *moduleInfo, depName string) []error {
	if depName == module.properties.Name {
		return []error{&Error{
			Err: fmt.Errorf("%q depends on itself", depName),
			Pos: module.pos,
		}}
	}

	depInfo, ok := c.moduleGroups[depName]
	if !ok {
		// Optionally record rather than report unresolved names; see
		// SetAllowMissingDependencies.
		if c.allowMissingDependencies {
			module.missingDeps = append(module.missingDeps, depName)
			return nil
		}
		return []error{&Error{
			Err: fmt.Errorf("%q depends on undefined module %q",
				module.properties.Name, depName),
			Pos: module.pos,
		}}
	}

	// Skip if the dependency on this group was already added.
	for _, m := range module.directDeps {
		if m.group == depInfo {
			return nil
		}
	}

	if m := c.findMatchingVariant(module, depInfo); m != nil {
		module.directDeps = append(module.directDeps, m)
		return nil
	}

	return []error{&Error{
		Err: fmt.Errorf("dependency %q of %q missing variant %q",
			depInfo.modules[0].properties.Name, module.properties.Name,
			c.prettyPrintVariant(module.dependencyVariant)),
		Pos: module.pos,
	}}
}
+
// findReverseDependency resolves the module group named destName to the
// variant matching module's dependencyVariant, for use as the target of a
// reverse dependency. Unlike addDependency it returns the resolved module
// rather than recording it, and a missing module is always an error.
func (c *Context) findReverseDependency(module *moduleInfo, destName string) (*moduleInfo, []error) {
	if destName == module.properties.Name {
		return nil, []error{&Error{
			Err: fmt.Errorf("%q depends on itself", destName),
			Pos: module.pos,
		}}
	}

	destInfo, ok := c.moduleGroups[destName]
	if !ok {
		return nil, []error{&Error{
			Err: fmt.Errorf("%q has a reverse dependency on undefined module %q",
				module.properties.Name, destName),
			Pos: module.pos,
		}}
	}

	if m := c.findMatchingVariant(module, destInfo); m != nil {
		return m, nil
	}

	return nil, []error{&Error{
		Err: fmt.Errorf("reverse dependency %q of %q missing variant %q",
			destName, module.properties.Name,
			c.prettyPrintVariant(module.dependencyVariant)),
		Pos: module.pos,
	}}
}
+
// addVariationDependency adds a dependency from module to a specific variant
// of the module group named depName. The requested variations are overlaid on
// module's own dependencyVariant (or, when far is true, matched on their own
// as a subset of the dependency's variant). A dependency on a later variant
// of the module's own group, or on the module itself, is an error.
func (c *Context) addVariationDependency(module *moduleInfo, variations []Variation,
	depName string, far bool) []error {

	depInfo, ok := c.moduleGroups[depName]
	if !ok {
		// Optionally record rather than report unresolved names; see
		// SetAllowMissingDependencies.
		if c.allowMissingDependencies {
			module.missingDeps = append(module.missingDeps, depName)
			return nil
		}
		return []error{&Error{
			Err: fmt.Errorf("%q depends on undefined module %q",
				module.properties.Name, depName),
			Pos: module.pos,
		}}
	}

	// We can't just append variant.Variant to module.dependencyVariants.variantName and
	// compare the strings because the result won't be in mutator registration order.
	// Create a new map instead, and then deep compare the maps.
	var newVariant variationMap
	if !far {
		newVariant = module.dependencyVariant.clone()
	} else {
		newVariant = make(variationMap)
	}
	for _, v := range variations {
		newVariant[v.Mutator] = v.Variation
	}

	for _, m := range depInfo.modules {
		var found bool
		if far {
			// A far dependency only requires the requested variations to be
			// present, not an exact variant match.
			found = m.variant.subset(newVariant)
		} else {
			found = m.variant.equal(newVariant)
		}
		if found {
			if module == m {
				return []error{&Error{
					Err: fmt.Errorf("%q depends on itself", depName),
					Pos: module.pos,
				}}
			}
			// AddVariationDependency allows adding a dependency on itself, but only if
			// that module is earlier in the module list than this one, since we always
			// run GenerateBuildActions in order for the variants of a module
			if depInfo == module.group && beforeInModuleList(module, m, module.group.modules) {
				return []error{&Error{
					Err: fmt.Errorf("%q depends on later version of itself", depName),
					Pos: module.pos,
				}}
			}
			module.directDeps = append(module.directDeps, m)
			return nil
		}
	}

	return []error{&Error{
		Err: fmt.Errorf("dependency %q of %q missing variant %q",
			depInfo.modules[0].properties.Name, module.properties.Name,
			c.prettyPrintVariant(newVariant)),
		Pos: module.pos,
	}}
}
+
+func (c *Context) parallelVisitAllBottomUp(visit func(group *moduleInfo) bool) {
+ doneCh := make(chan *moduleInfo)
+ count := 0
+ cancel := false
+
+ for _, module := range c.modulesSorted {
+ module.waitingCount = module.depsCount
+ }
+
+ visitOne := func(module *moduleInfo) {
+ count++
+ go func() {
+ ret := visit(module)
+ if ret {
+ cancel = true
+ }
+ doneCh <- module
+ }()
+ }
+
+ for _, module := range c.modulesSorted {
+ if module.waitingCount == 0 {
+ visitOne(module)
+ }
+ }
+
+ for count > 0 {
+ select {
+ case doneModule := <-doneCh:
+ if !cancel {
+ for _, parent := range doneModule.reverseDeps {
+ parent.waitingCount--
+ if parent.waitingCount == 0 {
+ visitOne(parent)
+ }
+ }
+ }
+ count--
+ }
+ }
+}
+
// updateDependencies recursively walks the module dependency graph and updates
// additional fields based on the dependencies. It builds a sorted list of modules
// such that dependencies of a module always appear first, and populates reverse
// dependency links and counts of total dependencies. It also reports errors when
// it encounters dependency cycles. This should be called after resolveDependencies,
// as well as after any mutator pass has called addDependency
func (c *Context) updateDependencies() (errs []error) {
	visited := make(map[*moduleInfo]bool)  // modules that were already checked
	checking := make(map[*moduleInfo]bool) // modules actively being checked

	sorted := make([]*moduleInfo, 0, len(c.moduleInfo))

	var check func(group *moduleInfo) []*moduleInfo

	// cycleError reports one error per edge of the cycle, in dependency
	// order, after a header error at the cycle's starting module.
	cycleError := func(cycle []*moduleInfo) {
		// We are the "start" of the cycle, so we're responsible
		// for generating the errors. The cycle list is in
		// reverse order because all the 'check' calls append
		// their own module to the list.
		errs = append(errs, &Error{
			Err: fmt.Errorf("encountered dependency cycle:"),
			Pos: cycle[len(cycle)-1].pos,
		})

		// Iterate backwards through the cycle list.
		curModule := cycle[0]
		for i := len(cycle) - 1; i >= 0; i-- {
			nextModule := cycle[i]
			errs = append(errs, &Error{
				Err: fmt.Errorf("    %q depends on %q",
					curModule.properties.Name,
					nextModule.properties.Name),
				Pos: curModule.pos,
			})
			curModule = nextModule
		}
	}

	// check performs a depth-first search from module. It returns nil on
	// success, or the partial cycle path (in reverse order) when a cycle is
	// detected, and appends module to sorted once all its deps are done.
	check = func(module *moduleInfo) []*moduleInfo {
		visited[module] = true
		checking[module] = true
		defer delete(checking, module)

		deps := make(map[*moduleInfo]bool)

		// Add an implicit dependency ordering on all earlier modules in the same module group
		for _, dep := range module.group.modules {
			if dep == module {
				break
			}
			deps[dep] = true
		}

		for _, dep := range module.directDeps {
			deps[dep] = true
		}

		module.reverseDeps = []*moduleInfo{}
		module.depsCount = len(deps)

		for dep := range deps {
			if checking[dep] {
				// This is a cycle.
				return []*moduleInfo{dep, module}
			}

			if !visited[dep] {
				cycle := check(dep)
				if cycle != nil {
					if cycle[0] == module {
						// We are the "start" of the cycle, so we're responsible
						// for generating the errors. The cycle list is in
						// reverse order because all the 'check' calls append
						// their own module to the list.
						cycleError(cycle)

						// We can continue processing this module's children to
						// find more cycles. Since all the modules that were
						// part of the found cycle were marked as visited we
						// won't run into that cycle again.
					} else {
						// We're not the "start" of the cycle, so we just append
						// our module to the list and return it.
						return append(cycle, module)
					}
				}
			}

			dep.reverseDeps = append(dep.reverseDeps, module)
		}

		sorted = append(sorted, module)

		return nil
	}

	// Map iteration order is unspecified, so the starting points vary, but
	// the resulting order still satisfies the deps-first invariant.
	for _, module := range c.moduleInfo {
		if !visited[module] {
			cycle := check(module)
			if cycle != nil {
				if cycle[len(cycle)-1] != module {
					panic("inconceivable!")
				}
				cycleError(cycle)
			}
		}
	}

	c.modulesSorted = sorted

	return
}
+
// PrepareBuildActions generates an internal representation of all the build
// actions that need to be performed. This process involves invoking the
// GenerateBuildActions method on each of the Module objects created during the
// parse phase and then on each of the registered Singleton objects.
//
// If the ResolveDependencies method has not already been called it is called
// automatically by this method.
//
// The config argument is made available to all of the Module and Singleton
// objects via the Config method on the ModuleContext and SingletonContext
// objects passed to GenerateBuildActions. It is also passed to the functions
// specified via PoolFunc, RuleFunc, and VariableFunc so that they can compute
// config-specific values.
//
// The returned deps is a list of the ninja files dependencies that were added
// by the modules and singletons via the ModuleContext.AddNinjaFileDeps(),
// SingletonContext.AddNinjaFileDeps(), and PackageContext.AddNinjaFileDeps()
// methods.
func (c *Context) PrepareBuildActions(config interface{}) (deps []string, errs []error) {
	// Invalidate any previously-prepared state until this run succeeds.
	c.buildActionsReady = false

	if !c.dependenciesReady {
		errs := c.ResolveDependencies(config)
		if len(errs) > 0 {
			return nil, errs
		}
	}

	// Tracks which global variables, pools, and rules are actually
	// referenced by the generated build definitions.
	liveGlobals := newLiveTracker(config)

	c.initSpecialVariables()

	depsModules, errs := c.generateModuleBuildActions(config, liveGlobals)
	if len(errs) > 0 {
		return nil, errs
	}

	depsSingletons, errs := c.generateSingletonBuildActions(config, liveGlobals)
	if len(errs) > 0 {
		return nil, errs
	}

	deps = append(depsModules, depsSingletons...)

	if c.ninjaBuildDir != nil {
		liveGlobals.addNinjaStringDeps(c.ninjaBuildDir)
	}

	// Assign unique ninja names to the packages that provide live globals.
	pkgNames, depsPackages := c.makeUniquePackageNames(liveGlobals)

	deps = append(deps, depsPackages...)

	// This will panic if it finds a problem since it's a programming error.
	c.checkForVariableReferenceCycles(liveGlobals.variables, pkgNames)

	c.pkgNames = pkgNames
	c.globalVariables = liveGlobals.variables
	c.globalPools = liveGlobals.pools
	c.globalRules = liveGlobals.rules

	c.buildActionsReady = true

	return deps, nil
}
+
// runEarlyMutators runs each registered early mutator over every module
// variant in every module group, replacing a group's module list with any
// split variants the mutator created, then refreshes the dependency graph.
// It stops at the first module whose mutator reports errors.
func (c *Context) runEarlyMutators(config interface{}) (errs []error) {
	for _, mutator := range c.earlyMutatorInfo {
		for _, group := range c.moduleGroups {
			// Rebuilt module list for this group, with split variants
			// substituted in place of their original module.
			newModules := make([]*moduleInfo, 0, len(group.modules))

			for _, module := range group.modules {
				mctx := &mutatorContext{
					baseModuleContext: baseModuleContext{
						context: c,
						config: config,
						module: module,
					},
					name: mutator.name,
				}
				// Run the mutator, converting a panic into a mutator error
				// annotated with the mutator and module names.
				func() {
					defer func() {
						if r := recover(); r != nil {
							in := fmt.Sprintf("early mutator %q for %s", mutator.name, module)
							if err, ok := r.(panicError); ok {
								err.addIn(in)
								mctx.error(err)
							} else {
								mctx.error(newPanicErrorf(r, in))
							}
						}
					}()
					mutator.mutator(mctx)
				}()
				if len(mctx.errs) > 0 {
					errs = append(errs, mctx.errs...)
					return errs
				}

				if module.splitModules != nil {
					newModules = append(newModules, module.splitModules...)
				} else {
					newModules = append(newModules, module)
				}
			}

			group.modules = newModules
		}
	}

	// Splitting modules may have changed the dependency graph; rebuild the
	// sorted order and reverse-dependency links.
	errs = c.updateDependencies()
	if len(errs) > 0 {
		return errs
	}

	return nil
}
+
+func (c *Context) runMutators(config interface{}) (errs []error) {
+ errs = c.runEarlyMutators(config)
+ if len(errs) > 0 {
+ return errs
+ }
+
+ for _, mutator := range c.mutatorInfo {
+ if mutator.topDownMutator != nil {
+ errs = c.runTopDownMutator(config, mutator.name, mutator.topDownMutator)
+ } else if mutator.bottomUpMutator != nil {
+ errs = c.runBottomUpMutator(config, mutator.name, mutator.bottomUpMutator)
+ } else {
+ panic("no mutator set on " + mutator.name)
+ }
+ if len(errs) > 0 {
+ return errs
+ }
+ }
+
+ return nil
+}
+
// runTopDownMutator runs mutator over every module variant, iterating the
// sorted module list in reverse so each module is visited before any of its
// dependencies. It stops at the first module that reports errors.
func (c *Context) runTopDownMutator(config interface{},
	name string, mutator TopDownMutator) (errs []error) {

	// modulesSorted has dependencies first; walk it backwards to get the
	// top-down (dependers-first) order.
	for i := 0; i < len(c.modulesSorted); i++ {
		module := c.modulesSorted[len(c.modulesSorted)-1-i]
		mctx := &mutatorContext{
			baseModuleContext: baseModuleContext{
				context: c,
				config: config,
				module: module,
			},
			name: name,
		}
		// Run the mutator, converting a panic into a mutator error
		// annotated with the mutator and module names.
		func() {
			defer func() {
				if r := recover(); r != nil {
					in := fmt.Sprintf("top down mutator %q for %s", name, module)
					if err, ok := r.(panicError); ok {
						err.addIn(in)
						mctx.error(err)
					} else {
						mctx.error(newPanicErrorf(r, in))
					}
				}
			}()
			mutator(mctx)
		}()

		if len(mctx.errs) > 0 {
			errs = append(errs, mctx.errs...)
			return errs
		}
	}

	return errs
}
+
// runBottomUpMutator runs mutator over every module variant in dependency
// order (dependencies before dependers), splicing any split variants into
// their module group and fixing up dependency links afterwards. It stops at
// the first module that reports errors.
func (c *Context) runBottomUpMutator(config interface{},
	name string, mutator BottomUpMutator) (errs []error) {

	// New dependencies registered via the reverse-dependency mechanism;
	// applied (sorted, for determinism) after the whole pass completes.
	reverseDeps := make(map[*moduleInfo][]*moduleInfo)

	for _, module := range c.modulesSorted {
		newModules := make([]*moduleInfo, 0, 1)

		// Split modules are replaced in the sorted list by
		// updateDependencies; seeing one here is a programming error.
		if module.splitModules != nil {
			panic("split module found in sorted module list")
		}

		mctx := &mutatorContext{
			baseModuleContext: baseModuleContext{
				context: c,
				config: config,
				module: module,
			},
			name: name,
			reverseDeps: reverseDeps,
		}

		// Run the mutator, converting a panic into a mutator error
		// annotated with the mutator and module names.
		func() {
			defer func() {
				if r := recover(); r != nil {
					in := fmt.Sprintf("bottom up mutator %q for %s", name, module)
					if err, ok := r.(panicError); ok {
						err.addIn(in)
						mctx.error(err)
					} else {
						mctx.error(newPanicErrorf(r, in))
					}
				}
			}()
			mutator(mctx)
		}()
		if len(mctx.errs) > 0 {
			errs = append(errs, mctx.errs...)
			return errs
		}

		// Fix up any remaining dependencies on modules that were split into variants
		// by replacing them with the first variant
		for i, dep := range module.directDeps {
			if dep.logicModule == nil {
				module.directDeps[i] = dep.splitModules[0]
			}
		}

		if module.splitModules != nil {
			newModules = append(newModules, module.splitModules...)
		} else {
			newModules = append(newModules, module)
		}

		module.group.modules = spliceModules(module.group.modules, module, newModules)
	}

	for module, deps := range reverseDeps {
		sort.Sort(moduleSorter(deps))
		module.directDeps = append(module.directDeps, deps...)
	}

	// Splits and added dependencies may have changed the graph; rebuild it.
	errs = c.updateDependencies()
	if len(errs) > 0 {
		return errs
	}

	return errs
}
+
+func spliceModules(modules []*moduleInfo, origModule *moduleInfo,
+ newModules []*moduleInfo) []*moduleInfo {
+ for i, m := range modules {
+ if m == origModule {
+ return spliceModulesAtIndex(modules, i, newModules)
+ }
+ }
+
+ panic("failed to find original module to splice")
+}
+
// spliceModulesAtIndex replaces modules[i] with the elements of newModules,
// reusing the existing backing array when its capacity is sufficient.
func spliceModulesAtIndex(modules []*moduleInfo, i int, newModules []*moduleInfo) []*moduleInfo {
	spliceSize := len(newModules)
	// The element at i is removed, so the slice grows by spliceSize-1.
	newLen := len(modules) + spliceSize - 1
	var dest []*moduleInfo
	if cap(modules) >= len(modules)-1+len(newModules) {
		// We can fit the splice in the existing capacity, do everything in place
		dest = modules[:newLen]
	} else {
		dest = make([]*moduleInfo, newLen)
		copy(dest, modules[:i])
	}

	// Move the end of the slice over by spliceSize-1
	// (Go's copy handles overlapping src/dst correctly, like memmove).
	copy(dest[i+spliceSize:], modules[i+1:])

	// Copy the new modules into the slice
	copy(dest[i:], newModules)

	return dest
}
+
// initSpecialVariables resets the state accumulated while generating build
// actions: the ninja build directory and the minimum required ninja version
// (base 1.6.0, raised as needed by requireNinjaVersion).
func (c *Context) initSpecialVariables() {
	c.ninjaBuildDir = nil
	c.requiredNinjaMajor = 1
	c.requiredNinjaMinor = 6
	c.requiredNinjaMicro = 0
}
+
// generateModuleBuildActions runs GenerateBuildActions on every module in
// parallel, dependencies before dependers, collecting the resulting ninja
// file deps and errors. Returning true from the visit callback cancels
// further generation.
func (c *Context) generateModuleBuildActions(config interface{},
	liveGlobals *liveTracker) ([]string, []error) {

	var deps []string
	var errs []error

	// The collector goroutine below is the only writer of deps/errs while
	// the parallel visit is running; visit workers send results over the
	// channels instead of sharing the slices.
	cancelCh := make(chan struct{})
	errsCh := make(chan []error)
	depsCh := make(chan []string)

	go func() {
		for {
			select {
			case <-cancelCh:
				close(cancelCh)
				return
			case newErrs := <-errsCh:
				errs = append(errs, newErrs...)
			case newDeps := <-depsCh:
				deps = append(deps, newDeps...)

			}
		}
	}()

	c.parallelVisitAllBottomUp(func(module *moduleInfo) bool {
		// The parent scope of the moduleContext's local scope gets overridden to be that of the
		// calling Go package on a per-call basis. Since the initial parent scope doesn't matter we
		// just set it to nil.
		prefix := moduleNamespacePrefix(module.group.ninjaName + "_" + module.variantName)
		scope := newLocalScope(nil, prefix)

		mctx := &moduleContext{
			baseModuleContext: baseModuleContext{
				context: c,
				config: config,
				module: module,
			},
			scope: scope,
			handledMissingDeps: module.missingDeps == nil,
		}

		// Run GenerateBuildActions, converting a panic into a module error
		// annotated with the module name.
		func() {
			defer func() {
				if r := recover(); r != nil {
					in := fmt.Sprintf("GenerateBuildActions for %s", module)
					if err, ok := r.(panicError); ok {
						err.addIn(in)
						mctx.error(err)
					} else {
						mctx.error(newPanicErrorf(r, in))
					}
				}
			}()
			mctx.module.logicModule.GenerateBuildActions(mctx)
		}()

		if len(mctx.errs) > 0 {
			errsCh <- mctx.errs
			return true
		}

		// Missing dependencies are an error unless the module declared
		// (via the context) that it would handle them itself.
		if module.missingDeps != nil && !mctx.handledMissingDeps {
			var errs []error
			for _, depName := range module.missingDeps {
				errs = append(errs, &Error{
					Err: fmt.Errorf("%q depends on undefined module %q",
						module.properties.Name, depName),
					Pos: module.pos,
				})
			}
			errsCh <- errs
			return true
		}

		depsCh <- mctx.ninjaFileDeps

		newErrs := c.processLocalBuildActions(&module.actionDefs,
			&mctx.actionDefs, liveGlobals)
		if len(newErrs) > 0 {
			errsCh <- newErrs
			return true
		}
		return false
	})

	// Signal the collector to stop, then wait for the channel close so all
	// pending results are drained before deps/errs are read.
	cancelCh <- struct{}{}
	<-cancelCh

	return deps, errs
}
+
// generateSingletonBuildActions runs GenerateBuildActions on every registered
// singleton in registration order, collecting ninja file deps and errors.
// It stops early once more than maxErrors errors have accumulated.
func (c *Context) generateSingletonBuildActions(config interface{},
	liveGlobals *liveTracker) ([]string, []error) {

	var deps []string
	var errs []error

	for _, info := range c.singletonInfo {
		// The parent scope of the singletonContext's local scope gets overridden to be that of the
		// calling Go package on a per-call basis. Since the initial parent scope doesn't matter we
		// just set it to nil.
		scope := newLocalScope(nil, singletonNamespacePrefix(info.name))

		sctx := &singletonContext{
			context: c,
			config: config,
			scope: scope,
		}

		// Run GenerateBuildActions, converting a panic into a singleton
		// error annotated with the singleton name.
		func() {
			defer func() {
				if r := recover(); r != nil {
					in := fmt.Sprintf("GenerateBuildActions for singleton %s", info.name)
					if err, ok := r.(panicError); ok {
						err.addIn(in)
						sctx.error(err)
					} else {
						sctx.error(newPanicErrorf(r, in))
					}
				}
			}()
			info.singleton.GenerateBuildActions(sctx)
		}()

		if len(sctx.errs) > 0 {
			errs = append(errs, sctx.errs...)
			if len(errs) > maxErrors {
				break
			}
			continue
		}

		deps = append(deps, sctx.ninjaFileDeps...)

		newErrs := c.processLocalBuildActions(&info.actionDefs,
			&sctx.actionDefs, liveGlobals)
		errs = append(errs, newErrs...)
		if len(errs) > maxErrors {
			break
		}
	}

	return deps, errs
}
+
+func (c *Context) processLocalBuildActions(out, in *localBuildActions,
+ liveGlobals *liveTracker) []error {
+
+ var errs []error
+
+ // First we go through and add everything referenced by the module's
+ // buildDefs to the live globals set. This will end up adding the live
+ // locals to the set as well, but we'll take them out after.
+ for _, def := range in.buildDefs {
+ err := liveGlobals.AddBuildDefDeps(def)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ if len(errs) > 0 {
+ return errs
+ }
+
+ out.buildDefs = append(out.buildDefs, in.buildDefs...)
+
+ // We use the now-incorrect set of live "globals" to determine which local
+ // definitions are live. As we go through copying those live locals to the
+ // moduleGroup we remove them from the live globals set.
+ for _, v := range in.variables {
+ isLive := liveGlobals.RemoveVariableIfLive(v)
+ if isLive {
+ out.variables = append(out.variables, v)
+ }
+ }
+
+ for _, r := range in.rules {
+ isLive := liveGlobals.RemoveRuleIfLive(r)
+ if isLive {
+ out.rules = append(out.rules, r)
+ }
+ }
+
+ return nil
+}
+
// walkDeps traverses topModule's dependencies depth-first in pre-order,
// calling visit with each dependency and the module that depends on it.
// Recursion below a dependency continues only if visit returns true; a
// module is recursed into at most once. topModule itself is not visited.
// A panic inside visit is re-raised annotated with the dependency.
func (c *Context) walkDeps(topModule *moduleInfo,
	visit func(Module, Module) bool) {

	visited := make(map[*moduleInfo]bool)
	// visiting tracks the module currently passed to visit, for the panic
	// annotation in the deferred recover below.
	var visiting *moduleInfo

	defer func() {
		if r := recover(); r != nil {
			panic(newPanicErrorf(r, "WalkDeps(%s, %s) for dependency %s",
				topModule, funcName(visit), visiting))
		}
	}()

	var walk func(module *moduleInfo)
	walk = func(module *moduleInfo) {
		// Modules are marked visited when walked into, not when merely
		// passed to visit.
		visited[module] = true

		for _, moduleDep := range module.directDeps {
			if !visited[moduleDep] {
				visiting = moduleDep
				if visit(moduleDep.logicModule, module.logicModule) {
					walk(moduleDep)
				}
			}
		}
	}

	walk(topModule)
}
+
// innerPanicError is an error type for tagging panics raised inside visitor
// callbacks. NOTE(review): no uses are visible in this portion of the file —
// confirm it is referenced elsewhere before removing.
type innerPanicError error
+
+func (c *Context) visitDepsDepthFirst(topModule *moduleInfo, visit func(Module)) {
+ visited := make(map[*moduleInfo]bool)
+ var visiting *moduleInfo
+
+ defer func() {
+ if r := recover(); r != nil {
+ panic(newPanicErrorf(r, "VisitDepsDepthFirst(%s, %s) for dependency %s",
+ topModule, funcName(visit), visiting))
+ }
+ }()
+
+ var walk func(module *moduleInfo)
+ walk = func(module *moduleInfo) {
+ visited[module] = true
+ for _, moduleDep := range module.directDeps {
+ if !visited[moduleDep] {
+ walk(moduleDep)
+ }
+ }
+
+ if module != topModule {
+ visiting = module
+ visit(module.logicModule)
+ }
+ }
+
+ walk(topModule)
+}
+
+func (c *Context) visitDepsDepthFirstIf(topModule *moduleInfo, pred func(Module) bool,
+ visit func(Module)) {
+
+ visited := make(map[*moduleInfo]bool)
+ var visiting *moduleInfo
+
+ defer func() {
+ if r := recover(); r != nil {
+ panic(newPanicErrorf(r, "VisitDepsDepthFirstIf(%s, %s, %s) for dependency %s",
+ topModule, funcName(pred), funcName(visit), visiting))
+ }
+ }()
+
+ var walk func(module *moduleInfo)
+ walk = func(module *moduleInfo) {
+ visited[module] = true
+ for _, moduleDep := range module.directDeps {
+ if !visited[moduleDep] {
+ walk(moduleDep)
+ }
+ }
+
+ if module != topModule {
+ if pred(module.logicModule) {
+ visiting = module
+ visit(module.logicModule)
+ }
+ }
+ }
+
+ walk(topModule)
+}
+
// visitDirectDeps calls visit on each direct dependency of module, in order.
// A panic inside visit is re-raised annotated with the dependency being
// visited.
func (c *Context) visitDirectDeps(module *moduleInfo, visit func(Module)) {
	// dep is assigned (not shadowed) by the range below so the deferred
	// recover can report which dependency caused the panic.
	var dep *moduleInfo

	defer func() {
		if r := recover(); r != nil {
			panic(newPanicErrorf(r, "VisitDirectDeps(%s, %s) for dependency %s",
				module, funcName(visit), dep))
		}
	}()

	for _, dep = range module.directDeps {
		visit(dep.logicModule)
	}
}
+
// visitDirectDepsIf calls visit on each direct dependency of module for
// which pred returns true, in order. A panic inside pred or visit is
// re-raised annotated with the dependency being visited.
func (c *Context) visitDirectDepsIf(module *moduleInfo, pred func(Module) bool,
	visit func(Module)) {

	// dep is assigned (not shadowed) by the range below so the deferred
	// recover can report which dependency caused the panic.
	var dep *moduleInfo

	defer func() {
		if r := recover(); r != nil {
			panic(newPanicErrorf(r, "VisitDirectDepsIf(%s, %s, %s) for dependency %s",
				module, funcName(pred), funcName(visit), dep))
		}
	}()

	for _, dep = range module.directDeps {
		if pred(dep.logicModule) {
			visit(dep.logicModule)
		}
	}
}
+
+func (c *Context) sortedModuleNames() []string {
+ if c.cachedSortedModuleNames == nil {
+ c.cachedSortedModuleNames = make([]string, 0, len(c.moduleGroups))
+ for moduleName := range c.moduleGroups {
+ c.cachedSortedModuleNames = append(c.cachedSortedModuleNames,
+ moduleName)
+ }
+ sort.Strings(c.cachedSortedModuleNames)
+ }
+
+ return c.cachedSortedModuleNames
+}
+
// visitAllModules calls visit on every module variant, iterating module
// groups in sorted name order. A panic inside visit is re-raised annotated
// with the module being visited.
func (c *Context) visitAllModules(visit func(Module)) {
	// module is assigned (not shadowed) by the inner range below so the
	// deferred recover can report which module caused the panic.
	var module *moduleInfo

	defer func() {
		if r := recover(); r != nil {
			panic(newPanicErrorf(r, "VisitAllModules(%s) for %s",
				funcName(visit), module))
		}
	}()

	for _, moduleName := range c.sortedModuleNames() {
		group := c.moduleGroups[moduleName]
		for _, module = range group.modules {
			visit(module.logicModule)
		}
	}
}
+
+func (c *Context) visitAllModulesIf(pred func(Module) bool,
+ visit func(Module)) {
+
+ var module *moduleInfo
+
+ defer func() {
+ if r := recover(); r != nil {
+ panic(newPanicErrorf(r, "VisitAllModulesIf(%s, %s) for %s",
+ funcName(pred), funcName(visit), module))
+ }
+ }()
+
+ for _, moduleName := range c.sortedModuleNames() {
+ group := c.moduleGroups[moduleName]
+ for _, module := range group.modules {
+ if pred(module.logicModule) {
+ visit(module.logicModule)
+ }
+ }
+ }
+}
+
// visitAllModuleVariants calls visit on every variant in module's group, in
// order. A panic inside visit is re-raised annotated with the variant being
// visited.
func (c *Context) visitAllModuleVariants(module *moduleInfo,
	visit func(Module)) {

	// variant is assigned (not shadowed) by the range below so the deferred
	// recover can report which variant caused the panic.
	var variant *moduleInfo

	defer func() {
		if r := recover(); r != nil {
			panic(newPanicErrorf(r, "VisitAllModuleVariants(%s, %s) for %s",
				module, funcName(visit), variant))
		}
	}()

	for _, variant = range module.group.modules {
		visit(variant.logicModule)
	}
}
+
+func (c *Context) requireNinjaVersion(major, minor, micro int) {
+ if major != 1 {
+ panic("ninja version with major version != 1 not supported")
+ }
+ if c.requiredNinjaMinor < minor {
+ c.requiredNinjaMinor = minor
+ c.requiredNinjaMicro = micro
+ }
+ if c.requiredNinjaMinor == minor && c.requiredNinjaMicro < micro {
+ c.requiredNinjaMicro = micro
+ }
+}
+
+func (c *Context) setNinjaBuildDir(value *ninjaString) {
+ if c.ninjaBuildDir == nil {
+ c.ninjaBuildDir = value
+ }
+}
+
+func (c *Context) makeUniquePackageNames(
+ liveGlobals *liveTracker) (map[*packageContext]string, []string) {
+
+ pkgs := make(map[string]*packageContext)
+ pkgNames := make(map[*packageContext]string)
+ longPkgNames := make(map[*packageContext]bool)
+
+ processPackage := func(pctx *packageContext) {
+ if pctx == nil {
+ // This is a built-in rule and has no package.
+ return
+ }
+ if _, ok := pkgNames[pctx]; ok {
+ // We've already processed this package.
+ return
+ }
+
+ otherPkg, present := pkgs[pctx.shortName]
+ if present {
+ // Short name collision. Both this package and the one that's
+ // already there need to use their full names. We leave the short
+ // name in pkgNames for now so future collisions still get caught.
+ longPkgNames[pctx] = true
+ longPkgNames[otherPkg] = true
+ } else {
+ // No collision so far. Tentatively set the package's name to be
+ // its short name.
+ pkgNames[pctx] = pctx.shortName
+ pkgs[pctx.shortName] = pctx
+ }
+ }
+
+ // We try to give all packages their short name, but when we get collisions
+ // we need to use the full unique package name.
+ for v, _ := range liveGlobals.variables {
+ processPackage(v.packageContext())
+ }
+ for p, _ := range liveGlobals.pools {
+ processPackage(p.packageContext())
+ }
+ for r, _ := range liveGlobals.rules {
+ processPackage(r.packageContext())
+ }
+
+ // Add the packages that had collisions using their full unique names. This
+ // will overwrite any short names that were added in the previous step.
+ for pctx := range longPkgNames {
+ pkgNames[pctx] = pctx.fullName
+ }
+
+ // Create deps list from calls to PackageContext.AddNinjaFileDeps
+ deps := []string{}
+ for _, pkg := range pkgs {
+ deps = append(deps, pkg.ninjaFileDeps...)
+ }
+
+ return pkgNames, deps
+}
+
// checkForVariableReferenceCycles panics if any variable's value refers,
// directly or transitively, back to itself. Such cycles are a programming
// error in the Go code that defined the variables, not a Blueprint-file
// error, which is why this panics instead of returning errors.
func (c *Context) checkForVariableReferenceCycles(
	variables map[Variable]*ninjaString, pkgNames map[*packageContext]string) {

	visited := make(map[Variable]bool) // variables that were already checked
	checking := make(map[Variable]bool) // variables actively being checked

	var check func(v Variable) []Variable

	// check performs a DFS from v. It returns the partial cycle path (in
	// reverse order) while unwinding toward the cycle's start, which then
	// formats the message and panics; it returns nil when no cycle is found.
	check = func(v Variable) []Variable {
		visited[v] = true
		checking[v] = true
		defer delete(checking, v)

		value := variables[v]
		for _, dep := range value.variables {
			if checking[dep] {
				// This is a cycle.
				return []Variable{dep, v}
			}

			if !visited[dep] {
				cycle := check(dep)
				if cycle != nil {
					if cycle[0] == v {
						// We are the "start" of the cycle, so we're responsible
						// for generating the errors. The cycle list is in
						// reverse order because all the 'check' calls append
						// their own module to the list.
						msgs := []string{"detected variable reference cycle:"}

						// Iterate backwards through the cycle list.
						curName := v.fullName(pkgNames)
						curValue := value.Value(pkgNames)
						for i := len(cycle) - 1; i >= 0; i-- {
							next := cycle[i]
							nextName := next.fullName(pkgNames)
							nextValue := variables[next].Value(pkgNames)

							msgs = append(msgs, fmt.Sprintf(
								" %q depends on %q", curName, nextName))
							msgs = append(msgs, fmt.Sprintf(
								" [%s = %s]", curName, curValue))

							curName = nextName
							curValue = nextValue
						}

						// Variable reference cycles are a programming error,
						// not the fault of the Blueprint file authors.
						panic(strings.Join(msgs, "\n"))
					} else {
						// We're not the "start" of the cycle, so we just append
						// our module to the list and return it.
						return append(cycle, v)
					}
				}
			}
		}

		return nil
	}

	for v := range variables {
		if !visited[v] {
			cycle := check(v)
			if cycle != nil {
				// A complete cycle panics inside check, so a non-nil cycle
				// can never propagate back to the top level.
				panic("inconceivable!")
			}
		}
	}
}
+
+// AllTargets returns a map all the build target names to the rule used to build
+// them. This is the same information that is output by running 'ninja -t
+// targets all'. If this is called before PrepareBuildActions successfully
+// completes then ErrbuildActionsNotReady is returned.
+func (c *Context) AllTargets() (map[string]string, error) {
+ if !c.buildActionsReady {
+ return nil, ErrBuildActionsNotReady
+ }
+
+ targets := map[string]string{}
+
+ // Collect all the module build targets.
+ for _, module := range c.moduleInfo {
+ for _, buildDef := range module.actionDefs.buildDefs {
+ ruleName := buildDef.Rule.fullName(c.pkgNames)
+ for _, output := range buildDef.Outputs {
+ outputValue, err := output.Eval(c.globalVariables)
+ if err != nil {
+ return nil, err
+ }
+ targets[outputValue] = ruleName
+ }
+ }
+ }
+
+ // Collect all the singleton build targets.
+ for _, info := range c.singletonInfo {
+ for _, buildDef := range info.actionDefs.buildDefs {
+ ruleName := buildDef.Rule.fullName(c.pkgNames)
+ for _, output := range buildDef.Outputs {
+ outputValue, err := output.Eval(c.globalVariables)
+ if err != nil {
+ return nil, err
+ }
+ targets[outputValue] = ruleName
+ }
+ }
+ }
+
+ return targets, nil
+}
+
+func (c *Context) NinjaBuildDir() (string, error) {
+ if c.ninjaBuildDir != nil {
+ return c.ninjaBuildDir.Eval(c.globalVariables)
+ } else {
+ return "", nil
+ }
+}
+
+// ModuleTypePropertyStructs returns a mapping from module type name to a list of pointers to
+// property structs returned by the factory for that module type.
+func (c *Context) ModuleTypePropertyStructs() map[string][]interface{} {
+ ret := make(map[string][]interface{})
+ for moduleType, factory := range c.moduleFactories {
+ _, ret[moduleType] = factory()
+ }
+
+ return ret
+}
+
// ModuleName returns the name of the given logic module.
func (c *Context) ModuleName(logicModule Module) string {
	module := c.moduleInfo[logicModule]
	return module.properties.Name
}

// ModuleDir returns the directory (relative to the source root) of the
// Blueprints file that defined the given logic module.
func (c *Context) ModuleDir(logicModule Module) string {
	module := c.moduleInfo[logicModule]
	return filepath.Dir(module.relBlueprintsFile)
}

// ModuleSubDir returns the variant name of the given logic module.
func (c *Context) ModuleSubDir(logicModule Module) string {
	module := c.moduleInfo[logicModule]
	return module.variantName
}

// BlueprintFile returns the path (relative to the source root) of the
// Blueprints file that defined the given logic module.
func (c *Context) BlueprintFile(logicModule Module) string {
	module := c.moduleInfo[logicModule]
	return module.relBlueprintsFile
}

// ModuleErrorf returns an error whose message is formatted per fmt.Errorf
// and whose position is the given logic module's definition site.
func (c *Context) ModuleErrorf(logicModule Module, format string,
	args ...interface{}) error {

	module := c.moduleInfo[logicModule]
	return &Error{
		Err: fmt.Errorf(format, args...),
		Pos: module.pos,
	}
}
+
// VisitAllModules calls visit on every module variant.
func (c *Context) VisitAllModules(visit func(Module)) {
	c.visitAllModules(visit)
}

// VisitAllModulesIf calls visit on every module variant for which pred
// returns true.
func (c *Context) VisitAllModulesIf(pred func(Module) bool,
	visit func(Module)) {

	c.visitAllModulesIf(pred, visit)
}

// VisitDepsDepthFirst calls visit on module's transitive dependencies in
// depth-first post-order.
func (c *Context) VisitDepsDepthFirst(module Module,
	visit func(Module)) {

	c.visitDepsDepthFirst(c.moduleInfo[module], visit)
}

// VisitDepsDepthFirstIf calls visit on module's transitive dependencies for
// which pred returns true, in depth-first post-order.
func (c *Context) VisitDepsDepthFirstIf(module Module,
	pred func(Module) bool, visit func(Module)) {

	c.visitDepsDepthFirstIf(c.moduleInfo[module], pred, visit)
}

// PrimaryModule returns the first variant in the given module's group.
func (c *Context) PrimaryModule(module Module) Module {
	return c.moduleInfo[module].group.modules[0].logicModule
}

// FinalModule returns the last variant in the given module's group.
func (c *Context) FinalModule(module Module) Module {
	modules := c.moduleInfo[module].group.modules
	return modules[len(modules)-1].logicModule
}

// VisitAllModuleVariants calls visit on every variant in the given module's
// group.
func (c *Context) VisitAllModuleVariants(module Module,
	visit func(Module)) {

	c.visitAllModuleVariants(c.moduleInfo[module], visit)
}
+
+// WriteBuildFile writes the Ninja manifeset text for the generated build
+// actions to w. If this is called before PrepareBuildActions successfully
+// completes then ErrBuildActionsNotReady is returned.
+func (c *Context) WriteBuildFile(w io.Writer) error {
+ if !c.buildActionsReady {
+ return ErrBuildActionsNotReady
+ }
+
+ nw := newNinjaWriter(w)
+
+ err := c.writeBuildFileHeader(nw)
+ if err != nil {
+ return err
+ }
+
+ err = c.writeNinjaRequiredVersion(nw)
+ if err != nil {
+ return err
+ }
+
+ // TODO: Group the globals by package.
+
+ err = c.writeGlobalVariables(nw)
+ if err != nil {
+ return err
+ }
+
+ err = c.writeGlobalPools(nw)
+ if err != nil {
+ return err
+ }
+
+ err = c.writeBuildDir(nw)
+ if err != nil {
+ return err
+ }
+
+ err = c.writeGlobalRules(nw)
+ if err != nil {
+ return err
+ }
+
+ err = c.writeAllModuleActions(nw)
+ if err != nil {
+ return err
+ }
+
+ err = c.writeAllSingletonActions(nw)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type pkgAssociation struct {
+ PkgName string
+ PkgPath string
+}
+
+type pkgAssociationSorter struct {
+ pkgs []pkgAssociation
+}
+
+func (s *pkgAssociationSorter) Len() int {
+ return len(s.pkgs)
+}
+
+func (s *pkgAssociationSorter) Less(i, j int) bool {
+ iName := s.pkgs[i].PkgName
+ jName := s.pkgs[j].PkgName
+ return iName < jName
+}
+
+func (s *pkgAssociationSorter) Swap(i, j int) {
+ s.pkgs[i], s.pkgs[j] = s.pkgs[j], s.pkgs[i]
+}
+
// writeBuildFileHeader writes the manifest's leading comment block, rendered
// from fileHeaderTemplate with the table of ninja-package-name to Go-package-
// path associations, padded to a common width and sorted by name.
func (c *Context) writeBuildFileHeader(nw *ninjaWriter) error {
	headerTemplate := template.New("fileHeader")
	_, err := headerTemplate.Parse(fileHeaderTemplate)
	if err != nil {
		// This is a programming error.
		panic(err)
	}

	var pkgs []pkgAssociation
	maxNameLen := 0
	for pkg, name := range c.pkgNames {
		pkgs = append(pkgs, pkgAssociation{
			PkgName: name,
			PkgPath: pkg.pkgPath,
		})
		if len(name) > maxNameLen {
			maxNameLen = len(name)
		}
	}

	// Pad all names to the same width so the paths line up in the comment.
	for i := range pkgs {
		pkgs[i].PkgName += strings.Repeat(" ", maxNameLen-len(pkgs[i].PkgName))
	}

	sort.Sort(&pkgAssociationSorter{pkgs})

	params := map[string]interface{}{
		"Pkgs": pkgs,
	}

	buf := bytes.NewBuffer(nil)
	err = headerTemplate.Execute(buf, params)
	if err != nil {
		return err
	}

	return nw.Comment(buf.String())
}
+
+func (c *Context) writeNinjaRequiredVersion(nw *ninjaWriter) error {
+ value := fmt.Sprintf("%d.%d.%d", c.requiredNinjaMajor, c.requiredNinjaMinor,
+ c.requiredNinjaMicro)
+
+ err := nw.Assign("ninja_required_version", value)
+ if err != nil {
+ return err
+ }
+
+ return nw.BlankLine()
+}
+
+func (c *Context) writeBuildDir(nw *ninjaWriter) error {
+ if c.ninjaBuildDir != nil {
+ err := nw.Assign("builddir", c.ninjaBuildDir.Value(c.pkgNames))
+ if err != nil {
+ return err
+ }
+
+ err = nw.BlankLine()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type globalEntity interface {
+ fullName(pkgNames map[*packageContext]string) string
+}
+
+type globalEntitySorter struct {
+ pkgNames map[*packageContext]string
+ entities []globalEntity
+}
+
+func (s *globalEntitySorter) Len() int {
+ return len(s.entities)
+}
+
+func (s *globalEntitySorter) Less(i, j int) bool {
+ iName := s.entities[i].fullName(s.pkgNames)
+ jName := s.entities[j].fullName(s.pkgNames)
+ return iName < jName
+}
+
+func (s *globalEntitySorter) Swap(i, j int) {
+ s.entities[i], s.entities[j] = s.entities[j], s.entities[i]
+}
+
+func (c *Context) writeGlobalVariables(nw *ninjaWriter) error {
+ visited := make(map[Variable]bool)
+
+ var walk func(v Variable) error
+ walk = func(v Variable) error {
+ visited[v] = true
+
+ // First visit variables on which this variable depends.
+ value := c.globalVariables[v]
+ for _, dep := range value.variables {
+ if !visited[dep] {
+ err := walk(dep)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ err := nw.Assign(v.fullName(c.pkgNames), value.Value(c.pkgNames))
+ if err != nil {
+ return err
+ }
+
+ err = nw.BlankLine()
+ if err != nil {
+ return err
+ }
+
+ return nil
+ }
+
+ globalVariables := make([]globalEntity, 0, len(c.globalVariables))
+ for variable := range c.globalVariables {
+ globalVariables = append(globalVariables, variable)
+ }
+
+ sort.Sort(&globalEntitySorter{c.pkgNames, globalVariables})
+
+ for _, entity := range globalVariables {
+ v := entity.(Variable)
+ if !visited[v] {
+ err := walk(v)
+ if err != nil {
+ return nil
+ }
+ }
+ }
+
+ return nil
+}
+
+func (c *Context) writeGlobalPools(nw *ninjaWriter) error {
+ globalPools := make([]globalEntity, 0, len(c.globalPools))
+ for pool := range c.globalPools {
+ globalPools = append(globalPools, pool)
+ }
+
+ sort.Sort(&globalEntitySorter{c.pkgNames, globalPools})
+
+ for _, entity := range globalPools {
+ pool := entity.(Pool)
+ name := pool.fullName(c.pkgNames)
+ def := c.globalPools[pool]
+ err := def.WriteTo(nw, name)
+ if err != nil {
+ return err
+ }
+
+ err = nw.BlankLine()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *Context) writeGlobalRules(nw *ninjaWriter) error {
+ globalRules := make([]globalEntity, 0, len(c.globalRules))
+ for rule := range c.globalRules {
+ globalRules = append(globalRules, rule)
+ }
+
+ sort.Sort(&globalEntitySorter{c.pkgNames, globalRules})
+
+ for _, entity := range globalRules {
+ rule := entity.(Rule)
+ name := rule.fullName(c.pkgNames)
+ def := c.globalRules[rule]
+ err := def.WriteTo(nw, name, c.pkgNames)
+ if err != nil {
+ return err
+ }
+
+ err = nw.BlankLine()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type moduleSorter []*moduleInfo
+
+func (s moduleSorter) Len() int {
+ return len(s)
+}
+
+func (s moduleSorter) Less(i, j int) bool {
+ iName := s[i].properties.Name
+ jName := s[j].properties.Name
+ if iName == jName {
+ iName = s[i].variantName
+ jName = s[j].variantName
+ }
+ return iName < jName
+}
+
+func (s moduleSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
// writeAllModuleActions writes the build actions of every module, sorted by
// module name then variant name, each preceded by a header comment rendered
// from moduleHeaderTemplate. Modules with no actions are skipped.
func (c *Context) writeAllModuleActions(nw *ninjaWriter) error {
	headerTemplate := template.New("moduleHeader")
	_, err := headerTemplate.Parse(moduleHeaderTemplate)
	if err != nil {
		// This is a programming error.
		panic(err)
	}

	modules := make([]*moduleInfo, 0, len(c.moduleInfo))
	for _, module := range c.moduleInfo {
		modules = append(modules, module)
	}
	sort.Sort(moduleSorter(modules))

	buf := bytes.NewBuffer(nil)

	for _, module := range modules {
		// Skip modules that produced no variables, rules, or build defs.
		if len(module.actionDefs.variables)+len(module.actionDefs.rules)+len(module.actionDefs.buildDefs) == 0 {
			continue
		}

		buf.Reset()

		// In order to make the bootstrap build manifest independent of the
		// build dir we need to output the Blueprints file locations in the
		// comments as paths relative to the source directory.
		relPos := module.pos
		relPos.Filename = module.relBlueprintsFile

		// Get the name and location of the factory function for the module.
		factory := c.moduleFactories[module.typeName]
		factoryFunc := runtime.FuncForPC(reflect.ValueOf(factory).Pointer())
		factoryName := factoryFunc.Name()

		infoMap := map[string]interface{}{
			"properties": module.properties,
			"typeName": module.typeName,
			"goFactory": factoryName,
			"pos": relPos,
			"variant": module.variantName,
		}
		err = headerTemplate.Execute(buf, infoMap)
		if err != nil {
			return err
		}

		err = nw.Comment(buf.String())
		if err != nil {
			return err
		}

		err = nw.BlankLine()
		if err != nil {
			return err
		}

		err = c.writeLocalBuildActions(nw, &module.actionDefs)
		if err != nil {
			return err
		}

		err = nw.BlankLine()
		if err != nil {
			return err
		}
	}

	return nil
}
+
+func (c *Context) writeAllSingletonActions(nw *ninjaWriter) error {
+ headerTemplate := template.New("singletonHeader")
+ _, err := headerTemplate.Parse(singletonHeaderTemplate)
+ if err != nil {
+ // This is a programming error.
+ panic(err)
+ }
+
+ buf := bytes.NewBuffer(nil)
+
+ for _, info := range c.singletonInfo {
+ if len(info.actionDefs.variables)+len(info.actionDefs.rules)+len(info.actionDefs.buildDefs) == 0 {
+ continue
+ }
+
+ // Get the name of the factory function for the module.
+ factory := info.factory
+ factoryFunc := runtime.FuncForPC(reflect.ValueOf(factory).Pointer())
+ factoryName := factoryFunc.Name()
+
+ buf.Reset()
+ infoMap := map[string]interface{}{
+ "name": info.name,
+ "goFactory": factoryName,
+ }
+ err = headerTemplate.Execute(buf, infoMap)
+ if err != nil {
+ return err
+ }
+
+ err = nw.Comment(buf.String())
+ if err != nil {
+ return err
+ }
+
+ err = nw.BlankLine()
+ if err != nil {
+ return err
+ }
+
+ err = c.writeLocalBuildActions(nw, &info.actionDefs)
+ if err != nil {
+ return err
+ }
+
+ err = nw.BlankLine()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *Context) writeLocalBuildActions(nw *ninjaWriter,
+ defs *localBuildActions) error {
+
+ // Write the local variable assignments.
+ for _, v := range defs.variables {
+ // A localVariable doesn't need the package names or config to
+ // determine its name or value.
+ name := v.fullName(nil)
+ value, err := v.value(nil)
+ if err != nil {
+ panic(err)
+ }
+ err = nw.Assign(name, value.Value(c.pkgNames))
+ if err != nil {
+ return err
+ }
+ }
+
+ if len(defs.variables) > 0 {
+ err := nw.BlankLine()
+ if err != nil {
+ return err
+ }
+ }
+
+ // Write the local rules.
+ for _, r := range defs.rules {
+ // A localRule doesn't need the package names or config to determine
+ // its name or definition.
+ name := r.fullName(nil)
+ def, err := r.def(nil)
+ if err != nil {
+ panic(err)
+ }
+
+ err = def.WriteTo(nw, name, c.pkgNames)
+ if err != nil {
+ return err
+ }
+
+ err = nw.BlankLine()
+ if err != nil {
+ return err
+ }
+ }
+
+ // Write the build definitions.
+ for _, buildDef := range defs.buildDefs {
+ err := buildDef.WriteTo(nw, c.pkgNames)
+ if err != nil {
+ return err
+ }
+
+ if len(buildDef.Args) > 0 {
+ err = nw.BlankLine()
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func beforeInModuleList(a, b *moduleInfo, list []*moduleInfo) bool {
+ found := false
+ if a == b {
+ return false
+ }
+ for _, l := range list {
+ if l == a {
+ found = true
+ } else if l == b {
+ return found
+ }
+ }
+
+ missing := a
+ if found {
+ missing = b
+ }
+ panic(fmt.Errorf("element %v not found in list %v", missing, list))
+}
+
+type panicError struct {
+ panic interface{}
+ stack []byte
+ in string
+}
+
+func newPanicErrorf(panic interface{}, in string, a ...interface{}) error {
+ buf := make([]byte, 4096)
+ count := runtime.Stack(buf, false)
+ return panicError{
+ panic: panic,
+ in: fmt.Sprintf(in, a...),
+ stack: buf[:count],
+ }
+}
+
+func (p panicError) Error() string {
+ return fmt.Sprintf("panic in %s\n%s\n%s\n", p.in, p.panic, p.stack)
+}
+
+func (p *panicError) addIn(in string) {
+ p.in += " in " + in
+}
+
+func funcName(f interface{}) string {
+ return runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()
+}
+
+var fileHeaderTemplate = `******************************************************************************
+*** This file is generated and should not be edited ***
+******************************************************************************
+{{if .Pkgs}}
+This file contains variables, rules, and pools with name prefixes indicating
+they were generated by the following Go packages:
+{{range .Pkgs}}
+ {{.PkgName}} [from Go package {{.PkgPath}}]{{end}}{{end}}
+
+`
+
+var moduleHeaderTemplate = `# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
+Module: {{.properties.Name}}
+Variant: {{.variant}}
+Type: {{.typeName}}
+Factory: {{.goFactory}}
+Defined: {{.pos}}
+`
+
+var singletonHeaderTemplate = `# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
+Singleton: {{.name}}
+Factory: {{.goFactory}}
+`
diff --git a/blueprint/context_test.go b/blueprint/context_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..acf0c1c040a6844b0e3abee3a0b41e21c4b8e2da
--- /dev/null
+++ b/blueprint/context_test.go
@@ -0,0 +1,130 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package blueprint
+
+import (
+ "bytes"
+ "testing"
+)
+
+type Walker interface {
+ Walk() bool
+}
+
+type fooModule struct {
+ properties struct {
+ Foo string
+ }
+}
+
+func newFooModule() (Module, []interface{}) {
+ m := &fooModule{}
+ return m, []interface{}{&m.properties}
+}
+
+func (f *fooModule) GenerateBuildActions(ModuleContext) {
+}
+
+func (f *fooModule) Foo() string {
+ return f.properties.Foo
+}
+
+func (f *fooModule) Walk() bool {
+ return true
+}
+
+type barModule struct {
+ properties struct {
+ Bar bool
+ }
+}
+
+func newBarModule() (Module, []interface{}) {
+ m := &barModule{}
+ return m, []interface{}{&m.properties}
+}
+
+func (b *barModule) GenerateBuildActions(ModuleContext) {
+}
+
+func (b *barModule) Bar() bool {
+ return b.properties.Bar
+}
+
+func (b *barModule) Walk() bool {
+ return false
+}
+
+func TestContextParse(t *testing.T) {
+ ctx := NewContext()
+ ctx.RegisterModuleType("foo_module", newFooModule)
+ ctx.RegisterModuleType("bar_module", newBarModule)
+
+ r := bytes.NewBufferString(`
+ foo_module {
+ name: "MyFooModule",
+ deps: ["MyBarModule"],
+ }
+
+ bar_module {
+ name: "MyBarModule",
+ }
+ `)
+
+ _, _, _, errs := ctx.parse(".", "Blueprint", r, nil)
+ if len(errs) > 0 {
+ t.Errorf("unexpected parse errors:")
+ for _, err := range errs {
+ t.Errorf(" %s", err)
+ }
+ t.FailNow()
+ }
+
+ errs = ctx.ResolveDependencies(nil)
+ if len(errs) > 0 {
+ t.Errorf("unexpected dep errors:")
+ for _, err := range errs {
+ t.Errorf(" %s", err)
+ }
+ t.FailNow()
+ }
+}
+
+// |---B===D - represents a non-walkable edge
+// A = represents a walkable edge
+// |===C---E===G
+// | | A should not be visited because it's the root node.
+// |===F===| B, D and E should not be walked.
+func TestWalkDeps(t *testing.T) {
+ ctx := NewContext()
+ ctx.RegisterModuleType("foo_module", newFooModule)
+ ctx.RegisterModuleType("bar_module", newBarModule)
+ ctx.ParseBlueprintsFiles("context_test_Blueprints")
+ ctx.ResolveDependencies(nil)
+
+ var output string
+ topModule := ctx.moduleGroups["A"].modules[0]
+ ctx.walkDeps(topModule,
+ func(module, parent Module) bool {
+ if module.(Walker).Walk() {
+ output += ctx.ModuleName(module)
+ return true
+ }
+ return false
+ })
+ if output != "CFG" {
+ t.Fatalf("unexpected walkDeps behaviour: %s\nshould be: CFG", output)
+ }
+}
diff --git a/blueprint/context_test_Blueprints b/blueprint/context_test_Blueprints
new file mode 100644
index 0000000000000000000000000000000000000000..6cac8b22eb26f61260ba34a005daef6a8eea8af6
--- /dev/null
+++ b/blueprint/context_test_Blueprints
@@ -0,0 +1,32 @@
+foo_module {
+ name: "A",
+ deps: ["B", "C"],
+}
+
+bar_module {
+ name: "B",
+ deps: ["D"],
+}
+
+foo_module {
+ name: "C",
+ deps: ["E", "F"],
+}
+
+foo_module {
+ name: "D",
+}
+
+bar_module {
+ name: "E",
+ deps: ["G"],
+}
+
+foo_module {
+ name: "F",
+ deps: ["G"],
+}
+
+foo_module {
+ name: "G",
+}
diff --git a/blueprint/deptools/depfile.go b/blueprint/deptools/depfile.go
new file mode 100644
index 0000000000000000000000000000000000000000..bfcf2ce7479253474f61624ed38df8a8e5260e00
--- /dev/null
+++ b/blueprint/deptools/depfile.go
@@ -0,0 +1,55 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package deptools
+
+import (
+ "fmt"
+ "os"
+ "strings"
+)
+
+var (
+ pathEscaper = strings.NewReplacer(
+ `\`, `\\`,
+ ` `, `\ `,
+ `#`, `\#`,
+ `*`, `\*`,
+ `[`, `\[`,
+ `|`, `\|`)
+)
+
+// WriteDepFile creates a new gcc-style depfile and populates it with content
+// indicating that target depends on deps.
+func WriteDepFile(filename, target string, deps []string) error {
+ f, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ var escapedDeps []string
+
+ for _, dep := range deps {
+ escapedDeps = append(escapedDeps, pathEscaper.Replace(dep))
+ }
+
+ _, err = fmt.Fprintf(f, "%s: \\\n %s\n", target,
+ strings.Join(escapedDeps, " \\\n "))
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/blueprint/doc.go b/blueprint/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..60b68a120edc3bb3cdcbb624afd2c0a9f160ed3f
--- /dev/null
+++ b/blueprint/doc.go
@@ -0,0 +1,68 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Blueprint is a meta-build system that reads in Blueprints files that describe
+// modules that need to be built, and produces a Ninja
+// (http://martine.github.io/ninja/) manifest describing the commands that need
+// to be run and their dependencies. Where most build systems use built-in
+// rules or a domain-specific language to describe the logic how modules are
+// converted to build rules, Blueprint delegates this to per-project build logic
+// written in Go. For large, heterogenous projects this allows the inherent
+// complexity of the build logic to be maintained in a high-level language,
+// while still allowing simple changes to individual modules by modifying easy
+// to understand Blueprints files.
+//
+// Blueprint uses a bootstrapping process to allow the code for Blueprint,
+// the code for the build logic, and the code for the project being compiled
+// to all live in the project. Dependencies between the layers are fully
+// tracked - a change to the logic code will cause the logic to be recompiled,
+// regenerate the project build manifest, and run modified project rules. A
+// change to Blueprint itself will cause Blueprint to rebuild, and then rebuild
+// the logic, etc.
+//
+// A Blueprints file is a list of modules in a pseudo-python data format, where
+// the module type looks like a function call, and the properties of the module
+// look like optional arguments. For example, a simple module might look like:
+//
+// cc_library(
+// name = "cmd",
+// srcs = [
+// "main.c",
+// ],
+// deps = [
+// "libc",
+// ],
+// )
+//
+// subdirs = ["subdir1", "subdir2"]
+//
+// The modules from the top level Blueprints file and recursively through any
+// subdirectories listed by the "subdirs" variable are read by Blueprint, and
+// their properties are stored into property structs by module type. Once
+// all modules are read, Blueprint calls any registered Mutators, in
+// registration order. Mutators can visit each module top-down or bottom-up,
+// and modify them as necessary. Common modifications include setting
+// properties on modules to propagate information down from dependers to
+// dependees (for example, telling a module what kinds of parents depend on it),
+// or splitting a module into multiple variants (for example, one per
+// architecture being compiled). After all Mutators have run, each module is
+// asked to generate build rules based on property values, and then singletons
+// can generate any build rules from the output of all modules.
+//
+// The per-project build logic defines a top level command, referred to in the
+// documentation as the "primary builder". This command is responsible for
+// registering the module types needed for the project, as well as any
+// singletons or mutators, and then calling into Blueprint with the path of the
+// root Blueprint file.
+package blueprint
diff --git a/blueprint/gotestmain/gotestmain.go b/blueprint/gotestmain/gotestmain.go
new file mode 100644
index 0000000000000000000000000000000000000000..0c2ca3ddfd6e1bdc9097756640d87675aff61e34
--- /dev/null
+++ b/blueprint/gotestmain/gotestmain.go
@@ -0,0 +1,106 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gotestmain
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "os"
+ "strings"
+ "text/template"
+)
+
+var (
+ output = flag.String("o", "", "output filename")
+ pkg = flag.String("pkg", "", "test package")
+ exitCode = 0
+)
+
+type data struct {
+ Package string
+ Tests []string
+}
+
+func findTests(srcs []string) (tests []string) {
+ for _, src := range srcs {
+ f, err := parser.ParseFile(token.NewFileSet(), src, nil, 0)
+ if err != nil {
+ panic(err)
+ }
+ for _, obj := range f.Scope.Objects {
+ if obj.Kind != ast.Fun || !strings.HasPrefix(obj.Name, "Test") {
+ continue
+ }
+ tests = append(tests, obj.Name)
+ }
+ }
+ return
+}
+
+func main() {
+ flag.Parse()
+
+ if flag.NArg() == 0 {
+ fmt.Fprintln(os.Stderr, "error: must pass at least one input")
+ exitCode = 1
+ return
+ }
+
+ buf := &bytes.Buffer{}
+
+ d := data{
+ Package: *pkg,
+ Tests: findTests(flag.Args()),
+ }
+
+ err := testMainTmpl.Execute(buf, d)
+ if err != nil {
+ panic(err)
+ }
+
+ err = ioutil.WriteFile(*output, buf.Bytes(), 0666)
+ if err != nil {
+ panic(err)
+ }
+}
+
+var testMainTmpl = template.Must(template.New("testMain").Parse(`
+package main
+
+import (
+ "testing"
+
+ pkg "{{.Package}}"
+)
+
+var t = []testing.InternalTest{
+{{range .Tests}}
+ {"{{.}}", pkg.{{.}}},
+{{end}}
+}
+
+func matchString(pat, str string) (bool, error) {
+ return true, nil
+}
+
+func main() {
+ testing.Main(matchString, t, nil, nil)
+}
+`))
diff --git a/blueprint/gotestrunner/gotestrunner.go b/blueprint/gotestrunner/gotestrunner.go
new file mode 100644
index 0000000000000000000000000000000000000000..20fbe1ce4538a6486e166265bc500fd5525d9f77
--- /dev/null
+++ b/blueprint/gotestrunner/gotestrunner.go
@@ -0,0 +1,103 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "syscall"
+)
+
+var (
+ chdir = flag.String("p", "", "Change to a path before executing test")
+ touch = flag.String("f", "", "Write a file on success")
+)
+
+// This will copy the stdout from the test process to our stdout
+// unless it only contains "PASS\n".
+func handleStdout(stdout io.Reader) {
+ reader := bufio.NewReader(stdout)
+
+ // This is intentionally 6 instead of 5 to check for EOF
+ buf, _ := reader.Peek(6)
+ if bytes.Equal(buf, []byte("PASS\n")) {
+ return
+ }
+
+ io.Copy(os.Stdout, reader)
+}
+
+func main() {
+ flag.Parse()
+
+ if flag.NArg() == 0 {
+ fmt.Fprintln(os.Stderr, "error: must pass at least one test executable")
+ os.Exit(1)
+ }
+
+ test, err := filepath.Abs(flag.Arg(0))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error: Failed to locate test binary: %s", err)
+ }
+
+ cmd := exec.Command(test, flag.Args()[1:]...)
+ if *chdir != "" {
+ cmd.Dir = *chdir
+ }
+
+ cmd.Stderr = os.Stderr
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+
+ err = cmd.Start()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+
+ handleStdout(stdout)
+
+ if err = cmd.Wait(); err != nil {
+ if e, ok := err.(*exec.ExitError); ok {
+ if status, ok := e.Sys().(syscall.WaitStatus); ok && status.Exited() {
+ os.Exit(status.ExitStatus())
+ } else if status.Signaled() {
+ fmt.Fprintf(os.Stderr, "test got signal %s\n", status.Signal())
+ os.Exit(1)
+ }
+ }
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+
+ if *touch != "" {
+ err = ioutil.WriteFile(*touch, []byte{}, 0666)
+ if err != nil {
+ panic(err)
+ }
+ }
+
+ os.Exit(0)
+}
diff --git a/blueprint/live_tracker.go b/blueprint/live_tracker.go
new file mode 100644
index 0000000000000000000000000000000000000000..8348988d068afd4e9fd3a26f9572f11e6d28fda6
--- /dev/null
+++ b/blueprint/live_tracker.go
@@ -0,0 +1,201 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package blueprint
+
+import "sync"
+
+// A liveTracker tracks the values of live variables, rules, and pools. An
+// entity is made "live" when it is referenced directly or indirectly by a build
+// definition. When an entity is made live its value is computed based on the
+// configuration.
+type liveTracker struct {
+ sync.Mutex
+ config interface{} // Used to evaluate variable, rule, and pool values.
+
+ variables map[Variable]*ninjaString
+ pools map[Pool]*poolDef
+ rules map[Rule]*ruleDef
+}
+
+func newLiveTracker(config interface{}) *liveTracker {
+ return &liveTracker{
+ config: config,
+ variables: make(map[Variable]*ninjaString),
+ pools: make(map[Pool]*poolDef),
+ rules: make(map[Rule]*ruleDef),
+ }
+}
+
+func (l *liveTracker) AddBuildDefDeps(def *buildDef) error {
+ l.Lock()
+ defer l.Unlock()
+
+ ruleDef, err := l.addRule(def.Rule)
+ if err != nil {
+ return err
+ }
+ def.RuleDef = ruleDef
+
+ err = l.addNinjaStringListDeps(def.Outputs)
+ if err != nil {
+ return err
+ }
+
+ err = l.addNinjaStringListDeps(def.Inputs)
+ if err != nil {
+ return err
+ }
+
+ err = l.addNinjaStringListDeps(def.Implicits)
+ if err != nil {
+ return err
+ }
+
+ err = l.addNinjaStringListDeps(def.OrderOnly)
+ if err != nil {
+ return err
+ }
+
+ for _, value := range def.Args {
+ err = l.addNinjaStringDeps(value)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (l *liveTracker) addRule(r Rule) (def *ruleDef, err error) {
+ def, ok := l.rules[r]
+ if !ok {
+ def, err = r.def(l.config)
+ if err == errRuleIsBuiltin {
+ // No need to do anything for built-in rules.
+ return nil, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ if def.Pool != nil {
+ err = l.addPool(def.Pool)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ err = l.addNinjaStringListDeps(def.CommandDeps)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, value := range def.Variables {
+ err = l.addNinjaStringDeps(value)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ l.rules[r] = def
+ }
+
+ return
+}
+
+func (l *liveTracker) addPool(p Pool) error {
+ _, ok := l.pools[p]
+ if !ok {
+ def, err := p.def(l.config)
+ if err == errPoolIsBuiltin {
+ // No need to do anything for built-in pools.
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ l.pools[p] = def
+ }
+
+ return nil
+}
+
+func (l *liveTracker) addVariable(v Variable) error {
+ _, ok := l.variables[v]
+ if !ok {
+ value, err := v.value(l.config)
+ if err == errVariableIsArg {
+ // This variable is a placeholder for an argument that can be passed
+ // to a rule. It has no value and thus doesn't reference any other
+ // variables.
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ l.variables[v] = value
+
+ err = l.addNinjaStringDeps(value)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (l *liveTracker) addNinjaStringListDeps(list []*ninjaString) error {
+ for _, str := range list {
+ err := l.addNinjaStringDeps(str)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (l *liveTracker) addNinjaStringDeps(str *ninjaString) error {
+ for _, v := range str.variables {
+ err := l.addVariable(v)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (l *liveTracker) RemoveVariableIfLive(v Variable) bool {
+ l.Lock()
+ defer l.Unlock()
+
+ _, isLive := l.variables[v]
+ if isLive {
+ delete(l.variables, v)
+ }
+ return isLive
+}
+
+func (l *liveTracker) RemoveRuleIfLive(r Rule) bool {
+ l.Lock()
+ defer l.Unlock()
+
+ _, isLive := l.rules[r]
+ if isLive {
+ delete(l.rules, r)
+ }
+ return isLive
+}
diff --git a/blueprint/loadplugins/loadplugins.go b/blueprint/loadplugins/loadplugins.go
new file mode 100644
index 0000000000000000000000000000000000000000..3c7e1e3de18ad2468cdfc5087c021022f6c14745
--- /dev/null
+++ b/blueprint/loadplugins/loadplugins.go
@@ -0,0 +1,67 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "text/template"
+)
+
+var (
+ output = flag.String("o", "", "output filename")
+ pkg = flag.String("p", "main", "package name")
+)
+
+func main() {
+ flag.Parse()
+
+ if flag.NArg() == 0 {
+ fmt.Fprintln(os.Stderr, "error: must pass at least one input")
+ os.Exit(1)
+ }
+
+ buf := &bytes.Buffer{}
+
+ err := pluginTmpl.Execute(buf, struct {
+ Package string
+ Plugins []string
+ }{
+ filepath.Base(*pkg),
+ flag.Args(),
+ })
+ if err != nil {
+ panic(err)
+ }
+
+ err = ioutil.WriteFile(*output, buf.Bytes(), 0666)
+ if err != nil {
+ panic(err)
+ }
+}
+
+var pluginTmpl = template.Must(template.New("pluginloader").Parse(`
+package {{.Package}}
+
+import (
+{{range .Plugins}}
+ _ "{{.}}"
+{{end}}
+)
+`))
diff --git a/blueprint/mangle.go b/blueprint/mangle.go
new file mode 100644
index 0000000000000000000000000000000000000000..fe3fe65e46cf75e3c36812e640f294fc8b0d03b9
--- /dev/null
+++ b/blueprint/mangle.go
@@ -0,0 +1,27 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package blueprint
+
+func packageNamespacePrefix(packageName string) string {
+ return "g." + packageName + "."
+}
+
+func moduleNamespacePrefix(moduleName string) string {
+ return "m." + moduleName + "."
+}
+
+func singletonNamespacePrefix(singletonName string) string {
+ return "s." + singletonName + "."
+}
diff --git a/blueprint/module_ctx.go b/blueprint/module_ctx.go
new file mode 100644
index 0000000000000000000000000000000000000000..381b54a34287c54620477b3058f2b29590104c84
--- /dev/null
+++ b/blueprint/module_ctx.go
@@ -0,0 +1,552 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package blueprint
+
+import (
+ "fmt"
+ "path/filepath"
+ "text/scanner"
+)
+
+// A Module handles generating all of the Ninja build actions needed to build a
+// single module based on properties defined in a Blueprints file. Module
+// objects are initially created during the parse phase of a Context using one
+// of the registered module types (and the associated ModuleFactory function).
+// The Module's properties struct is automatically filled in with the property
+// values specified in the Blueprints file (see Context.RegisterModuleType for more
+// information on this).
+//
+// A Module can be split into multiple Modules by a Mutator. All existing
+// properties set on the module will be duplicated to the new Module, and then
+// modified as necessary by the Mutator.
+//
+// The Module implementation can access the build configuration as well as any
+// modules on which it depends (as defined by the "deps" property
+// specified in the Blueprints file, dynamically added by implementing the
+// (deprecated) DynamicDependerModule interface, or dynamically added by a
+// BottomUpMutator) using the ModuleContext passed to GenerateBuildActions.
+// This ModuleContext is also used to create Ninja build actions and to report
+// errors to the user.
+//
+// In addition to implementing the GenerateBuildActions method, a Module should
+// implement methods that provide dependent modules and singletons information
+// they need to generate their build actions. These methods will only be called
+// after GenerateBuildActions is called because the Context calls
+// GenerateBuildActions in dependency-order (and singletons are invoked after
+// all the Modules). The set of methods a Module supports will determine how
+// dependent Modules interact with it.
+//
+// For example, consider a Module that is responsible for generating a library
+// that other modules can link against. The library Module might implement the
+// following interface:
+//
+// type LibraryProducer interface {
+// LibraryFileName() string
+// }
+//
+// func IsLibraryProducer(module blueprint.Module) {
+// _, ok := module.(LibraryProducer)
+// return ok
+// }
+//
+// A binary-producing Module that depends on the library Module could then do:
+//
+// func (m *myBinaryModule) GenerateBuildActions(ctx blueprint.ModuleContext) {
+// ...
+// var libraryFiles []string
+// ctx.VisitDepsDepthFirstIf(IsLibraryProducer,
+// func(module blueprint.Module) {
+// libProducer := module.(LibraryProducer)
+// libraryFiles = append(libraryFiles, libProducer.LibraryFileName())
+// })
+// ...
+// }
+//
+// to build the list of library file names that should be included in its link
+// command.
+//
+// GenerateBuildActions may be called from multiple threads. It is guaranteed to
+// be called after it has finished being called on all dependencies and on all
+// variants of that module that appear earlier in the ModuleContext.VisitAllModuleVariants list.
+// Any accesses to global variables or to Module objects that are not dependencies
+// or variants of the current Module must be synchronized by the implementation of
+// GenerateBuildActions.
+type Module interface {
+ // GenerateBuildActions is called by the Context that created the Module
+ // during its generate phase. This call should generate all Ninja build
+ // actions (rules, pools, and build statements) needed to build the module.
+ GenerateBuildActions(ModuleContext)
+}
+
+// A DynamicDependerModule is a Module that may add dependencies that do not
+// appear in its "deps" property. Any Module that implements this interface
+// will have its DynamicDependencies method called by the Context that created
+// it during generate phase.
+//
+// Deprecated, use a BottomUpMutator instead
+type DynamicDependerModule interface {
+ Module
+
+ // DynamicDependencies is called by the Context that created the
+ // DynamicDependerModule during its generate phase. This call should return
+ // the list of module names that the DynamicDependerModule depends on
+ // dynamically. Module names that already appear in the "deps" property may
+ // but do not need to be included in the returned list.
+ DynamicDependencies(DynamicDependerModuleContext) []string
+}
+
+type BaseModuleContext interface {
+ ModuleName() string
+ ModuleDir() string
+ Config() interface{}
+
+ ContainsProperty(name string) bool
+ Errorf(pos scanner.Position, fmt string, args ...interface{})
+ ModuleErrorf(fmt string, args ...interface{})
+ PropertyErrorf(property, fmt string, args ...interface{})
+ Failed() bool
+
+ moduleInfo() *moduleInfo
+ error(err error)
+}
+
+type DynamicDependerModuleContext BottomUpMutatorContext
+
+type ModuleContext interface {
+ BaseModuleContext
+
+ OtherModuleName(m Module) string
+ OtherModuleErrorf(m Module, fmt string, args ...interface{})
+
+ VisitDirectDeps(visit func(Module))
+ VisitDirectDepsIf(pred func(Module) bool, visit func(Module))
+ VisitDepsDepthFirst(visit func(Module))
+ VisitDepsDepthFirstIf(pred func(Module) bool, visit func(Module))
+ WalkDeps(visit func(Module, Module) bool)
+
+ ModuleSubDir() string
+
+ Variable(pctx PackageContext, name, value string)
+ Rule(pctx PackageContext, name string, params RuleParams, argNames ...string) Rule
+ Build(pctx PackageContext, params BuildParams)
+
+ AddNinjaFileDeps(deps ...string)
+
+ PrimaryModule() Module
+ FinalModule() Module
+ VisitAllModuleVariants(visit func(Module))
+
+ GetMissingDependencies() []string
+}
+
+var _ BaseModuleContext = (*baseModuleContext)(nil)
+
+type baseModuleContext struct {
+ context *Context
+ config interface{}
+ module *moduleInfo
+ errs []error
+}
+
+func (d *baseModuleContext) moduleInfo() *moduleInfo {
+ return d.module
+}
+
+func (d *baseModuleContext) ModuleName() string {
+ return d.module.properties.Name
+}
+
+func (d *baseModuleContext) ContainsProperty(name string) bool {
+ _, ok := d.module.propertyPos[name]
+ return ok
+}
+
+func (d *baseModuleContext) ModuleDir() string {
+ return filepath.Dir(d.module.relBlueprintsFile)
+}
+
+func (d *baseModuleContext) Config() interface{} {
+ return d.config
+}
+
+func (d *baseModuleContext) error(err error) {
+ if err != nil {
+ d.errs = append(d.errs, err)
+ }
+}
+
+func (d *baseModuleContext) Errorf(pos scanner.Position,
+ format string, args ...interface{}) {
+
+ d.error(&Error{
+ Err: fmt.Errorf(format, args...),
+ Pos: pos,
+ })
+}
+
+func (d *baseModuleContext) ModuleErrorf(format string,
+ args ...interface{}) {
+
+ d.error(&Error{
+ Err: fmt.Errorf(format, args...),
+ Pos: d.module.pos,
+ })
+}
+
+func (d *baseModuleContext) PropertyErrorf(property, format string,
+ args ...interface{}) {
+
+ pos := d.module.propertyPos[property]
+
+ if !pos.IsValid() {
+ pos = d.module.pos
+ }
+
+ format = property + ": " + format
+
+ d.error(&Error{
+ Err: fmt.Errorf(format, args...),
+ Pos: pos,
+ })
+}
+
+func (d *baseModuleContext) Failed() bool {
+ return len(d.errs) > 0
+}
+
+var _ ModuleContext = (*moduleContext)(nil)
+
+type moduleContext struct {
+ baseModuleContext
+ scope *localScope
+ ninjaFileDeps []string
+ actionDefs localBuildActions
+ handledMissingDeps bool
+}
+
+func (m *moduleContext) OtherModuleName(logicModule Module) string {
+ module := m.context.moduleInfo[logicModule]
+ return module.properties.Name
+}
+
+func (m *moduleContext) OtherModuleErrorf(logicModule Module, format string,
+ args ...interface{}) {
+
+ module := m.context.moduleInfo[logicModule]
+ m.errs = append(m.errs, &Error{
+ Err: fmt.Errorf(format, args...),
+ Pos: module.pos,
+ })
+}
+
+func (m *moduleContext) VisitDirectDeps(visit func(Module)) {
+ m.context.visitDirectDeps(m.module, visit)
+}
+
+func (m *moduleContext) VisitDirectDepsIf(pred func(Module) bool, visit func(Module)) {
+ m.context.visitDirectDepsIf(m.module, pred, visit)
+}
+
+func (m *moduleContext) VisitDepsDepthFirst(visit func(Module)) {
+ m.context.visitDepsDepthFirst(m.module, visit)
+}
+
+func (m *moduleContext) VisitDepsDepthFirstIf(pred func(Module) bool,
+ visit func(Module)) {
+
+ m.context.visitDepsDepthFirstIf(m.module, pred, visit)
+}
+
+func (m *moduleContext) WalkDeps(visit func(Module, Module) bool) {
+ m.context.walkDeps(m.module, visit)
+}
+
+func (m *moduleContext) ModuleSubDir() string {
+ return m.module.variantName
+}
+
+func (m *moduleContext) Variable(pctx PackageContext, name, value string) {
+ m.scope.ReparentTo(pctx)
+
+ v, err := m.scope.AddLocalVariable(name, value)
+ if err != nil {
+ panic(err)
+ }
+
+ m.actionDefs.variables = append(m.actionDefs.variables, v)
+}
+
+func (m *moduleContext) Rule(pctx PackageContext, name string,
+ params RuleParams, argNames ...string) Rule {
+
+ m.scope.ReparentTo(pctx)
+
+ r, err := m.scope.AddLocalRule(name, ¶ms, argNames...)
+ if err != nil {
+ panic(err)
+ }
+
+ m.actionDefs.rules = append(m.actionDefs.rules, r)
+
+ return r
+}
+
+func (m *moduleContext) Build(pctx PackageContext, params BuildParams) {
+ m.scope.ReparentTo(pctx)
+
+ def, err := parseBuildParams(m.scope, ¶ms)
+ if err != nil {
+ panic(err)
+ }
+
+ m.actionDefs.buildDefs = append(m.actionDefs.buildDefs, def)
+}
+
+func (m *moduleContext) AddNinjaFileDeps(deps ...string) {
+ m.ninjaFileDeps = append(m.ninjaFileDeps, deps...)
+}
+
+func (m *moduleContext) PrimaryModule() Module {
+ return m.module.group.modules[0].logicModule
+}
+
+func (m *moduleContext) FinalModule() Module {
+ return m.module.group.modules[len(m.module.group.modules)-1].logicModule
+}
+
+func (m *moduleContext) VisitAllModuleVariants(visit func(Module)) {
+ m.context.visitAllModuleVariants(m.module, visit)
+}
+
+func (m *moduleContext) GetMissingDependencies() []string {
+ m.handledMissingDeps = true
+ return m.module.missingDeps
+}
+
+//
+// MutatorContext
+//
+
+type mutatorContext struct {
+ baseModuleContext
+ name string
+ reverseDeps map[*moduleInfo][]*moduleInfo
+}
+
+type baseMutatorContext interface {
+ BaseModuleContext
+
+ Module() Module
+}
+
+type EarlyMutatorContext interface {
+ baseMutatorContext
+
+ CreateVariations(...string) []Module
+ CreateLocalVariations(...string) []Module
+}
+
+type TopDownMutatorContext interface {
+ baseMutatorContext
+
+ OtherModuleName(m Module) string
+ OtherModuleErrorf(m Module, fmt string, args ...interface{})
+
+ VisitDirectDeps(visit func(Module))
+ VisitDirectDepsIf(pred func(Module) bool, visit func(Module))
+ VisitDepsDepthFirst(visit func(Module))
+ VisitDepsDepthFirstIf(pred func(Module) bool, visit func(Module))
+ WalkDeps(visit func(Module, Module) bool)
+}
+
+type BottomUpMutatorContext interface {
+ baseMutatorContext
+
+ AddDependency(module Module, name ...string)
+ AddReverseDependency(module Module, name string)
+ CreateVariations(...string) []Module
+ CreateLocalVariations(...string) []Module
+ SetDependencyVariation(string)
+ AddVariationDependencies([]Variation, ...string)
+ AddFarVariationDependencies([]Variation, ...string)
+}
+
+// A Mutator function is called for each Module, and can use
+// MutatorContext.CreateVariations to split a Module into multiple Modules,
+// modifying properties on the new modules to differentiate them. It is called
+// after parsing all Blueprint files, but before generating any build rules,
+// and is always called on dependencies before being called on the depending module.
+//
+// The Mutator function should only modify members of properties structs, and not
+// members of the module struct itself, to ensure the modified values are copied
+// if a second Mutator chooses to split the module a second time.
+type TopDownMutator func(mctx TopDownMutatorContext)
+type BottomUpMutator func(mctx BottomUpMutatorContext)
+type EarlyMutator func(mctx EarlyMutatorContext)
+
+// Split a module into multiple variants, one for each name in the variationNames
+// parameter. It returns a list of new modules in the same order as the variationNames
+// list.
+//
+// If any of the dependencies of the module being operated on were already split
+// by calling CreateVariations with the same name, the dependency will automatically
+// be updated to point to the matching variant.
+//
+// If a module is split, and then a module depending on the first module is not split
+// when the Mutator is later called on it, the dependency of the depending module will
+// automatically be updated to point to the first variant.
+func (mctx *mutatorContext) CreateVariations(variationNames ...string) []Module {
+ return mctx.createVariations(variationNames, false)
+}
+
+// Split a module into multiple variants, one for each name in the variantNames
+// parameter. It returns a list of new modules in the same order as the variantNames
+// list.
+//
+// Local variations do not affect automatic dependency resolution - dependencies added
+// to the split module via deps or DynamicDependerModule must exactly match a variant
+// that contains all the non-local variations.
+func (mctx *mutatorContext) CreateLocalVariations(variationNames ...string) []Module {
+ return mctx.createVariations(variationNames, true)
+}
+
+func (mctx *mutatorContext) createVariations(variationNames []string, local bool) []Module {
+ ret := []Module{}
+ modules, errs := mctx.context.createVariations(mctx.module, mctx.name, variationNames)
+ if len(errs) > 0 {
+ mctx.errs = append(mctx.errs, errs...)
+ }
+
+ for i, module := range modules {
+ ret = append(ret, module.logicModule)
+ if !local {
+ module.dependencyVariant[mctx.name] = variationNames[i]
+ }
+ }
+
+ if len(ret) != len(variationNames) {
+ panic("oops!")
+ }
+
+ return ret
+}
+
+// Set all dangling dependencies on the current module to point to the variation
+// with given name.
+func (mctx *mutatorContext) SetDependencyVariation(variationName string) {
+ mctx.context.convertDepsToVariation(mctx.module, mctx.name, variationName)
+}
+
+func (mctx *mutatorContext) Module() Module {
+ return mctx.module.logicModule
+}
+
+// Add a dependency to the given module.
+// Does not affect the ordering of the current mutator pass, but will be ordered
+// correctly for all future mutator passes.
+func (mctx *mutatorContext) AddDependency(module Module, deps ...string) {
+ for _, dep := range deps {
+ errs := mctx.context.addDependency(mctx.context.moduleInfo[module], dep)
+ if len(errs) > 0 {
+ mctx.errs = append(mctx.errs, errs...)
+ }
+ }
+}
+
+// Add a dependency from the destination to the given module.
+// Does not affect the ordering of the current mutator pass, but will be ordered
+// correctly for all future mutator passes. All reverse dependencies for a destination module are
+// collected until the end of the mutator pass, sorted by name, and then appended to the destination
+// module's dependency list.
+func (mctx *mutatorContext) AddReverseDependency(module Module, destName string) {
+ destModule, errs := mctx.context.findReverseDependency(mctx.context.moduleInfo[module], destName)
+ if len(errs) > 0 {
+ mctx.errs = append(mctx.errs, errs...)
+ return
+ }
+
+ mctx.reverseDeps[destModule] = append(mctx.reverseDeps[destModule],
+ mctx.context.moduleInfo[module])
+}
+
+// AddVariationDependencies adds deps as dependencies of the current module, but uses the variations
+// argument to select which variant of the dependency to use. A variant of the dependency must
+// exist that matches all of the non-local variations of the current module, plus the variations
+// argument.
+func (mctx *mutatorContext) AddVariationDependencies(variations []Variation,
+ deps ...string) {
+
+ for _, dep := range deps {
+ errs := mctx.context.addVariationDependency(mctx.module, variations, dep, false)
+ if len(errs) > 0 {
+ mctx.errs = append(mctx.errs, errs...)
+ }
+ }
+}
+
+// AddFarVariationDependencies adds deps as dependencies of the current module, but uses the
+// variations argument to select which variant of the dependency to use. A variant of the
+// dependency must exist that matches the variations argument, but may also have other variations.
+// For any unspecified variation the first variant will be used.
+//
+// Unlike AddVariationDependencies, the variations of the current module are ignored - the
+// dependency only needs to match the supplied variations.
+func (mctx *mutatorContext) AddFarVariationDependencies(variations []Variation,
+ deps ...string) {
+
+ for _, dep := range deps {
+ errs := mctx.context.addVariationDependency(mctx.module, variations, dep, true)
+ if len(errs) > 0 {
+ mctx.errs = append(mctx.errs, errs...)
+ }
+ }
+}
+
+func (mctx *mutatorContext) OtherModuleName(logicModule Module) string {
+ module := mctx.context.moduleInfo[logicModule]
+ return module.properties.Name
+}
+
+func (mctx *mutatorContext) OtherModuleErrorf(logicModule Module, format string,
+ args ...interface{}) {
+
+ module := mctx.context.moduleInfo[logicModule]
+ mctx.errs = append(mctx.errs, &Error{
+ Err: fmt.Errorf(format, args...),
+ Pos: module.pos,
+ })
+}
+
+func (mctx *mutatorContext) VisitDirectDeps(visit func(Module)) {
+ mctx.context.visitDirectDeps(mctx.module, visit)
+}
+
+func (mctx *mutatorContext) VisitDirectDepsIf(pred func(Module) bool, visit func(Module)) {
+ mctx.context.visitDirectDepsIf(mctx.module, pred, visit)
+}
+
+func (mctx *mutatorContext) VisitDepsDepthFirst(visit func(Module)) {
+ mctx.context.visitDepsDepthFirst(mctx.module, visit)
+}
+
+func (mctx *mutatorContext) VisitDepsDepthFirstIf(pred func(Module) bool,
+ visit func(Module)) {
+
+ mctx.context.visitDepsDepthFirstIf(mctx.module, pred, visit)
+}
+
+func (mctx *mutatorContext) WalkDeps(visit func(Module, Module) bool) {
+ mctx.context.walkDeps(mctx.module, visit)
+}
diff --git a/blueprint/ninja_defs.go b/blueprint/ninja_defs.go
new file mode 100644
index 0000000000000000000000000000000000000000..e7a2929ed02129783a390f47ca32f11c7b98925a
--- /dev/null
+++ b/blueprint/ninja_defs.go
@@ -0,0 +1,383 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package blueprint
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// A Deps value indicates the dependency file format that Ninja should expect to
+// be output by a compiler.
+type Deps int
+
+const (
+ DepsNone Deps = iota
+ DepsGCC
+ DepsMSVC
+)
+
+func (d Deps) String() string {
+ switch d {
+ case DepsNone:
+ return "none"
+ case DepsGCC:
+ return "gcc"
+ case DepsMSVC:
+ return "msvc"
+ default:
+ panic(fmt.Sprintf("unknown deps value: %d", d))
+ }
+}
+
+// A PoolParams object contains the set of parameters that make up a Ninja pool
+// definition.
+type PoolParams struct {
+ Comment string // The comment that will appear above the definition.
+ Depth int // The Ninja pool depth.
+}
+
+// A RuleParams object contains the set of parameters that make up a Ninja rule
+// definition.
+type RuleParams struct {
+ // These fields correspond to a Ninja variable of the same name.
+ Command string // The command that Ninja will run for the rule.
+ Depfile string // The dependency file name.
+ Deps Deps // The format of the dependency file.
+ Description string // The description that Ninja will print for the rule.
+ Generator bool // Whether the rule generates the Ninja manifest file.
+ Pool Pool // The Ninja pool to which the rule belongs.
+ Restat bool // Whether Ninja should re-stat the rule's outputs.
+ Rspfile string // The response file.
+ RspfileContent string // The response file content.
+
+ // These fields are used internally in Blueprint
+ CommandDeps []string // Command-specific implicit dependencies to prepend to builds
+ Comment string // The comment that will appear above the definition.
+}
+
+// A BuildParams object contains the set of parameters that make up a Ninja
+// build statement. Each field except for Args corresponds with a part of the
+// Ninja build statement. The Args field contains variable names and values
+// that are set within the build statement's scope in the Ninja file.
+type BuildParams struct {
+ Comment string // The comment that will appear above the definition.
+ Rule Rule // The rule to invoke.
+ Outputs []string // The list of output targets.
+ Inputs []string // The list of explicit input dependencies.
+ Implicits []string // The list of implicit dependencies.
+ OrderOnly []string // The list of order-only dependencies.
+ Args map[string]string // The variable/value pairs to set.
+ Optional bool // Skip outputting a default statement
+}
+
+// A poolDef describes a pool definition. It does not include the name of the
+// pool.
+type poolDef struct {
+ Comment string
+ Depth int
+}
+
+func parsePoolParams(scope scope, params *PoolParams) (*poolDef,
+ error) {
+
+ def := &poolDef{
+ Comment: params.Comment,
+ Depth: params.Depth,
+ }
+
+ return def, nil
+}
+
+func (p *poolDef) WriteTo(nw *ninjaWriter, name string) error {
+ if p.Comment != "" {
+ err := nw.Comment(p.Comment)
+ if err != nil {
+ return err
+ }
+ }
+
+ err := nw.Pool(name)
+ if err != nil {
+ return err
+ }
+
+ return nw.ScopedAssign("depth", strconv.Itoa(p.Depth))
+}
+
+// A ruleDef describes a rule definition. It does not include the name of the
+// rule.
+type ruleDef struct {
+ CommandDeps []*ninjaString
+ Comment string
+ Pool Pool
+ Variables map[string]*ninjaString
+}
+
+func parseRuleParams(scope scope, params *RuleParams) (*ruleDef,
+ error) {
+
+ r := &ruleDef{
+ Comment: params.Comment,
+ Pool: params.Pool,
+ Variables: make(map[string]*ninjaString),
+ }
+
+ if params.Command == "" {
+ return nil, fmt.Errorf("encountered rule params with no command " +
+ "specified")
+ }
+
+ if r.Pool != nil && !scope.IsPoolVisible(r.Pool) {
+ return nil, fmt.Errorf("Pool %s is not visible in this scope", r.Pool)
+ }
+
+ value, err := parseNinjaString(scope, params.Command)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing Command param: %s", err)
+ }
+ r.Variables["command"] = value
+
+ if params.Depfile != "" {
+ value, err = parseNinjaString(scope, params.Depfile)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing Depfile param: %s", err)
+ }
+ r.Variables["depfile"] = value
+ }
+
+ if params.Deps != DepsNone {
+ r.Variables["deps"] = simpleNinjaString(params.Deps.String())
+ }
+
+ if params.Description != "" {
+ value, err = parseNinjaString(scope, params.Description)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing Description param: %s", err)
+ }
+ r.Variables["description"] = value
+ }
+
+ if params.Generator {
+ r.Variables["generator"] = simpleNinjaString("true")
+ }
+
+ if params.Restat {
+ r.Variables["restat"] = simpleNinjaString("true")
+ }
+
+ if params.Rspfile != "" {
+ value, err = parseNinjaString(scope, params.Rspfile)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing Rspfile param: %s", err)
+ }
+ r.Variables["rspfile"] = value
+ }
+
+ if params.RspfileContent != "" {
+ value, err = parseNinjaString(scope, params.RspfileContent)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing RspfileContent param: %s",
+ err)
+ }
+ r.Variables["rspfile_content"] = value
+ }
+
+ r.CommandDeps, err = parseNinjaStrings(scope, params.CommandDeps)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing CommandDeps param: %s", err)
+ }
+
+ return r, nil
+}
+
+func (r *ruleDef) WriteTo(nw *ninjaWriter, name string,
+ pkgNames map[*packageContext]string) error {
+
+ if r.Comment != "" {
+ err := nw.Comment(r.Comment)
+ if err != nil {
+ return err
+ }
+ }
+
+ err := nw.Rule(name)
+ if err != nil {
+ return err
+ }
+
+ if r.Pool != nil {
+ err = nw.ScopedAssign("pool", r.Pool.fullName(pkgNames))
+ if err != nil {
+ return err
+ }
+ }
+
+ var keys []string
+ for k := range r.Variables {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, name := range keys {
+ err = nw.ScopedAssign(name, r.Variables[name].Value(pkgNames))
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// A buildDef describes a build target definition.
+type buildDef struct {
+ Comment string
+ Rule Rule
+ RuleDef *ruleDef
+ Outputs []*ninjaString
+ Inputs []*ninjaString
+ Implicits []*ninjaString
+ OrderOnly []*ninjaString
+ Args map[Variable]*ninjaString
+ Optional bool
+}
+
+func parseBuildParams(scope scope, params *BuildParams) (*buildDef,
+ error) {
+
+ comment := params.Comment
+ rule := params.Rule
+
+ b := &buildDef{
+ Comment: comment,
+ Rule: rule,
+ }
+
+ if !scope.IsRuleVisible(rule) {
+ return nil, fmt.Errorf("Rule %s is not visible in this scope", rule)
+ }
+
+ if len(params.Outputs) == 0 {
+ return nil, errors.New("Outputs param has no elements")
+ }
+
+ var err error
+ b.Outputs, err = parseNinjaStrings(scope, params.Outputs)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing Outputs param: %s", err)
+ }
+
+ b.Inputs, err = parseNinjaStrings(scope, params.Inputs)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing Inputs param: %s", err)
+ }
+
+ b.Implicits, err = parseNinjaStrings(scope, params.Implicits)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing Implicits param: %s", err)
+ }
+
+ b.OrderOnly, err = parseNinjaStrings(scope, params.OrderOnly)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing OrderOnly param: %s", err)
+ }
+
+ b.Optional = params.Optional
+
+ argNameScope := rule.scope()
+
+ if len(params.Args) > 0 {
+ b.Args = make(map[Variable]*ninjaString)
+ for name, value := range params.Args {
+ if !rule.isArg(name) {
+ return nil, fmt.Errorf("unknown argument %q", name)
+ }
+
+ argVar, err := argNameScope.LookupVariable(name)
+ if err != nil {
+ // This shouldn't happen.
+ return nil, fmt.Errorf("argument lookup error: %s", err)
+ }
+
+ ninjaValue, err := parseNinjaString(scope, value)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing variable %q: %s", name,
+ err)
+ }
+
+ b.Args[argVar] = ninjaValue
+ }
+ }
+
+ return b, nil
+}
+
+func (b *buildDef) WriteTo(nw *ninjaWriter, pkgNames map[*packageContext]string) error {
+ var (
+ comment = b.Comment
+ rule = b.Rule.fullName(pkgNames)
+ outputs = valueList(b.Outputs, pkgNames, outputEscaper)
+ explicitDeps = valueList(b.Inputs, pkgNames, inputEscaper)
+ implicitDeps = valueList(b.Implicits, pkgNames, inputEscaper)
+ orderOnlyDeps = valueList(b.OrderOnly, pkgNames, inputEscaper)
+ )
+
+ if b.RuleDef != nil {
+ implicitDeps = append(valueList(b.RuleDef.CommandDeps, pkgNames, inputEscaper), implicitDeps...)
+ }
+
+ err := nw.Build(comment, rule, outputs, explicitDeps, implicitDeps, orderOnlyDeps)
+ if err != nil {
+ return err
+ }
+
+ args := make(map[string]string)
+
+ for argVar, value := range b.Args {
+ args[argVar.fullName(pkgNames)] = value.Value(pkgNames)
+ }
+
+ var keys []string
+ for k := range args {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, name := range keys {
+ err = nw.ScopedAssign(name, args[name])
+ if err != nil {
+ return err
+ }
+ }
+
+ if !b.Optional {
+ nw.Default(outputs...)
+ }
+
+ return nw.BlankLine()
+}
+
+func valueList(list []*ninjaString, pkgNames map[*packageContext]string,
+ escaper *strings.Replacer) []string {
+
+ result := make([]string, len(list))
+ for i, ninjaStr := range list {
+ result[i] = ninjaStr.ValueWithEscaper(pkgNames, escaper)
+ }
+ return result
+}
diff --git a/blueprint/ninja_strings.go b/blueprint/ninja_strings.go
new file mode 100644
index 0000000000000000000000000000000000000000..5bdddeae50b05cadc2944158b06b4c0710b77025
--- /dev/null
+++ b/blueprint/ninja_strings.go
@@ -0,0 +1,365 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package blueprint
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+)
+
+const eof = -1
+
+var (
+ defaultEscaper = strings.NewReplacer(
+ "\n", "$\n")
+ inputEscaper = strings.NewReplacer(
+ "\n", "$\n",
+ " ", "$ ")
+ outputEscaper = strings.NewReplacer(
+ "\n", "$\n",
+ " ", "$ ",
+ ":", "$:")
+)
+
+type ninjaString struct {
+ strings []string
+ variables []Variable
+}
+
+type scope interface {
+ LookupVariable(name string) (Variable, error)
+ IsRuleVisible(rule Rule) bool
+ IsPoolVisible(pool Pool) bool
+}
+
+func simpleNinjaString(str string) *ninjaString {
+ return &ninjaString{
+ strings: []string{str},
+ }
+}
+
+type parseState struct {
+ scope scope
+ str string
+ stringStart int
+ varStart int
+ result *ninjaString
+}
+
+func (ps *parseState) pushVariable(v Variable) {
+ if len(ps.result.variables) == len(ps.result.strings) {
+ // Last push was a variable, we need a blank string separator
+ ps.result.strings = append(ps.result.strings, "")
+ }
+ ps.result.variables = append(ps.result.variables, v)
+}
+
+func (ps *parseState) pushString(s string) {
+ if len(ps.result.strings) != len(ps.result.variables) {
+ panic("oops, pushed string after string")
+ }
+ ps.result.strings = append(ps.result.strings, s)
+}
+
+type stateFunc func(*parseState, int, rune) (stateFunc, error)
+
+// parseNinjaString parses an unescaped ninja string (i.e. all $
+// occurrences are expected to be variables or $$) and returns a list of the
+// variable names that the string references.
+func parseNinjaString(scope scope, str string) (*ninjaString, error) {
+ // naively pre-allocate slices by counting $ signs
+ n := strings.Count(str, "$")
+ result := &ninjaString{
+ strings: make([]string, 0, n+1),
+ variables: make([]Variable, 0, n),
+ }
+
+ parseState := &parseState{
+ scope: scope,
+ str: str,
+ result: result,
+ }
+
+ state := parseStringState
+ var err error
+ for i := 0; i < len(str); i++ {
+ r := rune(str[i])
+ state, err = state(parseState, i, r)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ _, err = state(parseState, len(parseState.str), eof)
+ if err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
+
+func parseStringState(state *parseState, i int, r rune) (stateFunc, error) {
+ switch {
+ case r == '$':
+ state.varStart = i + 1
+ return parseDollarStartState, nil
+
+ case r == eof:
+ state.pushString(state.str[state.stringStart:i])
+ return nil, nil
+
+ default:
+ return parseStringState, nil
+ }
+}
+
+func parseDollarStartState(state *parseState, i int, r rune) (stateFunc, error) {
+	switch {
+	case r >= 'a' && r <= 'z', r >= 'A' && r <= 'Z',
+		r >= '0' && r <= '9', r == '_', r == '-':
+		// The beginning of the variable name. Output the string and
+		// keep going.
+		state.pushString(state.str[state.stringStart : i-1])
+		return parseDollarState, nil
+
+	case r == '$':
+		// Just a "$$". Go back to parseStringState without changing
+		// state.stringStart.
+		return parseStringState, nil
+
+	case r == '{':
+		// This is a bracketed variable name (e.g. "${blah.blah}"). Output
+		// the string and keep going.
+		state.pushString(state.str[state.stringStart : i-1])
+		state.varStart = i + 1
+		return parseBracketsState, nil
+
+	case r == eof:
+		return nil, fmt.Errorf("unexpected end of string after '$'")
+
+	default:
+		// This was some arbitrary character following a dollar sign,
+		// which is not allowed.
+		return nil, fmt.Errorf("invalid character after '$' at byte "+
+			"offset %d", i)
+	}
+}
+
+func parseDollarState(state *parseState, i int, r rune) (stateFunc, error) {
+ switch {
+ case r >= 'a' && r <= 'z', r >= 'A' && r <= 'Z',
+ r >= '0' && r <= '9', r == '_', r == '-':
+ // A part of the variable name. Keep going.
+ return parseDollarState, nil
+
+ case r == '$':
+ // A dollar after the variable name (e.g. "$blah$"). Output the
+ // variable we have and start a new one.
+ v, err := state.scope.LookupVariable(state.str[state.varStart:i])
+ if err != nil {
+ return nil, err
+ }
+
+ state.pushVariable(v)
+ state.varStart = i + 1
+ state.stringStart = i
+
+ return parseDollarStartState, nil
+
+ case r == eof:
+ // This is the end of the variable name.
+ v, err := state.scope.LookupVariable(state.str[state.varStart:i])
+ if err != nil {
+ return nil, err
+ }
+
+ state.pushVariable(v)
+
+ // We always end with a string, even if it's an empty one.
+ state.pushString("")
+
+ return nil, nil
+
+ default:
+ // We've just gone past the end of the variable name, so record what
+ // we have.
+ v, err := state.scope.LookupVariable(state.str[state.varStart:i])
+ if err != nil {
+ return nil, err
+ }
+
+ state.pushVariable(v)
+ state.stringStart = i
+ return parseStringState, nil
+ }
+}
+
+func parseBracketsState(state *parseState, i int, r rune) (stateFunc, error) {
+ switch {
+ case r >= 'a' && r <= 'z', r >= 'A' && r <= 'Z',
+ r >= '0' && r <= '9', r == '_', r == '-', r == '.':
+ // A part of the variable name. Keep going.
+ return parseBracketsState, nil
+
+ case r == '}':
+ if state.varStart == i {
+ // The brackets were immediately closed. That's no good.
+ return nil, fmt.Errorf("empty variable name at byte offset %d",
+ i)
+ }
+
+ // This is the end of the variable name.
+ v, err := state.scope.LookupVariable(state.str[state.varStart:i])
+ if err != nil {
+ return nil, err
+ }
+
+ state.pushVariable(v)
+ state.stringStart = i + 1
+ return parseStringState, nil
+
+ case r == eof:
+ return nil, fmt.Errorf("unexpected end of string in variable name")
+
+ default:
+ // This character isn't allowed in a variable name.
+ return nil, fmt.Errorf("invalid character in variable name at "+
+ "byte offset %d", i)
+ }
+}
+
+func parseNinjaStrings(scope scope, strs []string) ([]*ninjaString,
+ error) {
+
+ if len(strs) == 0 {
+ return nil, nil
+ }
+ result := make([]*ninjaString, len(strs))
+ for i, str := range strs {
+ ninjaStr, err := parseNinjaString(scope, str)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing element %d: %s", i, err)
+ }
+ result[i] = ninjaStr
+ }
+ return result, nil
+}
+
+func (n *ninjaString) Value(pkgNames map[*packageContext]string) string {
+ return n.ValueWithEscaper(pkgNames, defaultEscaper)
+}
+
+func (n *ninjaString) ValueWithEscaper(pkgNames map[*packageContext]string,
+ escaper *strings.Replacer) string {
+
+ str := escaper.Replace(n.strings[0])
+ for i, v := range n.variables {
+ str += "${" + v.fullName(pkgNames) + "}"
+ str += escaper.Replace(n.strings[i+1])
+ }
+ return str
+}
+
+func (n *ninjaString) Eval(variables map[Variable]*ninjaString) (string, error) {
+ str := n.strings[0]
+ for i, v := range n.variables {
+ variable, ok := variables[v]
+ if !ok {
+ return "", fmt.Errorf("no such global variable: %s", v)
+ }
+ value, err := variable.Eval(variables)
+ if err != nil {
+ return "", err
+ }
+ str += value + n.strings[i+1]
+ }
+ return str, nil
+}
+
+func validateNinjaName(name string) error {
+ for i, r := range name {
+ valid := (r >= 'a' && r <= 'z') ||
+ (r >= 'A' && r <= 'Z') ||
+ (r >= '0' && r <= '9') ||
+ (r == '_') ||
+ (r == '-') ||
+ (r == '.')
+ if !valid {
+
+ return fmt.Errorf("%q contains an invalid Ninja name character "+
+ "%q at byte offset %d", name, r, i)
+ }
+ }
+ return nil
+}
+
+func toNinjaName(name string) string {
+ ret := bytes.Buffer{}
+ ret.Grow(len(name))
+ for _, r := range name {
+ valid := (r >= 'a' && r <= 'z') ||
+ (r >= 'A' && r <= 'Z') ||
+ (r >= '0' && r <= '9') ||
+ (r == '_') ||
+ (r == '-') ||
+ (r == '.')
+ if valid {
+ ret.WriteRune(r)
+ } else {
+ ret.WriteRune('_')
+ }
+ }
+
+ return ret.String()
+}
+
+var builtinRuleArgs = []string{"out", "in"}
+
+func validateArgName(argName string) error {
+ err := validateNinjaName(argName)
+ if err != nil {
+ return err
+ }
+
+ // We only allow globals within the rule's package to be used as rule
+ // arguments. A global in another package can always be mirrored into
+ // the rule's package by defining a new variable, so this doesn't limit
+ // what's possible. This limitation prevents situations where a Build
+ // invocation in another package must use the rule-defining package's
+ // import name for a 3rd package in order to set the rule's arguments.
+ if strings.ContainsRune(argName, '.') {
+ return fmt.Errorf("%q contains a '.' character", argName)
+ }
+
+ for _, builtin := range builtinRuleArgs {
+ if argName == builtin {
+ return fmt.Errorf("%q conflicts with Ninja built-in", argName)
+ }
+ }
+
+ return nil
+}
+
+func validateArgNames(argNames []string) error {
+ for _, argName := range argNames {
+ err := validateArgName(argName)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/blueprint/ninja_strings_test.go b/blueprint/ninja_strings_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..b417335b2edf692f11370dd4689e5bff6fd9de71
--- /dev/null
+++ b/blueprint/ninja_strings_test.go
@@ -0,0 +1,158 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package blueprint
+
+import (
+ "reflect"
+ "testing"
+)
+
+var ninjaParseTestCases = []struct {
+ input string
+ vars []string
+ strs []string
+ err string
+}{
+ {
+ input: "abc def $ghi jkl",
+ vars: []string{"ghi"},
+ strs: []string{"abc def ", " jkl"},
+ },
+ {
+ input: "abc def $ghi$jkl",
+ vars: []string{"ghi", "jkl"},
+ strs: []string{"abc def ", "", ""},
+ },
+ {
+ input: "foo $012_-345xyz_! bar",
+ vars: []string{"012_-345xyz_"},
+ strs: []string{"foo ", "! bar"},
+ },
+ {
+ input: "foo ${012_-345xyz_} bar",
+ vars: []string{"012_-345xyz_"},
+ strs: []string{"foo ", " bar"},
+ },
+ {
+ input: "foo ${012_-345xyz_} bar",
+ vars: []string{"012_-345xyz_"},
+ strs: []string{"foo ", " bar"},
+ },
+ {
+ input: "foo $$ bar",
+ vars: nil,
+ strs: []string{"foo $$ bar"},
+ },
+ {
+ input: "$foo${bar}",
+ vars: []string{"foo", "bar"},
+ strs: []string{"", "", ""},
+ },
+ {
+ input: "$foo$$",
+ vars: []string{"foo"},
+ strs: []string{"", "$$"},
+ },
+ {
+ input: "foo bar",
+ vars: nil,
+ strs: []string{"foo bar"},
+ },
+ {
+ input: "foo $ bar",
+ err: "invalid character after '$' at byte offset 5",
+ },
+ {
+ input: "foo $",
+ err: "unexpected end of string after '$'",
+ },
+ {
+ input: "foo ${} bar",
+ err: "empty variable name at byte offset 6",
+ },
+ {
+ input: "foo ${abc!} bar",
+ err: "invalid character in variable name at byte offset 9",
+ },
+ {
+ input: "foo ${abc",
+ err: "unexpected end of string in variable name",
+ },
+}
+
+func TestParseNinjaString(t *testing.T) {
+	for _, testCase := range ninjaParseTestCases {
+		scope := newLocalScope(nil, "namespace")
+		expectedVars := []Variable{}
+		for _, varName := range testCase.vars {
+			v, err := scope.LookupVariable(varName)
+			if err != nil {
+				v, err = scope.AddLocalVariable(varName, "")
+				if err != nil {
+					t.Fatalf("error creating scope: %s", err)
+				}
+			}
+			expectedVars = append(expectedVars, v)
+		}
+
+		output, err := parseNinjaString(scope, testCase.input)
+		if err == nil {
+			if !reflect.DeepEqual(output.variables, expectedVars) {
+				t.Errorf("incorrect variable list:")
+				t.Errorf("     input: %q", testCase.input)
+				t.Errorf("  expected: %#v", expectedVars)
+				t.Errorf("       got: %#v", output.variables)
+			}
+			if !reflect.DeepEqual(output.strings, testCase.strs) {
+				t.Errorf("incorrect string list:")
+				t.Errorf("     input: %q", testCase.input)
+				t.Errorf("  expected: %#v", testCase.strs)
+				t.Errorf("       got: %#v", output.strings)
+			}
+		}
+		var errStr string
+		if err != nil {
+			errStr = err.Error()
+		}
+		if errStr != testCase.err {
+			t.Errorf("unexpected error:")
+			t.Errorf("     input: %q", testCase.input)
+			t.Errorf("  expected: %q", testCase.err)
+			t.Errorf("       got: %q", errStr)
+		}
+	}
+}
+
+func TestParseNinjaStringWithImportedVar(t *testing.T) {
+ ImpVar := &staticVariable{name_: "ImpVar"}
+ impScope := newScope(nil)
+ impScope.AddVariable(ImpVar)
+ scope := newScope(nil)
+ scope.AddImport("impPkg", impScope)
+
+ input := "abc def ${impPkg.ImpVar} ghi"
+ output, err := parseNinjaString(scope, input)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ expect := []Variable{ImpVar}
+ if !reflect.DeepEqual(output.variables, expect) {
+ t.Errorf("incorrect output:")
+ t.Errorf(" input: %q", input)
+ t.Errorf(" expected: %#v", expect)
+ t.Errorf(" got: %#v", output)
+ }
+}
diff --git a/blueprint/ninja_writer.go b/blueprint/ninja_writer.go
new file mode 100644
index 0000000000000000000000000000000000000000..42b9aa801debb4e9a338d57c5d2490776ec99e3b
--- /dev/null
+++ b/blueprint/ninja_writer.go
@@ -0,0 +1,248 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package blueprint
+
+import (
+ "fmt"
+ "io"
+ "strings"
+ "unicode"
+)
+
+const (
+ indentWidth = 4
+ maxIndentDepth = 2
+ lineWidth = 80
+)
+
+var indentString = strings.Repeat(" ", indentWidth*maxIndentDepth)
+
+type ninjaWriter struct {
+ writer io.Writer
+
+ justDidBlankLine bool // true if the last operation was a BlankLine
+}
+
+func newNinjaWriter(writer io.Writer) *ninjaWriter {
+ return &ninjaWriter{
+ writer: writer,
+ }
+}
+
+func (n *ninjaWriter) Comment(comment string) error {
+ n.justDidBlankLine = false
+
+ const lineHeaderLen = len("# ")
+ const maxLineLen = lineWidth - lineHeaderLen
+
+ var lineStart, lastSplitPoint int
+ for i, r := range comment {
+ if unicode.IsSpace(r) {
+ // We know we can safely split the line here.
+ lastSplitPoint = i + 1
+ }
+
+ var line string
+ var writeLine bool
+ switch {
+ case r == '\n':
+ // Output the line without trimming the left so as to allow comments
+ // to contain their own indentation.
+ line = strings.TrimRightFunc(comment[lineStart:i], unicode.IsSpace)
+ writeLine = true
+
+ case (i-lineStart > maxLineLen) && (lastSplitPoint > lineStart):
+ // The line has grown too long and is splittable. Split it at the
+ // last split point.
+ line = strings.TrimSpace(comment[lineStart:lastSplitPoint])
+ writeLine = true
+ }
+
+ if writeLine {
+ line = strings.TrimSpace("# "+line) + "\n"
+ _, err := io.WriteString(n.writer, line)
+ if err != nil {
+ return err
+ }
+ lineStart = lastSplitPoint
+ }
+ }
+
+ if lineStart != len(comment) {
+ line := strings.TrimSpace(comment[lineStart:])
+ _, err := fmt.Fprintf(n.writer, "# %s\n", line)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (n *ninjaWriter) Pool(name string) error {
+ n.justDidBlankLine = false
+ _, err := fmt.Fprintf(n.writer, "pool %s\n", name)
+ return err
+}
+
+func (n *ninjaWriter) Rule(name string) error {
+ n.justDidBlankLine = false
+ _, err := fmt.Fprintf(n.writer, "rule %s\n", name)
+ return err
+}
+
+func (n *ninjaWriter) Build(comment string, rule string, outputs, explicitDeps, implicitDeps,
+ orderOnlyDeps []string) error {
+
+ n.justDidBlankLine = false
+
+ const lineWrapLen = len(" $")
+ const maxLineLen = lineWidth - lineWrapLen
+
+ wrapper := ninjaWriterWithWrap{
+ ninjaWriter: n,
+ maxLineLen: maxLineLen,
+ }
+
+ if comment != "" {
+ wrapper.Comment(comment)
+ }
+
+ wrapper.WriteString("build")
+
+ for _, output := range outputs {
+ wrapper.WriteStringWithSpace(output)
+ }
+
+ wrapper.WriteString(":")
+
+ wrapper.WriteStringWithSpace(rule)
+
+ for _, dep := range explicitDeps {
+ wrapper.WriteStringWithSpace(dep)
+ }
+
+ if len(implicitDeps) > 0 {
+ wrapper.WriteStringWithSpace("|")
+
+ for _, dep := range implicitDeps {
+ wrapper.WriteStringWithSpace(dep)
+ }
+ }
+
+ if len(orderOnlyDeps) > 0 {
+ wrapper.WriteStringWithSpace("||")
+
+ for _, dep := range orderOnlyDeps {
+ wrapper.WriteStringWithSpace(dep)
+ }
+ }
+
+ return wrapper.Flush()
+}
+
+func (n *ninjaWriter) Assign(name, value string) error {
+ n.justDidBlankLine = false
+ _, err := fmt.Fprintf(n.writer, "%s = %s\n", name, value)
+ return err
+}
+
+func (n *ninjaWriter) ScopedAssign(name, value string) error {
+ n.justDidBlankLine = false
+ _, err := fmt.Fprintf(n.writer, "%s%s = %s\n", indentString[:indentWidth], name, value)
+ return err
+}
+
+func (n *ninjaWriter) Default(targets ...string) error {
+ n.justDidBlankLine = false
+
+ const lineWrapLen = len(" $")
+ const maxLineLen = lineWidth - lineWrapLen
+
+ wrapper := ninjaWriterWithWrap{
+ ninjaWriter: n,
+ maxLineLen: maxLineLen,
+ }
+
+ wrapper.WriteString("default")
+
+ for _, target := range targets {
+ wrapper.WriteString(" " + target)
+ }
+
+ return wrapper.Flush()
+}
+
+func (n *ninjaWriter) BlankLine() (err error) {
+ // We don't output multiple blank lines in a row.
+ if !n.justDidBlankLine {
+ n.justDidBlankLine = true
+ _, err = io.WriteString(n.writer, "\n")
+ }
+ return err
+}
+
+type ninjaWriterWithWrap struct {
+ *ninjaWriter
+ maxLineLen int
+ writtenLen int
+ err error
+}
+
+func (n *ninjaWriterWithWrap) writeString(s string, space bool) {
+ if n.err != nil {
+ return
+ }
+
+ spaceLen := 0
+ if space {
+ spaceLen = 1
+ }
+
+ if n.writtenLen+len(s)+spaceLen > n.maxLineLen {
+ _, n.err = io.WriteString(n.writer, " $\n")
+ if n.err != nil {
+ return
+ }
+ _, n.err = io.WriteString(n.writer, indentString[:indentWidth*2])
+ if n.err != nil {
+ return
+ }
+ n.writtenLen = indentWidth * 2
+ s = strings.TrimLeftFunc(s, unicode.IsSpace)
+ } else if space {
+ io.WriteString(n.writer, " ")
+ n.writtenLen++
+ }
+
+ _, n.err = io.WriteString(n.writer, s)
+ n.writtenLen += len(s)
+}
+
+func (n *ninjaWriterWithWrap) WriteString(s string) {
+ n.writeString(s, false)
+}
+
+func (n *ninjaWriterWithWrap) WriteStringWithSpace(s string) {
+ n.writeString(s, true)
+}
+
+func (n *ninjaWriterWithWrap) Flush() error {
+ if n.err != nil {
+ return n.err
+ }
+ _, err := io.WriteString(n.writer, "\n")
+ return err
+}
diff --git a/blueprint/ninja_writer_test.go b/blueprint/ninja_writer_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..7b34d906c911d669ce1c397f2cadb47112881ef3
--- /dev/null
+++ b/blueprint/ninja_writer_test.go
@@ -0,0 +1,120 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package blueprint
+
+import (
+ "bytes"
+ "testing"
+)
+
+func ck(err error) {
+ if err != nil {
+ panic(err)
+ }
+}
+
+var ninjaWriterTestCases = []struct {
+ input func(w *ninjaWriter)
+ output string
+}{
+ {
+ input: func(w *ninjaWriter) {
+ ck(w.Comment("foo"))
+ },
+ output: "# foo\n",
+ },
+ {
+ input: func(w *ninjaWriter) {
+ ck(w.Pool("foo"))
+ },
+ output: "pool foo\n",
+ },
+ {
+ input: func(w *ninjaWriter) {
+ ck(w.Rule("foo"))
+ },
+ output: "rule foo\n",
+ },
+ {
+ input: func(w *ninjaWriter) {
+ ck(w.Build("foo comment", "foo", []string{"o1", "o2"}, []string{"e1", "e2"},
+ []string{"i1", "i2"}, []string{"oo1", "oo2"}))
+ },
+ output: "# foo comment\nbuild o1 o2: foo e1 e2 | i1 i2 || oo1 oo2\n",
+ },
+ {
+ input: func(w *ninjaWriter) {
+ ck(w.Default("foo"))
+ },
+ output: "default foo\n",
+ },
+ {
+ input: func(w *ninjaWriter) {
+ ck(w.Assign("foo", "bar"))
+ },
+ output: "foo = bar\n",
+ },
+ {
+ input: func(w *ninjaWriter) {
+ ck(w.ScopedAssign("foo", "bar"))
+ },
+ output: " foo = bar\n",
+ },
+ {
+ input: func(w *ninjaWriter) {
+ ck(w.BlankLine())
+ },
+ output: "\n",
+ },
+ {
+ input: func(w *ninjaWriter) {
+ ck(w.Pool("p"))
+ ck(w.ScopedAssign("depth", "3"))
+ ck(w.BlankLine())
+ ck(w.Comment("here comes a rule"))
+ ck(w.Rule("r"))
+ ck(w.ScopedAssign("command", "echo out: $out in: $in _arg: $_arg"))
+ ck(w.ScopedAssign("pool", "p"))
+ ck(w.BlankLine())
+ ck(w.Build("r comment", "r", []string{"foo.o"}, []string{"foo.in"}, nil, nil))
+ ck(w.ScopedAssign("_arg", "arg value"))
+ },
+ output: `pool p
+ depth = 3
+
+# here comes a rule
+rule r
+ command = echo out: $out in: $in _arg: $_arg
+ pool = p
+
+# r comment
+build foo.o: r foo.in
+ _arg = arg value
+`,
+ },
+}
+
+func TestNinjaWriter(t *testing.T) {
+ for i, testCase := range ninjaWriterTestCases {
+ buf := bytes.NewBuffer(nil)
+ w := newNinjaWriter(buf)
+ testCase.input(w)
+ if buf.String() != testCase.output {
+ t.Errorf("incorrect output for test case %d", i)
+ t.Errorf(" expected: %q", testCase.output)
+ t.Errorf(" got: %q", buf.String())
+ }
+ }
+}
diff --git a/blueprint/package_ctx.go b/blueprint/package_ctx.go
new file mode 100644
index 0000000000000000000000000000000000000000..cedee046f022144130b5336a41221bd83652d792
--- /dev/null
+++ b/blueprint/package_ctx.go
@@ -0,0 +1,872 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package blueprint
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+)
+
+// A PackageContext provides a way to create package-scoped Ninja pools,
+// rules, and variables. A Go package should create a single unexported
+// package-scoped PackageContext variable that it uses to create all package-
+// scoped Ninja object definitions. This PackageContext object should then be
+// passed to all calls to define module- or singleton-specific Ninja
+// definitions. For example:
+//
+// package blah
+//
+// import (
+// "blueprint"
+// )
+//
+// var (
+// pctx = NewPackageContext("path/to/blah")
+//
+// myPrivateVar = pctx.StaticVariable("myPrivateVar", "abcdef")
+// MyExportedVar = pctx.StaticVariable("MyExportedVar", "$myPrivateVar 123456!")
+//
+// SomeRule = pctx.StaticRule(...)
+// )
+//
+// // ...
+//
+// func (m *MyModule) GenerateBuildActions(ctx blueprint.Module) {
+// ctx.Build(pctx, blueprint.BuildParams{
+// Rule: SomeRule,
+// Outputs: []string{"$myPrivateVar"},
+// })
+// }
+type PackageContext interface {
+ Import(pkgPath string)
+ ImportAs(as, pkgPath string)
+
+ StaticVariable(name, value string) Variable
+ VariableFunc(name string, f func(config interface{}) (string, error)) Variable
+ VariableConfigMethod(name string, method interface{}) Variable
+
+ StaticPool(name string, params PoolParams) Pool
+ PoolFunc(name string, f func(interface{}) (PoolParams, error)) Pool
+
+ StaticRule(name string, params RuleParams, argNames ...string) Rule
+ RuleFunc(name string, f func(interface{}) (RuleParams, error), argNames ...string) Rule
+
+ AddNinjaFileDeps(deps ...string)
+
+ getScope() *basicScope
+}
+
+type packageContext struct {
+ fullName string
+ shortName string
+ pkgPath string
+ scope *basicScope
+ ninjaFileDeps []string
+}
+
+var _ PackageContext = &packageContext{}
+
+func (p *packageContext) getScope() *basicScope {
+ return p.scope
+}
+
+var packageContexts = map[string]*packageContext{}
+
+// NewPackageContext creates a PackageContext object for a given package. The
+// pkgPath argument should always be set to the full path used to import the
+// package. This function may only be called from a Go package's init()
+// function or as part of a package-scoped variable initialization.
+func NewPackageContext(pkgPath string) PackageContext {
+	checkCalledFromInit()
+
+	if _, present := packageContexts[pkgPath]; present {
+		panic(fmt.Errorf("package %q already has a package context", pkgPath))
+	}
+
+	pkgName := pkgPathToName(pkgPath)
+	err := validateNinjaName(pkgName)
+	if err != nil {
+		panic(err)
+	}
+
+	i := strings.LastIndex(pkgPath, "/")
+	shortName := pkgPath[i+1:]
+
+	p := &packageContext{
+		fullName:  pkgName,
+		shortName: shortName,
+		pkgPath:   pkgPath,
+		scope:     newScope(nil),
+	}
+
+	packageContexts[pkgPath] = p
+
+	return p
+}
+
+var Phony Rule = &builtinRule{
+ name_: "phony",
+}
+
+var Console Pool = &builtinPool{
+ name_: "console",
+}
+
+var errRuleIsBuiltin = errors.New("the rule is a built-in")
+var errPoolIsBuiltin = errors.New("the pool is a built-in")
+var errVariableIsArg = errors.New("argument variables have no value")
+
+// checkCalledFromInit panics if a Go package's init function is not on the
+// call stack.
+func checkCalledFromInit() {
+ for skip := 3; ; skip++ {
+ _, funcName, ok := callerName(skip)
+ if !ok {
+ panic("not called from an init func")
+ }
+
+ if funcName == "init" || strings.HasPrefix(funcName, "init·") {
+ return
+ }
+ }
+}
+
+// callerName returns the package path and function name of the calling
+// function. The skip argument has the same meaning as the skip argument of
+// runtime.Callers.
+func callerName(skip int) (pkgPath, funcName string, ok bool) {
+	var pc [1]uintptr
+	n := runtime.Callers(skip+1, pc[:])
+	if n != 1 {
+		return "", "", false
+	}
+
+	f := runtime.FuncForPC(pc[0])
+	fullName := f.Name()
+
+	lastDotIndex := strings.LastIndex(fullName, ".")
+	if lastDotIndex == -1 {
+		panic("unable to distinguish function name from package")
+	}
+
+	if fullName[lastDotIndex-1] == ')' {
+		// The caller is a method on some type, so its name looks like
+		// "pkg/path.(type).method". We need to go back one dot farther to get
+		// to the package name.
+		lastDotIndex = strings.LastIndex(fullName[:lastDotIndex], ".")
+	}
+
+	pkgPath = fullName[:lastDotIndex]
+	funcName = fullName[lastDotIndex+1:]
+	ok = true
+	return
+}
+
+// pkgPathToName makes a Ninja-friendly name out of a Go package name by
+// replacing all the '/' characters with '.'. We assume the results are
+// unique, though this is not 100% guaranteed for Go package names that
+// already contain '.' characters. Disallowing package names with '.' isn't
+// reasonable since many package names contain the name of the hosting site
+// (e.g. "code.google.com"). In practice this probably isn't really a
+// problem.
+func pkgPathToName(pkgPath string) string {
+	return strings.Replace(pkgPath, "/", ".", -1)
+}
+
+// Import enables access to the exported Ninja pools, rules, and variables
+// that are defined at the package scope of another Go package. Go's
+// visibility rules apply to these references - capitalized names indicate
+// that something is exported. It may only be called from a Go package's
+// init() function. The Go package path passed to Import must have already
+// been imported into the Go package using a Go import statement. The
+// imported variables may then be accessed from Ninja strings as
+// "${pkg.Variable}", while the imported rules can simply be accessed as
+// exported Go variables from the package. For example:
+//
+// import (
+// "blueprint"
+// "foo/bar"
+// )
+//
+// var pctx = NewPackagePath("blah")
+//
+// func init() {
+// pctx.Import("foo/bar")
+// }
+//
+// ...
+//
+// func (m *MyModule) GenerateBuildActions(ctx blueprint.Module) {
+// ctx.Build(pctx, blueprint.BuildParams{
+// Rule: bar.SomeRule,
+// Outputs: []string{"${bar.SomeVariable}"},
+// })
+// }
+//
+// Note that the local name used to refer to the package in Ninja variable names
+// is derived from pkgPath by extracting the last path component. This differs
+// from Go's import declaration, which derives the local name from the package
+// clause in the imported package. By convention these names are made to match,
+// but this is not required.
+func (p *packageContext) Import(pkgPath string) {
+ checkCalledFromInit()
+ importPkg, ok := packageContexts[pkgPath]
+ if !ok {
+ panic(fmt.Errorf("package %q has no context", pkgPath))
+ }
+
+ err := p.scope.AddImport(importPkg.shortName, importPkg.scope)
+ if err != nil {
+ panic(err)
+ }
+}
+
+// ImportAs provides the same functionality as Import, but it allows the local
+// name that will be used to refer to the package to be specified explicitly.
+// It may only be called from a Go package's init() function.
+func (p *packageContext) ImportAs(as, pkgPath string) {
+ checkCalledFromInit()
+ importPkg, ok := packageContexts[pkgPath]
+ if !ok {
+ panic(fmt.Errorf("package %q has no context", pkgPath))
+ }
+
+ err := validateNinjaName(as)
+ if err != nil {
+ panic(err)
+ }
+
+ err = p.scope.AddImport(as, importPkg.scope)
+ if err != nil {
+ panic(err)
+ }
+}
+
+type staticVariable struct {
+ pctx *packageContext
+ name_ string
+ value_ string
+}
+
+// StaticVariable returns a Variable whose value does not depend on any
+// configuration information. It may only be called during a Go package's
+// initialization - either from the init() function or as part of a package-
+// scoped variable's initialization.
+//
+// This function is usually used to initialize a package-scoped Go variable that
+// represents a Ninja variable that will be output. The name argument should
+// exactly match the Go variable name, and the value string may reference other
+// Ninja variables that are visible within the calling Go package.
+func (p *packageContext) StaticVariable(name, value string) Variable {
+ checkCalledFromInit()
+ err := validateNinjaName(name)
+ if err != nil {
+ panic(err)
+ }
+
+ v := &staticVariable{p, name, value}
+ err = p.scope.AddVariable(v)
+ if err != nil {
+ panic(err)
+ }
+
+ return v
+}
+
+func (v *staticVariable) packageContext() *packageContext {
+ return v.pctx
+}
+
+func (v *staticVariable) name() string {
+ return v.name_
+}
+
+func (v *staticVariable) fullName(pkgNames map[*packageContext]string) string {
+ return packageNamespacePrefix(pkgNames[v.pctx]) + v.name_
+}
+
+func (v *staticVariable) value(interface{}) (*ninjaString, error) {
+ ninjaStr, err := parseNinjaString(v.pctx.scope, v.value_)
+ if err != nil {
+ err = fmt.Errorf("error parsing variable %s value: %s", v, err)
+ panic(err)
+ }
+ return ninjaStr, nil
+}
+
+func (v *staticVariable) String() string {
+ return v.pctx.pkgPath + "." + v.name_
+}
+
+// variableFunc is a Variable whose Ninja value is computed from the config
+// object by a user-supplied function.
+type variableFunc struct {
+	pctx   *packageContext
+	name_  string
+	value_ func(interface{}) (string, error)
+}
+
+// VariableFunc returns a Variable whose value is determined by a function that
+// takes a config object as input and returns either the variable value or an
+// error. It may only be called during a Go package's initialization - either
+// from the init() function or as part of a package-scoped variable's
+// initialization.
+//
+// This function is usually used to initialize a package-scoped Go variable that
+// represents a Ninja variable that will be output. The name argument should
+// exactly match the Go variable name, and the value string returned by f may
+// reference other Ninja variables that are visible within the calling Go
+// package.
+func (p *packageContext) VariableFunc(name string,
+	f func(config interface{}) (string, error)) Variable {
+
+	checkCalledFromInit()
+
+	err := validateNinjaName(name)
+	if err != nil {
+		panic(err)
+	}
+
+	v := &variableFunc{p, name, f}
+	err = p.scope.AddVariable(v)
+	if err != nil {
+		panic(err)
+	}
+
+	return v
+}
+
+// VariableConfigMethod returns a Variable whose value is determined by calling
+// a method on the config object. The method must take no arguments and return
+// a single string that will be the variable's value. It may only be called
+// during a Go package's initialization - either from the init() function or as
+// part of a package-scoped variable's initialization.
+//
+// This function is usually used to initialize a package-scoped Go variable that
+// represents a Ninja variable that will be output. The name argument should
+// exactly match the Go variable name, and the value string returned by method
+// may reference other Ninja variables that are visible within the calling Go
+// package.
+func (p *packageContext) VariableConfigMethod(name string,
+	method interface{}) Variable {
+
+	checkCalledFromInit()
+
+	err := validateNinjaName(name)
+	if err != nil {
+		panic(err)
+	}
+
+	// The method's shape (func with one input, one string output) is checked
+	// once here; validateVariableMethod panics on any mismatch.
+	methodValue := reflect.ValueOf(method)
+	validateVariableMethod(name, methodValue)
+
+	// Wrap the reflective call in the plain func(config) (string, error)
+	// signature that variableFunc expects. The type assertion on the result
+	// is safe because the return kind was validated above.
+	fun := func(config interface{}) (string, error) {
+		result := methodValue.Call([]reflect.Value{reflect.ValueOf(config)})
+		resultStr := result[0].Interface().(string)
+		return resultStr, nil
+	}
+
+	v := &variableFunc{p, name, fun}
+	err = p.scope.AddVariable(v)
+	if err != nil {
+		panic(err)
+	}
+
+	return v
+}
+
+// packageContext returns the package context the variable was declared in.
+func (v *variableFunc) packageContext() *packageContext {
+	return v.pctx
+}
+
+// name returns the variable's short (package-local) name.
+func (v *variableFunc) name() string {
+	return v.name_
+}
+
+// fullName returns the variable's name qualified by its package's namespace.
+func (v *variableFunc) fullName(pkgNames map[*packageContext]string) string {
+	return packageNamespacePrefix(pkgNames[v.pctx]) + v.name_
+}
+
+// value invokes the user-supplied function with config and parses the result
+// in the package's scope. Errors from the function are returned to the
+// caller; parse errors panic because they indicate a bad value string was
+// produced by the declaring package.
+func (v *variableFunc) value(config interface{}) (*ninjaString, error) {
+	value, err := v.value_(config)
+	if err != nil {
+		return nil, err
+	}
+
+	ninjaStr, err := parseNinjaString(v.pctx.scope, value)
+	if err != nil {
+		err = fmt.Errorf("error parsing variable %s value: %s", v, err)
+		panic(err)
+	}
+
+	return ninjaStr, nil
+}
+
+// String returns a human-readable identifier, e.g. "pkg/path.VarName".
+func (v *variableFunc) String() string {
+	return v.pctx.pkgPath + "." + v.name_
+}
+
+// validateVariableMethod panics unless methodValue is a function that takes
+// exactly one argument and returns exactly one value of kind string. Used by
+// VariableConfigMethod to validate the caller-supplied config method before
+// it is ever invoked.
+func validateVariableMethod(name string, methodValue reflect.Value) {
+	t := methodValue.Type()
+	switch {
+	case t.Kind() != reflect.Func:
+		panic(fmt.Errorf("method given for variable %s is not a function",
+			name))
+	case t.NumIn() != 1:
+		panic(fmt.Errorf("method for variable %s has %d inputs (should be 1)",
+			name, t.NumIn()))
+	case t.NumOut() != 1:
+		panic(fmt.Errorf("method for variable %s has %d outputs (should be 1)",
+			name, t.NumOut()))
+	case t.Out(0).Kind() != reflect.String:
+		panic(fmt.Errorf("method for variable %s does not return a string",
+			name))
+	}
+}
+
+// An argVariable is a Variable that exists only when it is set by a build
+// statement to pass a value to the rule being invoked. It has no value, so it
+// can never be used to create a Ninja assignment statement. It is inserted
+// into the rule's scope, which is used for name lookups within the rule and
+// when assigning argument values as part of a build statement.
+type argVariable struct {
+	name_ string
+}
+
+// packageContext panics: arg variables do not belong to any package.
+func (v *argVariable) packageContext() *packageContext {
+	panic("this should not be called")
+}
+
+// name returns the argument's name.
+func (v *argVariable) name() string {
+	return v.name_
+}
+
+// fullName is the bare name: arg variables are never namespaced.
+func (v *argVariable) fullName(pkgNames map[*packageContext]string) string {
+	return v.name_
+}
+
+// value always fails: an arg variable only receives values from build
+// statements and can never produce one itself.
+func (v *argVariable) value(config interface{}) (*ninjaString, error) {
+	return nil, errVariableIsArg
+}
+
+// String uses a ":" prefix to mark the variable as package-less.
+func (v *argVariable) String() string {
+	return ":" + v.name_
+}
+
+// staticPool is a Pool whose parameters are fixed at declaration time.
+type staticPool struct {
+	pctx   *packageContext
+	name_  string
+	params PoolParams
+}
+
+// StaticPool returns a Pool whose value does not depend on any configuration
+// information. It may only be called during a Go package's initialization -
+// either from the init() function or as part of a package-scoped Go variable's
+// initialization.
+//
+// This function is usually used to initialize a package-scoped Go variable that
+// represents a Ninja pool that will be output. The name argument should
+// exactly match the Go variable name, and the params fields may reference other
+// Ninja variables that are visible within the calling Go package.
+func (p *packageContext) StaticPool(name string, params PoolParams) Pool {
+	checkCalledFromInit()
+
+	err := validateNinjaName(name)
+	if err != nil {
+		panic(err)
+	}
+
+	pool := &staticPool{p, name, params}
+	err = p.scope.AddPool(pool)
+	if err != nil {
+		panic(err)
+	}
+
+	return pool
+}
+
+// packageContext returns the package context the pool was declared in.
+func (p *staticPool) packageContext() *packageContext {
+	return p.pctx
+}
+
+// name returns the pool's short (package-local) name.
+func (p *staticPool) name() string {
+	return p.name_
+}
+
+// fullName returns the pool's name qualified by its package's namespace.
+func (p *staticPool) fullName(pkgNames map[*packageContext]string) string {
+	return packageNamespacePrefix(pkgNames[p.pctx]) + p.name_
+}
+
+// def parses the stored parameters; config is ignored since the pool is
+// static. A parse failure panics - it indicates a programming error in the
+// declaring package.
+func (p *staticPool) def(config interface{}) (*poolDef, error) {
+	def, err := parsePoolParams(p.pctx.scope, &p.params)
+	if err != nil {
+		panic(fmt.Errorf("error parsing PoolParams for %s: %s", p, err))
+	}
+	return def, nil
+}
+
+// String returns a human-readable identifier, e.g. "pkg/path.PoolName".
+func (p *staticPool) String() string {
+	return p.pctx.pkgPath + "." + p.name_
+}
+
+// poolFunc is a Pool whose parameters are computed from the config object by
+// a user-supplied function.
+type poolFunc struct {
+	pctx       *packageContext
+	name_      string
+	paramsFunc func(interface{}) (PoolParams, error)
+}
+
+// PoolFunc returns a Pool whose value is determined by a function that takes a
+// config object as input and returns either the pool parameters or an error. It
+// may only be called during a Go package's initialization - either from the
+// init() function or as part of a package-scoped variable's initialization.
+//
+// This function is usually used to initialize a package-scoped Go variable that
+// represents a Ninja pool that will be output. The name argument should
+// exactly match the Go variable name, and the string fields of the PoolParams
+// returned by f may reference other Ninja variables that are visible within the
+// calling Go package.
+func (p *packageContext) PoolFunc(name string, f func(interface{}) (PoolParams,
+	error)) Pool {
+
+	checkCalledFromInit()
+
+	err := validateNinjaName(name)
+	if err != nil {
+		panic(err)
+	}
+
+	pool := &poolFunc{p, name, f}
+	err = p.scope.AddPool(pool)
+	if err != nil {
+		panic(err)
+	}
+
+	return pool
+}
+
+func (p *poolFunc) packageContext() *packageContext {
+ return p.pctx
+}
+
+func (p *poolFunc) name() string {
+ return p.name_
+}
+
+func (p *poolFunc) fullName(pkgNames map[*packageContext]string) string {
+ return packageNamespacePrefix(pkgNames[p.pctx]) + p.name_
+}
+
+func (p *poolFunc) def(config interface{}) (*poolDef, error) {
+ params, err := p.paramsFunc(config)
+ if err != nil {
+ return nil, err
+ }
+ def, err := parsePoolParams(p.pctx.scope, ¶ms)
+ if err != nil {
+ panic(fmt.Errorf("error parsing PoolParams for %s: %s", p, err))
+ }
+ return def, nil
+}
+
+func (p *poolFunc) String() string {
+ return p.pctx.pkgPath + "." + p.name_
+}
+
+// builtinPool represents a pool built into Ninja itself (e.g. "console").
+// It belongs to no package and is never emitted as a pool definition.
+type builtinPool struct {
+	name_ string
+}
+
+// packageContext returns nil: builtin pools have no declaring package.
+func (p *builtinPool) packageContext() *packageContext {
+	return nil
+}
+
+// name returns the pool's name.
+func (p *builtinPool) name() string {
+	return p.name_
+}
+
+// fullName is the bare name: builtin pools are never namespaced.
+func (p *builtinPool) fullName(pkgNames map[*packageContext]string) string {
+	return p.name_
+}
+
+// def always fails: builtin pools must never be written to the output file.
+func (p *builtinPool) def(config interface{}) (*poolDef, error) {
+	return nil, errPoolIsBuiltin
+}
+
+// String uses a ":" prefix to mark the pool as package-less.
+func (p *builtinPool) String() string {
+	return ":" + p.name_
+}
+
+// staticRule is a Rule whose parameters are fixed at declaration time. Its
+// scope (package scope plus arg variables) is created lazily on first use.
+type staticRule struct {
+	pctx     *packageContext
+	name_    string
+	params   RuleParams
+	argNames map[string]bool
+	scope_   *basicScope
+	sync.Mutex // protects scope_ during lazy creation
+}
+
+// StaticRule returns a Rule whose value does not depend on any configuration
+// information. It may only be called during a Go package's initialization -
+// either from the init() function or as part of a package-scoped Go variable's
+// initialization.
+//
+// This function is usually used to initialize a package-scoped Go variable that
+// represents a Ninja rule that will be output. The name argument should
+// exactly match the Go variable name, and the params fields may reference other
+// Ninja variables that are visible within the calling Go package.
+//
+// The argNames arguments list Ninja variables that may be overridden by Ninja
+// build statements that invoke the rule. These arguments may be referenced in
+// any of the string fields of params. Arguments can shadow package-scoped
+// variables defined within the caller's Go package, but they may not shadow
+// those defined in another package. Shadowing a package-scoped variable
+// results in the package-scoped variable's value being used for build
+// statements that do not override the argument. For argument names that do not
+// shadow package-scoped variables the default value is an empty string.
+func (p *packageContext) StaticRule(name string, params RuleParams,
+	argNames ...string) Rule {
+
+	checkCalledFromInit()
+
+	err := validateNinjaName(name)
+	if err != nil {
+		panic(err)
+	}
+
+	err = validateArgNames(argNames)
+	if err != nil {
+		panic(fmt.Errorf("invalid argument name: %s", err))
+	}
+
+	// Store arg names as a set for O(1) isArg lookups.
+	argNamesSet := make(map[string]bool)
+	for _, argName := range argNames {
+		argNamesSet[argName] = true
+	}
+
+	ruleScope := (*basicScope)(nil) // This will get created lazily
+
+	r := &staticRule{
+		pctx:     p,
+		name_:    name,
+		params:   params,
+		argNames: argNamesSet,
+		scope_:   ruleScope,
+	}
+	err = p.scope.AddRule(r)
+	if err != nil {
+		panic(err)
+	}
+
+	return r
+}
+
+// packageContext returns the package context the rule was declared in.
+func (r *staticRule) packageContext() *packageContext {
+	return r.pctx
+}
+
+// name returns the rule's short (package-local) name.
+func (r *staticRule) name() string {
+	return r.name_
+}
+
+// fullName returns the rule's name qualified by its package's namespace.
+func (r *staticRule) fullName(pkgNames map[*packageContext]string) string {
+	return packageNamespacePrefix(pkgNames[r.pctx]) + r.name_
+}
+
+// def parses the stored parameters in the rule's scope; the config argument
+// is ignored since the rule is static. Parse failures panic - they indicate a
+// programming error in the declaring package.
+func (r *staticRule) def(interface{}) (*ruleDef, error) {
+	def, err := parseRuleParams(r.scope(), &r.params)
+	if err != nil {
+		panic(fmt.Errorf("error parsing RuleParams for %s: %s", r, err))
+	}
+	return def, nil
+}
+
+// scope returns the rule's scope, creating it on first use (guarded by the
+// embedded mutex so concurrent callers see a single instance).
+func (r *staticRule) scope() *basicScope {
+	// We lazily create the scope so that all the package-scoped variables get
+	// declared before the args are created. Otherwise we could incorrectly
+	// shadow a package-scoped variable with an arg variable.
+	r.Lock()
+	defer r.Unlock()
+
+	if r.scope_ == nil {
+		r.scope_ = makeRuleScope(r.pctx.scope, r.argNames)
+	}
+	return r.scope_
+}
+
+// isArg reports whether argName is one of the rule's declared arguments.
+func (r *staticRule) isArg(argName string) bool {
+	return r.argNames[argName]
+}
+
+// String returns a human-readable identifier, e.g. "pkg/path.RuleName".
+func (r *staticRule) String() string {
+	return r.pctx.pkgPath + "." + r.name_
+}
+
+// ruleFunc is a Rule whose parameters are computed from the config object by
+// a user-supplied function. Its scope is created lazily on first use.
+type ruleFunc struct {
+	pctx       *packageContext
+	name_      string
+	paramsFunc func(interface{}) (RuleParams, error)
+	argNames   map[string]bool
+	scope_     *basicScope
+	sync.Mutex // protects scope_ during lazy creation
+}
+
+// RuleFunc returns a Rule whose value is determined by a function that takes a
+// config object as input and returns either the rule parameters or an error. It
+// may only be called during a Go package's initialization - either from the
+// init() function or as part of a package-scoped variable's initialization.
+//
+// This function is usually used to initialize a package-scoped Go variable that
+// represents a Ninja rule that will be output. The name argument should
+// exactly match the Go variable name, and the string fields of the RuleParams
+// returned by f may reference other Ninja variables that are visible within the
+// calling Go package.
+//
+// The argNames arguments list Ninja variables that may be overridden by Ninja
+// build statements that invoke the rule. These arguments may be referenced in
+// any of the string fields of the RuleParams returned by f. Arguments can
+// shadow package-scoped variables defined within the caller's Go package, but
+// they may not shadow those defined in another package. Shadowing a package-
+// scoped variable results in the package-scoped variable's value being used for
+// build statements that do not override the argument. For argument names that
+// do not shadow package-scoped variables the default value is an empty string.
+func (p *packageContext) RuleFunc(name string, f func(interface{}) (RuleParams,
+	error), argNames ...string) Rule {
+
+	checkCalledFromInit()
+
+	err := validateNinjaName(name)
+	if err != nil {
+		panic(err)
+	}
+
+	err = validateArgNames(argNames)
+	if err != nil {
+		panic(fmt.Errorf("invalid argument name: %s", err))
+	}
+
+	// Store arg names as a set for O(1) isArg lookups.
+	argNamesSet := make(map[string]bool)
+	for _, argName := range argNames {
+		argNamesSet[argName] = true
+	}
+
+	ruleScope := (*basicScope)(nil) // This will get created lazily
+
+	rule := &ruleFunc{
+		pctx:       p,
+		name_:      name,
+		paramsFunc: f,
+		argNames:   argNamesSet,
+		scope_:     ruleScope,
+	}
+	err = p.scope.AddRule(rule)
+	if err != nil {
+		panic(err)
+	}
+
+	return rule
+}
+
+func (r *ruleFunc) packageContext() *packageContext {
+ return r.pctx
+}
+
+func (r *ruleFunc) name() string {
+ return r.name_
+}
+
+func (r *ruleFunc) fullName(pkgNames map[*packageContext]string) string {
+ return packageNamespacePrefix(pkgNames[r.pctx]) + r.name_
+}
+
+func (r *ruleFunc) def(config interface{}) (*ruleDef, error) {
+ params, err := r.paramsFunc(config)
+ if err != nil {
+ return nil, err
+ }
+ def, err := parseRuleParams(r.scope(), ¶ms)
+ if err != nil {
+ panic(fmt.Errorf("error parsing RuleParams for %s: %s", r, err))
+ }
+ return def, nil
+}
+
+func (r *ruleFunc) scope() *basicScope {
+ // We lazily create the scope so that all the global variables get declared
+ // before the args are created. Otherwise we could incorrectly shadow a
+ // global variable with an arg variable.
+ r.Lock()
+ defer r.Unlock()
+
+ if r.scope_ == nil {
+ r.scope_ = makeRuleScope(r.pctx.scope, r.argNames)
+ }
+ return r.scope_
+}
+
+func (r *ruleFunc) isArg(argName string) bool {
+ return r.argNames[argName]
+}
+
+func (r *ruleFunc) String() string {
+ return r.pctx.pkgPath + "." + r.name_
+}
+
+// builtinRule represents a rule built into Ninja itself (e.g. "phony"). It
+// belongs to no package, has no package-scoped variables in scope, and is
+// never emitted as a rule definition.
+type builtinRule struct {
+	name_  string
+	scope_ *basicScope
+	sync.Mutex // protects scope_ during lazy creation
+}
+
+// packageContext returns nil: builtin rules have no declaring package.
+func (r *builtinRule) packageContext() *packageContext {
+	return nil
+}
+
+// name returns the rule's name.
+func (r *builtinRule) name() string {
+	return r.name_
+}
+
+// fullName is the bare name: builtin rules are never namespaced.
+func (r *builtinRule) fullName(pkgNames map[*packageContext]string) string {
+	return r.name_
+}
+
+// def always fails: builtin rules must never be written to the output file.
+func (r *builtinRule) def(config interface{}) (*ruleDef, error) {
+	return nil, errRuleIsBuiltin
+}
+
+// scope lazily creates an empty rule scope (no parent, no args), guarded by
+// the embedded mutex.
+func (r *builtinRule) scope() *basicScope {
+	r.Lock()
+	defer r.Unlock()
+
+	if r.scope_ == nil {
+		r.scope_ = makeRuleScope(nil, nil)
+	}
+	return r.scope_
+}
+
+// isArg is always false: builtin rules declare no arguments.
+func (r *builtinRule) isArg(argName string) bool {
+	return false
+}
+
+// String uses a ":" prefix to mark the rule as package-less.
+func (r *builtinRule) String() string {
+	return ":" + r.name_
+}
+
+// AddNinjaFileDeps registers paths the generated Ninja file depends on; a
+// change to any of them triggers regeneration.
+func (p *packageContext) AddNinjaFileDeps(deps ...string) {
+	p.ninjaFileDeps = append(p.ninjaFileDeps, deps...)
+}
diff --git a/blueprint/parser/modify.go b/blueprint/parser/modify.go
new file mode 100644
index 0000000000000000000000000000000000000000..1b11e2c80b4ccd6893b148793e4a12b87ae69a5f
--- /dev/null
+++ b/blueprint/parser/modify.go
@@ -0,0 +1,61 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+// AddStringToList appends the string s to the list held by value, unless it
+// is already present. It reports whether the list was modified. The new
+// element is positioned at the list's EndPos. Panics if value is not a list
+// or if any existing element is not a string.
+func AddStringToList(value *Value, s string) (modified bool) {
+	if value.Type != List {
+		panic("expected list value, got " + value.Type.String())
+	}
+
+	for _, v := range value.ListValue {
+		if v.Type != String {
+			// Report the offending element's type. The original used
+			// value.Type here, which always printed "list" and hid the
+			// actual mismatched type.
+			panic("expected string in list, got " + v.Type.String())
+		}
+
+		if v.StringValue == s {
+			// string already exists
+			return false
+		}
+	}
+
+	value.ListValue = append(value.ListValue, Value{
+		Type:        String,
+		Pos:         value.EndPos,
+		StringValue: s,
+	})
+
+	return true
+}
+
+// RemoveStringFromList deletes the first occurrence of s from the list held
+// by value and reports whether the list was modified. Panics if value is not
+// a list or if an element examined before the match is not a string.
+func RemoveStringFromList(value *Value, s string) (modified bool) {
+	if value.Type != List {
+		panic("expected list value, got " + value.Type.String())
+	}
+
+	for i, v := range value.ListValue {
+		if v.Type != String {
+			// Report the offending element's type. The original used
+			// value.Type here, which always printed "list" and hid the
+			// actual mismatched type.
+			panic("expected string in list, got " + v.Type.String())
+		}
+
+		if v.StringValue == s {
+			value.ListValue = append(value.ListValue[:i], value.ListValue[i+1:]...)
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/blueprint/parser/parser.go b/blueprint/parser/parser.go
new file mode 100644
index 0000000000000000000000000000000000000000..fb931afc5b5f3a00bcb1474f6989192317c69a53
--- /dev/null
+++ b/blueprint/parser/parser.go
@@ -0,0 +1,831 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "sort"
+ "strconv"
+ "strings"
+ "text/scanner"
+)
+
+// errTooManyErrors is panicked by the parser to abort once maxErrors has been
+// reached; parse() recovers it and returns the accumulated errors.
+var errTooManyErrors = errors.New("too many errors")
+
+// maxErrors of 1 means parsing stops at the first error.
+const maxErrors = 1
+
+// ParseError wraps a parse error with the position at which it occurred.
+type ParseError struct {
+	Err error
+	Pos scanner.Position
+}
+
+// Error formats the error as "position: message".
+func (e *ParseError) Error() string {
+	return fmt.Sprintf("%s: %s", e.Pos, e.Err)
+}
+
+// File is the parsed representation of a single Blueprints file: its
+// top-level definitions and all comments encountered while scanning.
+type File struct {
+	Name     string
+	Defs     []Definition
+	Comments []Comment
+}
+
+// parse runs the parser to EOF and packages the results into a File. The
+// deferred recover converts the errTooManyErrors abort panic into a normal
+// error return; any other panic is re-raised.
+func parse(p *parser) (file *File, errs []error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if r == errTooManyErrors {
+				errs = p.errors
+				return
+			}
+			panic(r)
+		}
+	}()
+
+	defs := p.parseDefinitions()
+	p.accept(scanner.EOF)
+	errs = p.errors
+	comments := p.comments
+
+	return &File{
+		Name:     p.scanner.Filename,
+		Defs:     defs,
+		Comments: comments,
+	}, errs
+
+}
+
+// ParseAndEval parses r with expression evaluation enabled: variable
+// references are resolved against scope and operators are applied, so parsed
+// Values carry their computed results.
+func ParseAndEval(filename string, r io.Reader, scope *Scope) (file *File, errs []error) {
+	p := newParser(r, scope)
+	p.eval = true
+	p.scanner.Filename = filename
+
+	return parse(p)
+}
+
+// Parse parses r without evaluating expressions; the syntax tree records
+// variable names and operator structure but not computed values.
+func Parse(filename string, r io.Reader, scope *Scope) (file *File, errs []error) {
+	p := newParser(r, scope)
+	p.scanner.Filename = filename
+
+	return parse(p)
+}
+
+// parser holds the scanner state, the current lookahead token, accumulated
+// errors and comments, and the variable scope used during evaluation.
+type parser struct {
+	scanner  scanner.Scanner
+	tok      rune    // current lookahead token
+	errors   []error // errors reported so far (capped at maxErrors)
+	scope    *Scope  // variable scope; may be nil when not evaluating
+	comments []Comment
+	eval     bool // when true, expressions are evaluated as they are parsed
+}
+
+// newParser creates a parser over r, wires scanner errors into the parser's
+// error list, and primes the one-token lookahead.
+func newParser(r io.Reader, scope *Scope) *parser {
+	p := &parser{}
+	p.scope = scope
+	p.scanner.Init(r)
+	p.scanner.Error = func(sc *scanner.Scanner, msg string) {
+		p.errorf(msg)
+	}
+	p.scanner.Mode = scanner.ScanIdents | scanner.ScanStrings |
+		scanner.ScanRawStrings | scanner.ScanComments
+	p.next()
+	return p
+}
+
+// error records err at the current position and panics with errTooManyErrors
+// once maxErrors is reached (recovered in parse).
+func (p *parser) error(err error) {
+	pos := p.scanner.Position
+	if !pos.IsValid() {
+		pos = p.scanner.Pos()
+	}
+	err = &ParseError{
+		Err: err,
+		Pos: pos,
+	}
+	p.errors = append(p.errors, err)
+	if len(p.errors) >= maxErrors {
+		panic(errTooManyErrors)
+	}
+}
+
+// errorf is a convenience wrapper around error with Printf-style formatting.
+func (p *parser) errorf(format string, args ...interface{}) {
+	p.error(fmt.Errorf(format, args...))
+}
+
+// accept consumes each expected token in turn, reporting an error and
+// returning false at the first mismatch (the mismatched token is not
+// consumed).
+func (p *parser) accept(toks ...rune) bool {
+	for _, tok := range toks {
+		if p.tok != tok {
+			p.errorf("expected %s, found %s", scanner.TokenString(tok),
+				scanner.TokenString(p.tok))
+			return false
+		}
+		p.next()
+	}
+	return true
+}
+
+// next advances the lookahead token, collecting any comment tokens into
+// p.comments (split into lines) so they never reach the grammar. Once EOF is
+// reached the parser stays at EOF.
+func (p *parser) next() {
+	if p.tok != scanner.EOF {
+		p.tok = p.scanner.Scan()
+		for p.tok == scanner.Comment {
+			lines := strings.Split(p.scanner.TokenText(), "\n")
+			p.comments = append(p.comments, Comment{lines, p.scanner.Position})
+			p.tok = p.scanner.Scan()
+		}
+	}
+	// (redundant trailing bare `return` removed - the function has no named
+	// results, so it added nothing)
+}
+
+// parseDefinitions parses top-level definitions until EOF. Each definition is
+// either an assignment ("name = value" / "name += value") or a module
+// ("type { ... }" or "type ( ... )"), both introduced by an identifier.
+func (p *parser) parseDefinitions() (defs []Definition) {
+	for {
+		switch p.tok {
+		case scanner.Ident:
+			ident := p.scanner.TokenText()
+			pos := p.scanner.Position
+
+			p.accept(scanner.Ident)
+
+			// The token after the identifier decides the definition kind.
+			switch p.tok {
+			case '+':
+				// "+=" arrives as '+' then '='; parseAssignment accepts the '='.
+				p.accept('+')
+				defs = append(defs, p.parseAssignment(ident, pos, "+="))
+			case '=':
+				defs = append(defs, p.parseAssignment(ident, pos, "="))
+			case '{', '(':
+				defs = append(defs, p.parseModule(ident, pos))
+			default:
+				p.errorf("expected \"=\" or \"+=\" or \"{\" or \"(\", found %s",
+					scanner.TokenString(p.tok))
+			}
+		case scanner.EOF:
+			return
+		default:
+			p.errorf("expected assignment or module definition, found %s",
+				scanner.TokenString(p.tok))
+			return
+		}
+	}
+}
+
+// parseAssignment parses the "= value" part of an assignment whose name and
+// assigner ("=" or "+=") were already consumed, and records the assignment in
+// the scope when one is present. For "+=" the existing variable's value is
+// updated in place; this is rejected for variables that do not exist, were
+// inherited from an outer scope, or have already been referenced (their old
+// value may already have been captured).
+func (p *parser) parseAssignment(name string,
+	namePos scanner.Position, assigner string) (assignment *Assignment) {
+
+	assignment = new(Assignment)
+
+	pos := p.scanner.Position
+	if !p.accept('=') {
+		return
+	}
+	value := p.parseExpression()
+
+	assignment.Name = Ident{name, namePos}
+	assignment.Value = value
+	assignment.OrigValue = value
+	assignment.Pos = pos
+	assignment.Assigner = assigner
+
+	if p.scope != nil {
+		if assigner == "+=" {
+			if old, local := p.scope.Get(assignment.Name.Name); old == nil {
+				p.errorf("modified non-existent variable %q with +=", assignment.Name.Name)
+			} else if !local {
+				p.errorf("modified non-local variable %q with +=", assignment.Name.Name)
+			} else if old.Referenced {
+				p.errorf("modified variable %q with += after referencing",
+					assignment.Name.Name)
+			} else {
+				val, err := p.evaluateOperator(old.Value, assignment.Value, '+', assignment.Pos)
+				if err != nil {
+					p.error(err)
+				} else {
+					old.Value = val
+				}
+			}
+		} else {
+			err := p.scope.Add(assignment)
+			if err != nil {
+				p.error(err)
+			}
+		}
+	}
+
+	return
+}
+
+// parseModule parses a module body whose type name was already consumed. Two
+// syntaxes are supported: the old "compat" form "type { name: value, ... }"
+// and the new form "type ( name = value, ... )"; the opening token decides
+// which one applies.
+func (p *parser) parseModule(typ string,
+	typPos scanner.Position) (module *Module) {
+
+	module = new(Module)
+	compat := false
+	lbracePos := p.scanner.Position
+	if p.tok == '{' {
+		compat = true
+	}
+
+	if !p.accept(p.tok) {
+		return
+	}
+	properties := p.parsePropertyList(true, compat)
+	rbracePos := p.scanner.Position
+	// The closing delimiter must match the opening one.
+	if !compat {
+		p.accept(')')
+	} else {
+		p.accept('}')
+	}
+
+	module.Type = Ident{typ, typPos}
+	module.Properties = properties
+	module.LbracePos = lbracePos
+	module.RbracePos = rbracePos
+	return
+}
+
+// parsePropertyList parses a comma-separated list of properties; a trailing
+// comma is allowed, and the list ends at the first token that is not an
+// identifier. isModule and compat select the name/value separator (see
+// parseProperty).
+func (p *parser) parsePropertyList(isModule, compat bool) (properties []*Property) {
+	for p.tok == scanner.Ident {
+		property := p.parseProperty(isModule, compat)
+		properties = append(properties, property)
+
+		if p.tok != ',' {
+			// There was no comma, so the list is done.
+			break
+		}
+
+		p.accept(',')
+	}
+
+	return
+}
+
+// parseProperty parses one "name<sep>value" property. The separator is ":"
+// inside maps and compat-style modules, "=" inside new-style modules.
+func (p *parser) parseProperty(isModule, compat bool) (property *Property) {
+	property = new(Property)
+
+	name := p.scanner.TokenText()
+	namePos := p.scanner.Position
+	p.accept(scanner.Ident)
+	pos := p.scanner.Position
+
+	if isModule {
+		if compat && p.tok == ':' {
+			p.accept(':')
+		} else {
+			if !p.accept('=') {
+				return
+			}
+		}
+	} else {
+		if !p.accept(':') {
+			return
+		}
+	}
+
+	value := p.parseExpression()
+
+	property.Name = Ident{name, namePos}
+	property.Value = value
+	property.Pos = pos
+
+	return
+}
+
+// parseExpression parses a value optionally followed by a binary operator
+// ('+' is the only operator); operator chains associate to the right via
+// recursion in parseOperator.
+func (p *parser) parseExpression() (value Value) {
+	value = p.parseValue()
+	switch p.tok {
+	case '+':
+		return p.parseOperator(value)
+	default:
+		return value
+	}
+}
+
+// evaluateOperator applies operator to two values. When evaluation is off it
+// only records the Expression structure; when on it also computes the result:
+// '+' concatenates strings and lists and merges maps (both operands must have
+// the same type). The result's Variable field is cleared since the value no
+// longer names a single variable.
+func (p *parser) evaluateOperator(value1, value2 Value, operator rune,
+	pos scanner.Position) (Value, error) {
+
+	value := Value{}
+
+	if p.eval {
+		if value1.Type != value2.Type {
+			return Value{}, fmt.Errorf("mismatched type in operator %c: %s != %s", operator,
+				value1.Type, value2.Type)
+		}
+
+		value = value1
+		value.Variable = ""
+
+		switch operator {
+		case '+':
+			switch value1.Type {
+			case String:
+				value.StringValue = value1.StringValue + value2.StringValue
+			case List:
+				// Copy into a fresh slice so neither operand's backing
+				// array is shared with the result.
+				value.ListValue = append([]Value{}, value1.ListValue...)
+				value.ListValue = append(value.ListValue, value2.ListValue...)
+			case Map:
+				var err error
+				value.MapValue, err = p.addMaps(value.MapValue, value2.MapValue, pos)
+				if err != nil {
+					return Value{}, err
+				}
+			default:
+				return Value{}, fmt.Errorf("operator %c not supported on type %s", operator,
+					value1.Type)
+			}
+		default:
+			panic("unknown operator " + string(operator))
+		}
+	}
+
+	// The expression structure is recorded even when not evaluating, so the
+	// original source form can be reproduced.
+	value.Expression = &Expression{
+		Args:     [2]Value{value1, value2},
+		Operator: operator,
+		Pos:      pos,
+	}
+
+	return value, nil
+}
+
+// addMaps merges two property maps: properties present in both maps have
+// their values combined with '+' (recursively via evaluateOperator),
+// properties unique to map1 keep map1's order, and properties unique to map2
+// are appended afterwards in map2's order.
+func (p *parser) addMaps(map1, map2 []*Property, pos scanner.Position) ([]*Property, error) {
+	ret := make([]*Property, 0, len(map1))
+
+	inMap1 := make(map[string]*Property)
+	inBoth := make(map[string]*Property)
+
+	for _, prop1 := range map1 {
+		inMap1[prop1.Name.Name] = prop1
+	}
+
+	// (removed the unused inMap2 map the original built here - it was
+	// written but never read)
+	for _, prop2 := range map2 {
+		if _, ok := inMap1[prop2.Name.Name]; ok {
+			inBoth[prop2.Name.Name] = prop2
+		}
+	}
+
+	for _, prop1 := range map1 {
+		if prop2, ok := inBoth[prop1.Name.Name]; ok {
+			// Shared key: combine the two values with '+'. Copy the
+			// property so map1's entry is not mutated.
+			var err error
+			newProp := *prop1
+			newProp.Value, err = p.evaluateOperator(prop1.Value, prop2.Value, '+', pos)
+			if err != nil {
+				return nil, err
+			}
+			ret = append(ret, &newProp)
+		} else {
+			ret = append(ret, prop1)
+		}
+	}
+
+	for _, prop2 := range map2 {
+		if _, ok := inBoth[prop2.Name.Name]; !ok {
+			ret = append(ret, prop2)
+		}
+	}
+
+	return ret, nil
+}
+
+// parseOperator parses the operator token and right-hand expression, then
+// combines them with value1 via evaluateOperator. Because the right side is a
+// full expression, chained operators associate to the right.
+func (p *parser) parseOperator(value1 Value) Value {
+	operator := p.tok
+	pos := p.scanner.Position
+	p.accept(operator)
+
+	value2 := p.parseExpression()
+
+	value, err := p.evaluateOperator(value1, value2, operator, pos)
+	if err != nil {
+		p.error(err)
+		return Value{}
+	}
+
+	return value
+}
+
+// parseValue dispatches on the lookahead token to parse a single value:
+// identifier (bool literal or variable reference), string, list, or map.
+func (p *parser) parseValue() (value Value) {
+	switch p.tok {
+	case scanner.Ident:
+		return p.parseVariable()
+	case scanner.String:
+		return p.parseStringValue()
+	case '[':
+		return p.parseListValue()
+	case '{':
+		return p.parseMapValue()
+	default:
+		p.errorf("expected bool, list, or string value; found %s",
+			scanner.TokenString(p.tok))
+		return
+	}
+}
+
+// parseVariable parses an identifier value: "true" and "false" become bool
+// literals; anything else is a variable reference. When evaluating, the
+// reference is resolved in the scope, the resolved value is copied in, and
+// local variables are marked Referenced (which blocks later "+=" on them).
+func (p *parser) parseVariable() (value Value) {
+	switch text := p.scanner.TokenText(); text {
+	case "true":
+		value.Type = Bool
+		value.BoolValue = true
+	case "false":
+		value.Type = Bool
+		value.BoolValue = false
+	default:
+		variable := p.scanner.TokenText()
+		if p.eval {
+			if assignment, local := p.scope.Get(variable); assignment == nil {
+				p.errorf("variable %q is not set", variable)
+			} else {
+				if local {
+					assignment.Referenced = true
+				}
+				value = assignment.Value
+			}
+		}
+		// The variable name is kept even when evaluated, so the source form
+		// can be reproduced.
+		value.Variable = variable
+	}
+	value.Pos = p.scanner.Position
+
+	p.accept(scanner.Ident)
+	return
+}
+
+// parseStringValue parses a quoted string token, unquoting it with the Go
+// string-literal rules (strconv.Unquote).
+func (p *parser) parseStringValue() (value Value) {
+	value.Type = String
+	value.Pos = p.scanner.Position
+	str, err := strconv.Unquote(p.scanner.TokenText())
+	if err != nil {
+		p.errorf("couldn't parse string: %s", err)
+		return
+	}
+	value.StringValue = str
+	p.accept(scanner.String)
+	return
+}
+
+// parseListValue parses "[ expr, expr, ... ]" with an optional trailing
+// comma. When evaluating, every element must resolve to a string. EndPos
+// records the closing-bracket position, used when appending new elements.
+func (p *parser) parseListValue() (value Value) {
+	value.Type = List
+	value.Pos = p.scanner.Position
+	if !p.accept('[') {
+		return
+	}
+
+	var elements []Value
+	for p.tok != ']' {
+		element := p.parseExpression()
+		if p.eval && element.Type != String {
+			p.errorf("Expected string in list, found %s", element.String())
+			return
+		}
+		elements = append(elements, element)
+
+		if p.tok != ',' {
+			// There was no comma, so the list is done.
+			break
+		}
+
+		p.accept(',')
+	}
+
+	value.ListValue = elements
+	value.EndPos = p.scanner.Position
+
+	p.accept(']')
+	return
+}
+
+// parseMapValue parses "{ name: expr, ... }". EndPos records the
+// closing-brace position.
+func (p *parser) parseMapValue() (value Value) {
+	value.Type = Map
+	value.Pos = p.scanner.Position
+	if !p.accept('{') {
+		return
+	}
+
+	properties := p.parsePropertyList(false, false)
+	value.MapValue = properties
+
+	value.EndPos = p.scanner.Position
+	p.accept('}')
+	return
+}
+
+// Expression records a binary operator application: the two operand values,
+// the operator rune, and the operator's source position.
+type Expression struct {
+	Args     [2]Value
+	Operator rune
+	Pos      scanner.Position
+}
+
+// Copy returns a deep copy of the expression (both operands are copied).
+func (e *Expression) Copy() *Expression {
+	ret := *e
+	ret.Args[0] = e.Args[0].Copy()
+	ret.Args[1] = e.Args[1].Copy()
+	return &ret
+}
+
+// String renders the expression with its operands, operator, and position,
+// for debugging.
+func (e *Expression) String() string {
+	return fmt.Sprintf("(%s %c %s)@%d:%s", e.Args[0].String(), e.Operator, e.Args[1].String(),
+		e.Pos.Offset, e.Pos)
+}
+
+// ValueType identifies which kind of value a Value holds.
+type ValueType int
+
+const (
+	Bool ValueType = iota
+	String
+	List
+	Map
+)
+
+// String returns the lower-case name of the value type, panicking on an
+// unknown (out-of-range) type.
+func (p ValueType) String() string {
+	if p == Bool {
+		return "bool"
+	}
+	if p == String {
+		return "string"
+	}
+	if p == List {
+		return "list"
+	}
+	if p == Map {
+		return "map"
+	}
+	panic(fmt.Errorf("unknown value type: %d", p))
+}
+
+// Definition is a top-level entry in a Blueprints file: either an Assignment
+// or a Module. definitionTag is a marker method restricting implementations
+// to this package.
+type Definition interface {
+	String() string
+	definitionTag()
+}
+
+// Assignment is a variable assignment. Value holds the (possibly evaluated,
+// possibly "+="-updated) current value, OrigValue the value as written.
+// Referenced is set once the variable is read, after which "+=" is rejected.
+type Assignment struct {
+	Name       Ident
+	Value      Value
+	OrigValue  Value
+	Pos        scanner.Position
+	Assigner   string
+	Referenced bool
+}
+
+// String renders the assignment with positions, for debugging.
+func (a *Assignment) String() string {
+	return fmt.Sprintf("%s@%d:%s %s %s", a.Name, a.Pos.Offset, a.Pos, a.Assigner, a.Value)
+}
+
+func (a *Assignment) definitionTag() {}
+
+// Module is a module definition: its type name, properties, and the
+// positions of the enclosing braces/parens.
+type Module struct {
+	Type       Ident
+	Properties []*Property
+	LbracePos  scanner.Position
+	RbracePos  scanner.Position
+}
+
+// Copy returns a deep copy of the module (all properties are copied).
+func (m *Module) Copy() *Module {
+	ret := *m
+	ret.Properties = make([]*Property, len(m.Properties))
+	for i := range m.Properties {
+		ret.Properties[i] = m.Properties[i].Copy()
+	}
+	return &ret
+}
+
+// String renders the module with its brace positions and properties, for
+// debugging.
+func (m *Module) String() string {
+	propertyStrings := make([]string, len(m.Properties))
+	for i, property := range m.Properties {
+		propertyStrings[i] = property.String()
+	}
+	return fmt.Sprintf("%s@%d:%s-%d:%s{%s}", m.Type,
+		m.LbracePos.Offset, m.LbracePos,
+		m.RbracePos.Offset, m.RbracePos,
+		strings.Join(propertyStrings, ", "))
+}
+
+func (m *Module) definitionTag() {}
+
+// Property is a single name/value pair inside a module or map.
+type Property struct {
+	Name  Ident
+	Value Value
+	Pos   scanner.Position
+}
+
+// Copy returns a deep copy of the property (the value is copied).
+func (p *Property) Copy() *Property {
+	ret := *p
+	ret.Value = p.Value.Copy()
+	return &ret
+}
+
+// String renders the property with its position, for debugging.
+func (p *Property) String() string {
+	return fmt.Sprintf("%s@%d:%s: %s", p.Name, p.Pos.Offset, p.Pos, p.Value)
+}
+
+// Ident is an identifier together with its source position.
+type Ident struct {
+	Name string
+	Pos  scanner.Position
+}
+
+// String renders the identifier with its position, for debugging.
+func (i Ident) String() string {
+	return fmt.Sprintf("%s@%d:%s", i.Name, i.Pos.Offset, i.Pos)
+}
+
+// Value is a tagged union: Type selects which of BoolValue, StringValue,
+// ListValue, or MapValue holds the data. Expression is set when the value was
+// produced by an operator, Variable when it came from a variable reference.
+// EndPos is only meaningful for lists and maps (the closing delimiter).
+type Value struct {
+	Type        ValueType
+	BoolValue   bool
+	StringValue string
+	ListValue   []Value
+	MapValue    []*Property
+	Expression  *Expression
+	Variable    string
+	Pos         scanner.Position
+	EndPos      scanner.Position
+}
+
+// Copy returns a deep copy of the value: list elements, map properties, and
+// the expression (if any) are all copied.
+func (p Value) Copy() Value {
+	ret := p
+	if p.MapValue != nil {
+		ret.MapValue = make([]*Property, len(p.MapValue))
+		for i := range p.MapValue {
+			ret.MapValue[i] = p.MapValue[i].Copy()
+		}
+	}
+	if p.ListValue != nil {
+		ret.ListValue = make([]Value, len(p.ListValue))
+		for i := range p.ListValue {
+			ret.ListValue[i] = p.ListValue[i].Copy()
+		}
+	}
+	if p.Expression != nil {
+		ret.Expression = p.Expression.Copy()
+	}
+	return ret
+}
+
+// String renders the value (with its variable name and expression, when
+// present) and source positions, for debugging. Panics on an invalid Type.
+func (p Value) String() string {
+	var s string
+	if p.Variable != "" {
+		s += p.Variable + " = "
+	}
+	if p.Expression != nil {
+		s += p.Expression.String()
+	}
+	switch p.Type {
+	case Bool:
+		s += fmt.Sprintf("%t@%d:%s", p.BoolValue, p.Pos.Offset, p.Pos)
+	case String:
+		s += fmt.Sprintf("%q@%d:%s", p.StringValue, p.Pos.Offset, p.Pos)
+	case List:
+		valueStrings := make([]string, len(p.ListValue))
+		for i, value := range p.ListValue {
+			valueStrings[i] = value.String()
+		}
+		s += fmt.Sprintf("@%d:%s-%d:%s[%s]", p.Pos.Offset, p.Pos, p.EndPos.Offset, p.EndPos,
+			strings.Join(valueStrings, ", "))
+	case Map:
+		propertyStrings := make([]string, len(p.MapValue))
+		for i, property := range p.MapValue {
+			propertyStrings[i] = property.String()
+		}
+		s += fmt.Sprintf("@%d:%s-%d:%s{%s}", p.Pos.Offset, p.Pos, p.EndPos.Offset, p.EndPos,
+			strings.Join(propertyStrings, ", "))
+	default:
+		panic(fmt.Errorf("bad property type: %d", p.Type))
+	}
+
+	return s
+}
+
+// Scope maps variable names to assignments. Variables assigned in this scope
+// live in vars; variables visible from enclosing scopes live in
+// inheritedVars. The split matters for "+=", which is only allowed on local
+// variables.
+type Scope struct {
+	vars          map[string]*Assignment
+	inheritedVars map[string]*Assignment
+}
+
+// NewScope creates a scope nested inside s (which may be nil). Everything
+// visible in s - local or inherited - becomes inherited in the new scope.
+func NewScope(s *Scope) *Scope {
+	newScope := &Scope{
+		vars:          make(map[string]*Assignment),
+		inheritedVars: make(map[string]*Assignment),
+	}
+
+	if s != nil {
+		for k, v := range s.vars {
+			newScope.inheritedVars[k] = v
+		}
+		for k, v := range s.inheritedVars {
+			newScope.inheritedVars[k] = v
+		}
+	}
+
+	return newScope
+}
+
+// Add records assignment as a local variable. It fails if the name is
+// already set, either locally or in an inherited scope.
+func (s *Scope) Add(assignment *Assignment) error {
+	if old, ok := s.vars[assignment.Name.Name]; ok {
+		return fmt.Errorf("variable already set, previous assignment: %s", old)
+	}
+
+	if old, ok := s.inheritedVars[assignment.Name.Name]; ok {
+		return fmt.Errorf("variable already set in inherited scope, previous assignment: %s", old)
+	}
+
+	s.vars[assignment.Name.Name] = assignment
+
+	return nil
+}
+
+// Remove deletes name from the scope, whether local or inherited.
+func (s *Scope) Remove(name string) {
+	delete(s.vars, name)
+	delete(s.inheritedVars, name)
+}
+
+// Get looks up name, preferring local over inherited variables. The second
+// result reports whether the variable is local; it is false both for
+// inherited variables and when the name is not found (nil assignment).
+func (s *Scope) Get(name string) (*Assignment, bool) {
+	if a, ok := s.vars[name]; ok {
+		return a, true
+	}
+
+	if a, ok := s.inheritedVars[name]; ok {
+		return a, false
+	}
+
+	return nil, false
+}
+
+// String renders every visible assignment, one per line, sorted by variable
+// name. Local assignments take precedence over inherited ones with the same
+// name, matching Get's lookup order.
+func (s *Scope) String() string {
+	names := make([]string, 0, len(s.vars)+len(s.inheritedVars))
+	for name := range s.vars {
+		names = append(names, name)
+	}
+	for name := range s.inheritedVars {
+		names = append(names, name)
+	}
+
+	sort.Strings(names)
+
+	lines := make([]string, 0, len(names))
+	for _, name := range names {
+		a, ok := s.vars[name]
+		if !ok {
+			a = s.inheritedVars[name]
+		}
+		lines = append(lines, a.String())
+	}
+
+	return strings.Join(lines, "\n")
+}
+
+// Comment is one comment token from the source, split into lines, together
+// with its starting position.
+type Comment struct {
+	Comment []string
+	Pos     scanner.Position
+}
+
+// String returns the comment text verbatim, each line terminated by a
+// newline.
+func (c Comment) String() string {
+	if len(c.Comment) == 0 {
+		return ""
+	}
+	return strings.Join(c.Comment, "\n") + "\n"
+}
+
+// Return the text of the comment with // or /* and */ stripped
+func (c Comment) Text() string {
+	// Pre-size the buffer: total line length plus one newline per line.
+	l := 0
+	for _, comment := range c.Comment {
+		l += len(comment) + 1
+	}
+	buf := make([]byte, 0, l)
+
+	// A comment starting with "/*" is a block comment: strip "/*" from the
+	// first line and "*/" from the last. Otherwise strip the leading "//"
+	// from every line.
+	blockComment := false
+	if strings.HasPrefix(c.Comment[0], "/*") {
+		blockComment = true
+	}
+
+	for i, comment := range c.Comment {
+		if blockComment {
+			if i == 0 {
+				comment = strings.TrimPrefix(comment, "/*")
+			}
+			if i == len(c.Comment)-1 {
+				comment = strings.TrimSuffix(comment, "*/")
+			}
+		} else {
+			comment = strings.TrimPrefix(comment, "//")
+		}
+		buf = append(buf, comment...)
+		buf = append(buf, '\n')
+	}
+
+	return string(buf)
+}
+
+// Return the line number that the comment ends on
+func (c Comment) EndLine() int {
+	return c.Pos.Line + len(c.Comment) - 1
+}
diff --git a/blueprint/parser/parser_test.go b/blueprint/parser/parser_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..8925684f16c9f4aa9cdcd320218311d44d017c70
--- /dev/null
+++ b/blueprint/parser/parser_test.go
@@ -0,0 +1,522 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+ "bytes"
+ "reflect"
+ "testing"
+ "text/scanner"
+)
+
+func mkpos(offset, line, column int) scanner.Position {
+ return scanner.Position{
+ Offset: offset,
+ Line: line,
+ Column: column,
+ }
+}
+
+var validParseTestCases = []struct {
+ input string
+ defs []Definition
+ comments []Comment
+}{
+ {`
+ foo {}
+ `,
+ []Definition{
+ &Module{
+ Type: Ident{"foo", mkpos(3, 2, 3)},
+ LbracePos: mkpos(7, 2, 7),
+ RbracePos: mkpos(8, 2, 8),
+ },
+ },
+ nil,
+ },
+
+ {`
+ foo {
+ name: "abc",
+ }
+ `,
+ []Definition{
+ &Module{
+ Type: Ident{"foo", mkpos(3, 2, 3)},
+ LbracePos: mkpos(7, 2, 7),
+ RbracePos: mkpos(27, 4, 3),
+ Properties: []*Property{
+ {
+ Name: Ident{"name", mkpos(12, 3, 4)},
+ Pos: mkpos(16, 3, 8),
+ Value: Value{
+ Type: String,
+ Pos: mkpos(18, 3, 10),
+ StringValue: "abc",
+ },
+ },
+ },
+ },
+ },
+ nil,
+ },
+
+ {`
+ foo {
+ isGood: true,
+ }
+ `,
+ []Definition{
+ &Module{
+ Type: Ident{"foo", mkpos(3, 2, 3)},
+ LbracePos: mkpos(7, 2, 7),
+ RbracePos: mkpos(28, 4, 3),
+ Properties: []*Property{
+ {
+ Name: Ident{"isGood", mkpos(12, 3, 4)},
+ Pos: mkpos(18, 3, 10),
+ Value: Value{
+ Type: Bool,
+ Pos: mkpos(20, 3, 12),
+ BoolValue: true,
+ },
+ },
+ },
+ },
+ },
+ nil,
+ },
+
+ {`
+ foo {
+ stuff: ["asdf", "jkl;", "qwert",
+ "uiop", "bnm,"]
+ }
+ `,
+ []Definition{
+ &Module{
+ Type: Ident{"foo", mkpos(3, 2, 3)},
+ LbracePos: mkpos(7, 2, 7),
+ RbracePos: mkpos(67, 5, 3),
+ Properties: []*Property{
+ {
+ Name: Ident{"stuff", mkpos(12, 3, 4)},
+ Pos: mkpos(17, 3, 9),
+ Value: Value{
+ Type: List,
+ Pos: mkpos(19, 3, 11),
+ EndPos: mkpos(63, 4, 19),
+ ListValue: []Value{
+ Value{
+ Type: String,
+ Pos: mkpos(20, 3, 12),
+ StringValue: "asdf",
+ },
+ Value{
+ Type: String,
+ Pos: mkpos(28, 3, 20),
+ StringValue: "jkl;",
+ },
+ Value{
+ Type: String,
+ Pos: mkpos(36, 3, 28),
+ StringValue: "qwert",
+ },
+ Value{
+ Type: String,
+ Pos: mkpos(49, 4, 5),
+ StringValue: "uiop",
+ },
+ Value{
+ Type: String,
+ Pos: mkpos(57, 4, 13),
+ StringValue: "bnm,",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ nil,
+ },
+
+ {`
+ foo {
+ stuff: {
+ isGood: true,
+ name: "bar"
+ }
+ }
+ `,
+ []Definition{
+ &Module{
+ Type: Ident{"foo", mkpos(3, 2, 3)},
+ LbracePos: mkpos(7, 2, 7),
+ RbracePos: mkpos(62, 7, 3),
+ Properties: []*Property{
+ {
+ Name: Ident{"stuff", mkpos(12, 3, 4)},
+ Pos: mkpos(17, 3, 9),
+ Value: Value{
+ Type: Map,
+ Pos: mkpos(19, 3, 11),
+ EndPos: mkpos(58, 6, 4),
+ MapValue: []*Property{
+ {
+ Name: Ident{"isGood", mkpos(25, 4, 5)},
+ Pos: mkpos(31, 4, 11),
+ Value: Value{
+ Type: Bool,
+ Pos: mkpos(33, 4, 13),
+ BoolValue: true,
+ },
+ },
+ {
+ Name: Ident{"name", mkpos(43, 5, 5)},
+ Pos: mkpos(47, 5, 9),
+ Value: Value{
+ Type: String,
+ Pos: mkpos(49, 5, 11),
+ StringValue: "bar",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ nil,
+ },
+
+ {`
+ // comment1
+ foo {
+ // comment2
+ isGood: true, // comment3
+ }
+ `,
+ []Definition{
+ &Module{
+ Type: Ident{"foo", mkpos(17, 3, 3)},
+ LbracePos: mkpos(21, 3, 7),
+ RbracePos: mkpos(70, 6, 3),
+ Properties: []*Property{
+ {
+ Name: Ident{"isGood", mkpos(41, 5, 4)},
+ Pos: mkpos(47, 5, 10),
+ Value: Value{
+ Type: Bool,
+ Pos: mkpos(49, 5, 12),
+ BoolValue: true,
+ },
+ },
+ },
+ },
+ },
+ []Comment{
+ Comment{
+ Comment: []string{"// comment1"},
+ Pos: mkpos(3, 2, 3),
+ },
+ Comment{
+ Comment: []string{"// comment2"},
+ Pos: mkpos(26, 4, 4),
+ },
+ Comment{
+ Comment: []string{"// comment3"},
+ Pos: mkpos(56, 5, 19),
+ },
+ },
+ },
+
+ {`
+ foo {
+ name: "abc",
+ }
+
+ bar {
+ name: "def",
+ }
+ `,
+ []Definition{
+ &Module{
+ Type: Ident{"foo", mkpos(3, 2, 3)},
+ LbracePos: mkpos(7, 2, 7),
+ RbracePos: mkpos(27, 4, 3),
+ Properties: []*Property{
+ {
+ Name: Ident{"name", mkpos(12, 3, 4)},
+ Pos: mkpos(16, 3, 8),
+ Value: Value{
+ Type: String,
+ Pos: mkpos(18, 3, 10),
+ StringValue: "abc",
+ },
+ },
+ },
+ },
+ &Module{
+ Type: Ident{"bar", mkpos(32, 6, 3)},
+ LbracePos: mkpos(36, 6, 7),
+ RbracePos: mkpos(56, 8, 3),
+ Properties: []*Property{
+ {
+ Name: Ident{"name", mkpos(41, 7, 4)},
+ Pos: mkpos(45, 7, 8),
+ Value: Value{
+ Type: String,
+ Pos: mkpos(47, 7, 10),
+ StringValue: "def",
+ },
+ },
+ },
+ },
+ },
+ nil,
+ },
+ {`
+ foo = "stuff"
+ bar = foo
+ baz = foo + bar
+ boo = baz
+ boo += foo
+ `,
+ []Definition{
+ &Assignment{
+ Name: Ident{"foo", mkpos(3, 2, 3)},
+ Pos: mkpos(7, 2, 7),
+ Value: Value{
+ Type: String,
+ Pos: mkpos(9, 2, 9),
+ StringValue: "stuff",
+ },
+ OrigValue: Value{
+ Type: String,
+ Pos: mkpos(9, 2, 9),
+ StringValue: "stuff",
+ },
+ Assigner: "=",
+ Referenced: true,
+ },
+ &Assignment{
+ Name: Ident{"bar", mkpos(19, 3, 3)},
+ Pos: mkpos(23, 3, 7),
+ Value: Value{
+ Type: String,
+ Pos: mkpos(25, 3, 9),
+ StringValue: "stuff",
+ Variable: "foo",
+ },
+ OrigValue: Value{
+ Type: String,
+ Pos: mkpos(25, 3, 9),
+ StringValue: "stuff",
+ Variable: "foo",
+ },
+ Assigner: "=",
+ Referenced: true,
+ },
+ &Assignment{
+ Name: Ident{"baz", mkpos(31, 4, 3)},
+ Pos: mkpos(35, 4, 7),
+ Value: Value{
+ Type: String,
+ Pos: mkpos(37, 4, 9),
+ StringValue: "stuffstuff",
+ Expression: &Expression{
+ Args: [2]Value{
+ {
+ Type: String,
+ Pos: mkpos(37, 4, 9),
+ StringValue: "stuff",
+ Variable: "foo",
+ },
+ {
+ Type: String,
+ Pos: mkpos(43, 4, 15),
+ StringValue: "stuff",
+ Variable: "bar",
+ },
+ },
+ Operator: '+',
+ Pos: mkpos(41, 4, 13),
+ },
+ },
+ OrigValue: Value{
+ Type: String,
+ Pos: mkpos(37, 4, 9),
+ StringValue: "stuffstuff",
+ Expression: &Expression{
+ Args: [2]Value{
+ {
+ Type: String,
+ Pos: mkpos(37, 4, 9),
+ StringValue: "stuff",
+ Variable: "foo",
+ },
+ {
+ Type: String,
+ Pos: mkpos(43, 4, 15),
+ StringValue: "stuff",
+ Variable: "bar",
+ },
+ },
+ Operator: '+',
+ Pos: mkpos(41, 4, 13),
+ },
+ },
+ Assigner: "=",
+ Referenced: true,
+ },
+ &Assignment{
+ Name: Ident{"boo", mkpos(49, 5, 3)},
+ Pos: mkpos(53, 5, 7),
+ Value: Value{
+ Type: String,
+ Pos: mkpos(55, 5, 9),
+ StringValue: "stuffstuffstuff",
+ Expression: &Expression{
+ Args: [2]Value{
+ {
+ Type: String,
+ Pos: mkpos(55, 5, 9),
+ StringValue: "stuffstuff",
+ Variable: "baz",
+ Expression: &Expression{
+ Args: [2]Value{
+ {
+ Type: String,
+ Pos: mkpos(37, 4, 9),
+ StringValue: "stuff",
+ Variable: "foo",
+ },
+ {
+ Type: String,
+ Pos: mkpos(43, 4, 15),
+ StringValue: "stuff",
+ Variable: "bar",
+ },
+ },
+ Operator: '+',
+ Pos: mkpos(41, 4, 13),
+ },
+ },
+ {
+ Variable: "foo",
+ Type: String,
+ Pos: mkpos(68, 6, 10),
+ StringValue: "stuff",
+ },
+ },
+ Pos: mkpos(66, 6, 8),
+ Operator: '+',
+ },
+ },
+ OrigValue: Value{
+ Type: String,
+ Pos: mkpos(55, 5, 9),
+ StringValue: "stuffstuff",
+ Variable: "baz",
+ Expression: &Expression{
+ Args: [2]Value{
+ {
+ Type: String,
+ Pos: mkpos(37, 4, 9),
+ StringValue: "stuff",
+ Variable: "foo",
+ },
+ {
+ Type: String,
+ Pos: mkpos(43, 4, 15),
+ StringValue: "stuff",
+ Variable: "bar",
+ },
+ },
+ Operator: '+',
+ Pos: mkpos(41, 4, 13),
+ },
+ },
+ Assigner: "=",
+ },
+ &Assignment{
+ Name: Ident{"boo", mkpos(61, 6, 3)},
+ Pos: mkpos(66, 6, 8),
+ Value: Value{
+ Type: String,
+ Pos: mkpos(68, 6, 10),
+ StringValue: "stuff",
+ Variable: "foo",
+ },
+ OrigValue: Value{
+ Type: String,
+ Pos: mkpos(68, 6, 10),
+ StringValue: "stuff",
+ Variable: "foo",
+ },
+ Assigner: "+=",
+ },
+ },
+ nil,
+ },
+}
+
+// TestParseValidInput runs the parser over each valid test case and checks
+// the resulting definitions and comments against the expected values.
+func TestParseValidInput(t *testing.T) {
+	for _, testCase := range validParseTestCases {
+		r := bytes.NewBufferString(testCase.input)
+		file, errs := ParseAndEval("", r, NewScope(nil))
+		if len(errs) != 0 {
+			t.Errorf("test case: %s", testCase.input)
+			t.Errorf("unexpected errors:")
+			for _, err := range errs {
+				t.Errorf(" %s", err)
+			}
+			t.FailNow()
+		}
+
+		if len(file.Defs) == len(testCase.defs) {
+			for i := range file.Defs {
+				if !reflect.DeepEqual(file.Defs[i], testCase.defs[i]) {
+					t.Errorf("test case: %s", testCase.input)
+					t.Errorf("incorrect definition %d:", i)
+					t.Errorf(" expected: %s", testCase.defs[i])
+					t.Errorf(" got: %s", file.Defs[i])
+				}
+			}
+		} else {
+			t.Errorf("test case: %s", testCase.input)
+			t.Errorf("length mismatch, expected %d definitions, got %d",
+				len(testCase.defs), len(file.Defs))
+		}
+
+		if len(file.Comments) == len(testCase.comments) {
+			for i := range file.Comments {
+				// Fix: compare the individual comment, not the whole
+				// slices — the original compared file.Comments against
+				// testCase.comments on every iteration, so a single
+				// mismatch was reported once per index with misleading
+				// per-element "expected/got" values.
+				if !reflect.DeepEqual(file.Comments[i], testCase.comments[i]) {
+					t.Errorf("test case: %s", testCase.input)
+					t.Errorf("incorrect comment %d:", i)
+					t.Errorf(" expected: %s", testCase.comments[i])
+					t.Errorf(" got: %s", file.Comments[i])
+				}
+			}
+		} else {
+			t.Errorf("test case: %s", testCase.input)
+			t.Errorf("length mismatch, expected %d comments, got %d",
+				len(testCase.comments), len(file.Comments))
+		}
+	}
+}
+
+// TODO: Test error strings
diff --git a/blueprint/parser/printer.go b/blueprint/parser/printer.go
new file mode 100644
index 0000000000000000000000000000000000000000..b27f5e0ada3f71b74d695edf7049ee09522d9fd4
--- /dev/null
+++ b/blueprint/parser/printer.go
@@ -0,0 +1,373 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "text/scanner"
+ "unicode"
+)
+
+// noPos is the zero Position, used for synthesized tokens with no source location.
+var noPos = scanner.Position{}
+
+// printer holds the state needed to render a parsed File back to text.
+type printer struct {
+	defs     []Definition // definitions to print, in order
+	comments []Comment    // all comments, ordered by offset
+
+	curComment int // index of the next comment not yet printed
+
+	pos scanner.Position // source position of the last printed token
+
+	pendingSpace   bool // emit a space before the next token
+	pendingNewline int  // newlines before the next token (-1 swallows leading whitespace)
+
+	output []byte // accumulated rendered text
+
+	indentList []int  // stack of indentation columns; top is current
+	wsBuf      []byte // reusable whitespace buffer for pad()
+
+	skippedComments []Comment // comments deferred until the next line break
+}
+
+// newPrinter creates a printer over file's definitions and comments,
+// positioned at a notional line 1.
+func newPrinter(file *File) *printer {
+	return &printer{
+		defs: file.Defs,
+		comments: file.Comments,
+		indentList: []int{0},
+
+		// pendingNewline is initialized to -1 to eat initial spaces if the first token is a comment
+		pendingNewline: -1,
+
+		pos: scanner.Position{
+			Line: 1,
+		},
+	}
+}
+
+// Print renders file back to source text, reformatting whitespace and
+// preserving comments.
+func Print(file *File) ([]byte, error) {
+	// The printer method performs the identical printDef loop plus flush.
+	return newPrinter(file).Print()
+}
+
+// Print renders all of the printer's definitions followed by any remaining
+// comments and returns the accumulated bytes. The error result is currently
+// always nil; it keeps the signature parallel with the package-level Print.
+func (p *printer) Print() ([]byte, error) {
+	for _, def := range p.defs {
+		p.printDef(def)
+	}
+	p.flush()
+	return p.output, nil
+}
+
+func (p *printer) printDef(def Definition) {
+ if assignment, ok := def.(*Assignment); ok {
+ p.printAssignment(assignment)
+ } else if module, ok := def.(*Module); ok {
+ p.printModule(module)
+ } else {
+ panic("Unknown definition")
+ }
+}
+
+// printAssignment prints "name <assigner> value". It prints OrigValue, the
+// value as written, so variable references survive a print round trip.
+func (p *printer) printAssignment(assignment *Assignment) {
+	p.printToken(assignment.Name.Name, assignment.Name.Pos)
+	p.requestSpace()
+	p.printToken(assignment.Assigner, assignment.Pos)
+	p.requestSpace()
+	p.printValue(assignment.OrigValue)
+	p.requestNewline()
+}
+
+// printModule prints a module definition ("type { ... }") followed by a
+// blank line.
+func (p *printer) printModule(module *Module) {
+	p.printToken(module.Type.Name, module.Type.Pos)
+	p.printMap(module.Properties, module.LbracePos, module.RbracePos)
+	p.requestDoubleNewline()
+}
+
+// printValue prints a single value: a variable reference, a binary
+// expression, or a literal (bool, string, list, or map).
+func (p *printer) printValue(value Value) {
+	if value.Variable != "" {
+		// Print the variable name as written, not its evaluated contents.
+		p.printToken(value.Variable, value.Pos)
+	} else if value.Expression != nil {
+		p.printExpression(*value.Expression)
+	} else {
+		switch value.Type {
+		case Bool:
+			var s string
+			if value.BoolValue {
+				s = "true"
+			} else {
+				s = "false"
+			}
+			p.printToken(s, value.Pos)
+		case String:
+			// StringValue holds the unquoted text; re-quote for output.
+			p.printToken(strconv.Quote(value.StringValue), value.Pos)
+		case List:
+			p.printList(value.ListValue, value.Pos, value.EndPos)
+		case Map:
+			p.printMap(value.MapValue, value.Pos, value.EndPos)
+		default:
+			panic(fmt.Errorf("bad property type: %d", value.Type))
+		}
+	}
+}
+
+// printList prints a list literal. A list with more than one element, or
+// whose brackets sat on different source lines, is printed one element per
+// line with trailing commas; otherwise it stays on a single line.
+func (p *printer) printList(list []Value, pos, endPos scanner.Position) {
+	p.requestSpace()
+	p.printToken("[", pos)
+	if len(list) > 1 || pos.Line != endPos.Line {
+		p.requestNewline()
+		// Indent the elements four columns past the current level.
+		p.indent(p.curIndent() + 4)
+		for _, value := range list {
+			p.printValue(value)
+			p.printToken(",", noPos)
+			p.requestNewline()
+		}
+		p.unindent(endPos)
+	} else {
+		for _, value := range list {
+			p.printValue(value)
+		}
+	}
+	p.printToken("]", endPos)
+}
+
+// printMap prints a map literal or module property block. Any non-empty map,
+// or one whose braces sat on different source lines, is printed one property
+// per line with trailing commas; an empty single-line map prints as "{}".
+func (p *printer) printMap(list []*Property, pos, endPos scanner.Position) {
+	p.requestSpace()
+	p.printToken("{", pos)
+	if len(list) > 0 || pos.Line != endPos.Line {
+		p.requestNewline()
+		// Indent the properties four columns past the current level.
+		p.indent(p.curIndent() + 4)
+		for _, prop := range list {
+			p.printProperty(prop)
+			p.printToken(",", noPos)
+			p.requestNewline()
+		}
+		p.unindent(endPos)
+	}
+	p.printToken("}", endPos)
+}
+
+// printExpression prints "lhs <op> rhs". The operands stay on one line only
+// if they shared a line in the source.
+func (p *printer) printExpression(expression Expression) {
+	p.printValue(expression.Args[0])
+	p.requestSpace()
+	p.printToken(string(expression.Operator), expression.Pos)
+	if expression.Args[0].Pos.Line == expression.Args[1].Pos.Line {
+		p.requestSpace()
+	} else {
+		p.requestNewline()
+	}
+	p.printValue(expression.Args[1])
+}
+
+// printProperty prints "name: value".
+func (p *printer) printProperty(property *Property) {
+	p.printToken(property.Name.Name, property.Name.Pos)
+	p.printToken(":", property.Pos)
+	p.requestSpace()
+	p.printValue(property.Value)
+}
+
+// Print a single token, including any necessary comments or whitespace between
+// this token and the previously printed token
+func (p *printer) printToken(s string, pos scanner.Position) {
+	// A pending (or initial, -1) newline means we are crossing a line break.
+	newline := p.pendingNewline != 0
+
+	if pos == noPos {
+		// Synthesized tokens inherit the previous token's position.
+		pos = p.pos
+	}
+
+	if newline {
+		// Emit comments that belong to earlier lines and translate the
+		// source-line gap into pending newlines.
+		p.printEndOfLineCommentsBefore(pos)
+		p.requestNewlinesForPos(pos)
+	}
+
+	p.printInLineCommentsBefore(pos)
+
+	p.flushSpace()
+
+	p.output = append(p.output, s...)
+
+	p.pos = pos
+}
+
+// Print any in-line (single line /* */) comments that appear _before_ pos
+func (p *printer) printInLineCommentsBefore(pos scanner.Position) {
+	for p.curComment < len(p.comments) && p.comments[p.curComment].Pos.Offset < pos.Offset {
+		c := p.comments[p.curComment]
+		if c.Comment[0][0:2] == "//" || len(c.Comment) > 1 {
+			// Line comments and multi-line block comments cannot appear
+			// in-line; defer them until the next line break.
+			p.skippedComments = append(p.skippedComments, c)
+		} else {
+			p.flushSpace()
+			p.printComment(c)
+			p.requestSpace()
+		}
+		p.curComment++
+	}
+}
+
+// Print any comments, including end of line comments, that appear _before_ the line specified
+// by pos
+func (p *printer) printEndOfLineCommentsBefore(pos scanner.Position) {
+	// Deferred (skipped) comments print first; each stays on the current
+	// output line if it shared a source line with the last token.
+	for _, c := range p.skippedComments {
+		if !p.requestNewlinesForPos(c.Pos) {
+			p.requestSpace()
+		}
+		p.printComment(c)
+		p._requestNewline()
+	}
+	p.skippedComments = []Comment{}
+	// Then any not-yet-printed comments on lines before pos.
+	for p.curComment < len(p.comments) && p.comments[p.curComment].Pos.Line < pos.Line {
+		c := p.comments[p.curComment]
+		if !p.requestNewlinesForPos(c.Pos) {
+			p.requestSpace()
+		}
+		p.printComment(c)
+		p._requestNewline()
+		p.curComment++
+	}
+}
+
+// Compare the line numbers of the previous and current positions to determine whether extra
+// newlines should be inserted. A second newline is allowed anywhere requestNewline() is called.
+// Returns true if pos is on a later line than the last printed token.
+func (p *printer) requestNewlinesForPos(pos scanner.Position) bool {
+	if pos.Line > p.pos.Line {
+		p._requestNewline()
+		if pos.Line > p.pos.Line+1 {
+			// A gap of more than one source line collapses to exactly one
+			// blank line in the output.
+			p.pendingNewline = 2
+		}
+		return true
+	}
+
+	return false
+}
+
+// requestSpace asks for a single space before the next token; it is dropped
+// if a newline ends up being emitted instead (see flushSpace).
+func (p *printer) requestSpace() {
+	p.pendingSpace = true
+}
+
+// Ask for a newline to be inserted before the next token, but do not insert any comments. Used
+// by the comment printers.
+func (p *printer) _requestNewline() {
+	// Only upgrade 0 -> 1; never downgrade an already-pending double newline.
+	if p.pendingNewline == 0 {
+		p.pendingNewline = 1
+	}
+}
+
+// Ask for a newline to be inserted before the next token. Also inserts any end-of line comments
+// for the current line
+func (p *printer) requestNewline() {
+	// Probe one line past the current position so comments on the line
+	// being ended are printed before the newline takes effect.
+	pos := p.pos
+	pos.Line++
+	p.printEndOfLineCommentsBefore(pos)
+	p._requestNewline()
+}
+
+// Ask for two newlines to be inserted before the next token. Also inserts any end-of line comments
+// for the current line
+func (p *printer) requestDoubleNewline() {
+	p.requestNewline()
+	// Unconditionally force a blank line (e.g. between module definitions).
+	p.pendingNewline = 2
+}
+
+// Flush any pending whitespace, ignoring pending spaces if there is a pending newline.
+// In the initial state (pendingNewline == -1) both spaces and newlines are
+// swallowed so output never starts with whitespace.
+func (p *printer) flushSpace() {
+	switch p.pendingNewline {
+	case 1:
+		p.output = append(p.output, '\n')
+		p.pad(p.curIndent())
+	case 2:
+		p.output = append(p.output, "\n\n"...)
+		p.pad(p.curIndent())
+	case -1:
+		// Initial state: drop any pending space.
+	default:
+		// No newline pending; emit the space if one was requested.
+		// (Idiom fix: the original tested `p.pendingSpace == true`.)
+		if p.pendingSpace {
+			p.output = append(p.output, ' ')
+		}
+	}
+
+	p.pendingSpace = false
+	p.pendingNewline = 0
+}
+
+// Print a single comment, which may be a multi-line comment
+func (p *printer) printComment(comment Comment) {
+	pos := comment.Pos
+	for i, line := range comment.Comment {
+		line = strings.TrimRightFunc(line, unicode.IsSpace)
+		p.flushSpace()
+		if i != 0 {
+			// Continuation lines keep any indentation beyond the current
+			// indent level (pad emits only the excess).
+			lineIndent := strings.IndexFunc(line, func(r rune) bool { return !unicode.IsSpace(r) })
+			lineIndent = max(lineIndent, p.curIndent())
+			p.pad(lineIndent - p.curIndent())
+			pos.Line++
+		}
+		p.output = append(p.output, strings.TrimSpace(line)...)
+		if i < len(comment.Comment)-1 {
+			// Force a line break between the comment's own lines.
+			p._requestNewline()
+		}
+	}
+	// Record the position of the comment's last line.
+	p.pos = pos
+}
+
+// Print any comments that occur after the last token, and a trailing newline
+func (p *printer) flush() {
+	// First flush comments that were deferred for a line break that never came...
+	for _, c := range p.skippedComments {
+		if !p.requestNewlinesForPos(c.Pos) {
+			p.requestSpace()
+		}
+		p.printComment(c)
+	}
+	// ...then any comments positioned after the final token.
+	for p.curComment < len(p.comments) {
+		c := p.comments[p.curComment]
+		if !p.requestNewlinesForPos(c.Pos) {
+			p.requestSpace()
+		}
+		p.printComment(c)
+		p.curComment++
+	}
+	p.output = append(p.output, '\n')
+}
+
+// Print whitespace to pad from column l to column max
+func (p *printer) pad(l int) {
+	if l > len(p.wsBuf) {
+		// Grow the reusable space buffer to at least l columns.
+		buf := make([]byte, l)
+		for i := 0; i < l; i++ {
+			buf[i] = ' '
+		}
+		p.wsBuf = buf
+	}
+	p.output = append(p.output, p.wsBuf[:l]...)
+}
+
+// indent pushes a new indentation column onto the stack.
+func (p *printer) indent(i int) {
+	p.indentList = append(p.indentList, i)
+}
+
+// unindent pops the innermost indentation level, first printing any comments
+// that still belong inside the scope being closed at pos.
+func (p *printer) unindent(pos scanner.Position) {
+	p.printEndOfLineCommentsBefore(pos)
+	p.indentList = p.indentList[0 : len(p.indentList)-1]
+}
+
+// curIndent returns the current indentation column (top of the stack).
+func (p *printer) curIndent() int {
+	return p.indentList[len(p.indentList)-1]
+}
+
+// max returns the larger of a and b.
+func max(a, b int) int {
+	if b > a {
+		return b
+	}
+	return a
+}
diff --git a/blueprint/parser/printer_test.go b/blueprint/parser/printer_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..e5acf064b80172e803624355268be825ff1ef488
--- /dev/null
+++ b/blueprint/parser/printer_test.go
@@ -0,0 +1,289 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+ "bytes"
+ "testing"
+)
+
+var validPrinterTestCases = []struct {
+ input string
+ output string
+}{
+ {
+ input: `
+foo {}
+`,
+ output: `
+foo {}
+`,
+ },
+ {
+ input: `
+foo{name= "abc",}
+`,
+ output: `
+foo {
+ name: "abc",
+}
+`,
+ },
+ {
+ input: `
+ foo {
+ stuff: ["asdf", "jkl;", "qwert",
+ "uiop", "bnm,"]
+ }
+ `,
+ output: `
+foo {
+ stuff: [
+ "asdf",
+ "bnm,",
+ "jkl;",
+ "qwert",
+ "uiop",
+ ],
+}
+`,
+ },
+ {
+ input: `
+ foo {
+ stuff: {
+ isGood: true,
+ name: "bar"
+ }
+ }
+ `,
+ output: `
+foo {
+ stuff: {
+ isGood: true,
+ name: "bar",
+ },
+}
+`,
+ },
+ {
+ input: `
+// comment1
+foo {
+ // comment2
+ isGood: true, // comment3
+}
+`,
+ output: `
+// comment1
+foo {
+ // comment2
+ isGood: true, // comment3
+}
+`,
+ },
+ {
+ input: `
+foo {
+ name: "abc",
+}
+
+bar {
+ name: "def",
+}
+ `,
+ output: `
+foo {
+ name: "abc",
+}
+
+bar {
+ name: "def",
+}
+`,
+ },
+ {
+ input: `
+foo = "stuff"
+bar = foo
+baz = foo + bar
+baz += foo
+`,
+ output: `
+foo = "stuff"
+bar = foo
+baz = foo + bar
+baz += foo
+`,
+ },
+ {
+ input: `
+//test
+test /* test */ {
+ srcs: [
+ /*"bootstrap/bootstrap.go",
+ "bootstrap/cleanup.go",*/
+ "bootstrap/command.go",
+ "bootstrap/doc.go", //doc.go
+ "bootstrap/config.go", //config.go
+ ],
+ deps: ["libabc"],
+ incs: []
+} //test
+//test
+test2 {
+}
+
+
+//test3
+`,
+ output: `
+//test
+test /* test */ {
+ srcs: [
+ /*"bootstrap/bootstrap.go",
+ "bootstrap/cleanup.go",*/
+ "bootstrap/command.go",
+ "bootstrap/config.go", //config.go
+ "bootstrap/doc.go", //doc.go
+ ],
+ deps: ["libabc"],
+ incs: [],
+} //test
+
+//test
+test2 {
+}
+
+//test3
+`,
+ },
+ {
+ input: `
+// test
+module // test
+
+ {
+ srcs
+ : [
+ "src1.c",
+ "src2.c",
+ ],
+//test
+}
+//test2
+`,
+ output: `
+// test
+module { // test
+
+ srcs: [
+ "src1.c",
+ "src2.c",
+ ],
+ //test
+}
+
+//test2
+`,
+ },
+ {
+ input: `
+/*test {
+ test: true,
+}*/
+
+test {
+/*test: true,*/
+}
+
+// This
+/* Is */
+// A
+
+// Multiline
+// Comment
+
+test {}
+
+// This
+/* Is */
+// A
+// Trailing
+
+// Multiline
+// Comment
+`,
+ output: `
+/*test {
+ test: true,
+}*/
+
+test {
+ /*test: true,*/
+}
+
+// This
+/* Is */
+// A
+
+// Multiline
+// Comment
+
+test {}
+
+// This
+/* Is */
+// A
+// Trailing
+
+// Multiline
+// Comment
+`,
+ },
+}
+
+// TestPrinter parses each case, sorts its lists, prints it back, and compares
+// the result with the expected canonical formatting.
+func TestPrinter(t *testing.T) {
+	for _, testCase := range validPrinterTestCases {
+		// The leading newline in the literals exists only for readability;
+		// strip it from both sides.
+		in := testCase.input[1:]
+		expected := testCase.output[1:]
+
+		r := bytes.NewBufferString(in)
+		file, errs := Parse("", r, NewScope(nil))
+		if len(errs) != 0 {
+			t.Errorf("test case: %s", in)
+			t.Errorf("unexpected errors:")
+			for _, err := range errs {
+				t.Errorf(" %s", err)
+			}
+			t.FailNow()
+		}
+
+		SortLists(file)
+
+		got, err := Print(file)
+		if err != nil {
+			t.Errorf("test case: %s", in)
+			t.Errorf("unexpected error: %s", err)
+			t.FailNow()
+		}
+
+		if string(got) != expected {
+			t.Errorf("test case: %s", in)
+			t.Errorf(" expected: %s", expected)
+			t.Errorf(" got: %s", string(got))
+		}
+	}
+}
diff --git a/blueprint/parser/sort.go b/blueprint/parser/sort.go
new file mode 100644
index 0000000000000000000000000000000000000000..381ef82b5d85b52549185377f18d8006f7c97007
--- /dev/null
+++ b/blueprint/parser/sort.go
@@ -0,0 +1,182 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+ "sort"
+ "text/scanner"
+)
+
+// SortLists sorts the string lists in every top-level assignment and module
+// property of file, then re-sorts the comment slice by offset (sorting can
+// relocate comments attached to moved list elements).
+func SortLists(file *File) {
+	for _, def := range file.Defs {
+		if assignment, ok := def.(*Assignment); ok {
+			sortListsInValue(assignment.Value, file)
+		} else if module, ok := def.(*Module); ok {
+			for _, prop := range module.Properties {
+				sortListsInValue(prop.Value, file)
+			}
+		}
+	}
+	sort.Sort(commentsByOffset(file.Comments))
+}
+
+// SortList sorts a list value's elements. Only runs of elements on contiguous
+// lines are sorted relative to each other, so blank-line-separated groups keep
+// their grouping; file is used to reposition comments attached to moved elements.
+func SortList(file *File, value Value) {
+	for i := 0; i < len(value.ListValue); i++ {
+		// Find a set of values on contiguous lines
+		line := value.ListValue[i].Pos.Line
+		var j int
+		for j = i + 1; j < len(value.ListValue); j++ {
+			if value.ListValue[j].Pos.Line > line+1 {
+				break
+			}
+			line = value.ListValue[j].Pos.Line
+		}
+
+		// nextPos bounds the last element's span, for comment adjustment.
+		nextPos := value.EndPos
+		if j < len(value.ListValue) {
+			nextPos = value.ListValue[j].Pos
+		}
+		sortSubList(value.ListValue[i:j], nextPos, file)
+		// Continue after the run just sorted.
+		i = j - 1
+	}
+}
+
+// ListIsSorted reports whether every run of list elements on contiguous lines
+// is already sorted, using the same grouping rule as SortList.
+func ListIsSorted(value Value) bool {
+	for i := 0; i < len(value.ListValue); i++ {
+		// Find a set of values on contiguous lines
+		line := value.ListValue[i].Pos.Line
+		var j int
+		for j = i + 1; j < len(value.ListValue); j++ {
+			if value.ListValue[j].Pos.Line > line+1 {
+				break
+			}
+			line = value.ListValue[j].Pos.Line
+		}
+
+		if !subListIsSorted(value.ListValue[i:j]) {
+			return false
+		}
+		// Continue after the run just checked.
+		i = j - 1
+	}
+
+	return true
+}
+
+// sortListsInValue recursively sorts every list reachable from value.
+// Variable references are left untouched (their definition is sorted where
+// it was assigned); expressions and maps are descended into.
+func sortListsInValue(value Value, file *File) {
+	if value.Variable != "" {
+		return
+	}
+
+	if value.Expression != nil {
+		sortListsInValue(value.Expression.Args[0], file)
+		sortListsInValue(value.Expression.Args[1], file)
+		return
+	}
+
+	switch value.Type {
+	case Map:
+		for _, prop := range value.MapValue {
+			sortListsInValue(prop.Value, file)
+		}
+	case List:
+		SortList(file, value)
+	}
+}
+
+// sortSubList sorts values (a run of string list elements on contiguous
+// lines) by string value in place, rewriting each element's position to the
+// slot it lands in and dragging along comments inside a moved element's span.
+func sortSubList(values []Value, nextPos scanner.Position, file *File) {
+	l := make(elemList, len(values))
+	for i, v := range values {
+		if v.Type != String {
+			panic("list contains non-string element")
+		}
+		// An element's source span runs from its own position to the next
+		// element's position (nextPos for the final element).
+		n := nextPos
+		if i < len(values)-1 {
+			n = values[i+1].Pos
+		}
+		l[i] = elem{v.StringValue, i, v.Pos, n}
+	}
+
+	sort.Sort(l)
+
+	// Read from copies so in-place writes below don't corrupt later reads.
+	copyValues := append([]Value{}, values...)
+	copyComments := append([]Comment{}, file.Comments...)
+
+	curPos := values[0].Pos
+	for i, e := range l {
+		values[i] = copyValues[e.i]
+		values[i].Pos = curPos
+		for j, c := range copyComments {
+			// Move comments that fell inside the relocated element's span.
+			if c.Pos.Offset > e.pos.Offset && c.Pos.Offset < e.nextPos.Offset {
+				file.Comments[j].Pos.Line = curPos.Line
+				file.Comments[j].Pos.Offset += values[i].Pos.Offset - e.pos.Offset
+			}
+		}
+
+		// Advance by the element's original span width; one line per element.
+		curPos.Offset += e.nextPos.Offset - e.pos.Offset
+		curPos.Line++
+	}
+}
+
+// subListIsSorted reports whether the string values are in non-descending
+// order; panics if any element is not a string.
+func subListIsSorted(values []Value) bool {
+	for i, v := range values {
+		if v.Type != String {
+			panic("list contains non-string element")
+		}
+		if i > 0 && values[i-1].StringValue > v.StringValue {
+			return false
+		}
+	}
+	return true
+}
+
+// elem pairs a list element's string value with its original index and the
+// source span [pos, nextPos) it occupied; used while sorting.
+type elem struct {
+	s string
+	i int
+	pos scanner.Position
+	nextPos scanner.Position
+}
+
+// elemList implements sort.Interface, ordering elems lexicographically by
+// their string value.
+type elemList []elem
+
+func (l elemList) Len() int {
+	return len(l)
+}
+
+func (l elemList) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
+}
+
+func (l elemList) Less(i, j int) bool {
+	return l[i].s < l[j].s
+}
+
+// commentsByOffset implements sort.Interface, ordering comments by their
+// byte offset in the source.
+type commentsByOffset []Comment
+
+func (l commentsByOffset) Len() int {
+	return len(l)
+}
+
+func (l commentsByOffset) Less(i, j int) bool {
+	return l[i].Pos.Offset < l[j].Pos.Offset
+}
+
+func (l commentsByOffset) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
+}
diff --git a/blueprint/pathtools/glob.go b/blueprint/pathtools/glob.go
new file mode 100644
index 0000000000000000000000000000000000000000..55416436931592eb77031176ac990cd5c48a4d05
--- /dev/null
+++ b/blueprint/pathtools/glob.go
@@ -0,0 +1,289 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pathtools
+
+import (
+ "errors"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// GlobMultipleRecursiveErr is returned when a pattern contains more than one "**" element.
+var GlobMultipleRecursiveErr = errors.New("pattern contains multiple **")
+// GlobLastRecursiveErr is returned when "**" is the final element of a pattern.
+var GlobLastRecursiveErr = errors.New("pattern ** as last path element")
+
+// Glob returns the list of files that match the given pattern along with the
+// list of directories that were searched to construct the file list.
+// The supported glob patterns are equivalent to filepath.Glob, with an
+// extension that recursive glob (** matching zero or more complete path
+// entries) is supported.
+//
+// Equivalent to GlobWithExcludes(pattern, nil).
+func Glob(pattern string) (matches, dirs []string, err error) {
+	return GlobWithExcludes(pattern, nil)
+}
+
+// GlobWithExcludes returns the list of files that match the given pattern but
+// do not match the given exclude patterns, along with the list of directories
+// that were searched to construct the file list. The supported glob and
+// exclude patterns are equivalent to filepath.Glob, with an extension that
+// recursive glob (** matching zero or more complete path entries) is supported.
+func GlobWithExcludes(pattern string, excludes []string) (matches, dirs []string, err error) {
+	// A trailing ** element is rejected up front.
+	if filepath.Base(pattern) == "**" {
+		return nil, nil, GlobLastRecursiveErr
+	}
+
+	matches, dirs, err = glob(pattern, false)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	matches, err = filterExcludes(matches, excludes)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return matches, dirs, nil
+}
+
+// glob is a recursive helper function to handle globbing each level of the pattern individually,
+// allowing searched directories to be tracked. Also handles the recursive glob pattern, **.
+//
+// It returns the files matching pattern plus every directory whose listing
+// was consulted, so callers can use dirs as rebuild dependencies.
+func glob(pattern string, hasRecursive bool) (matches, dirs []string, err error) {
+	if !isWild(pattern) {
+		// If there are no wilds in the pattern, check whether the file exists or not.
+		// Uses filepath.Glob instead of manually statting to get consistent results.
+		pattern = filepath.Clean(pattern)
+		matches, err = filepath.Glob(pattern)
+		if err != nil {
+			return matches, dirs, err
+		}
+
+		if len(matches) == 0 {
+			// Some part of the non-wild pattern didn't exist. Add the last existing directory
+			// as a dependency.
+			var matchDirs []string
+			for len(matchDirs) == 0 {
+				pattern, _ = saneSplit(pattern)
+				matchDirs, err = filepath.Glob(pattern)
+				if err != nil {
+					return matches, dirs, err
+				}
+			}
+			dirs = append(dirs, matchDirs...)
+		}
+		return matches, dirs, err
+	}
+
+	dir, file := saneSplit(pattern)
+
+	if file == "**" {
+		if hasRecursive {
+			return matches, dirs, GlobMultipleRecursiveErr
+		}
+		hasRecursive = true
+	}
+
+	// Glob the parent directory first, accumulating its searched dirs.
+	dirMatches, dirs, err := glob(dir, hasRecursive)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	for _, m := range dirMatches {
+		// Fix: the original discarded the os.Stat error and called IsDir on
+		// a possibly-nil FileInfo, panicking on dangling symlinks or files
+		// removed between the Glob and the Stat. Skip unstattable entries.
+		info, statErr := os.Stat(m)
+		if statErr != nil || !info.IsDir() {
+			continue
+		}
+		if file == "**" {
+			// ** matches zero or more path entries: every dir under m.
+			recurseDirs, err := walkAllDirs(m)
+			if err != nil {
+				return nil, nil, err
+			}
+			matches = append(matches, recurseDirs...)
+		} else {
+			dirs = append(dirs, m)
+			newMatches, err := filepath.Glob(filepath.Join(m, file))
+			if err != nil {
+				return nil, nil, err
+			}
+			matches = append(matches, newMatches...)
+		}
+	}
+
+	return matches, dirs, nil
+}
+
+// Faster version of dir, file := filepath.Dir(path), filepath.File(path) with no allocations
+// Similar to filepath.Split, but returns "." if dir is empty and trims trailing slash if dir is
+// not "/". Returns ".", "" if path is "."
+func saneSplit(path string) (dir, file string) {
+	if path == "." {
+		return ".", ""
+	}
+	dir, file = filepath.Split(path)
+	switch dir {
+	case "":
+		// No directory component: treat as the current directory.
+		return ".", file
+	case "/":
+		// Root keeps its slash.
+		return "/", file
+	}
+	// Drop the trailing separator filepath.Split leaves on dir.
+	return dir[:len(dir)-1], file
+}
+
+// isWild reports whether pattern contains any glob metacharacter (*, ?, or [).
+func isWild(pattern string) bool {
+	for _, r := range pattern {
+		switch r {
+		case '*', '?', '[':
+			return true
+		}
+	}
+	return false
+}
+
+// Returns a list of all directories under dir
+func walkAllDirs(dir string) (dirs []string, err error) {
+	err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			// Propagate walk errors (unreadable entry, etc.) and stop.
+			return err
+		}
+
+		if info.Mode().IsDir() {
+			dirs = append(dirs, path)
+		}
+		return nil
+	})
+
+	return dirs, err
+}
+
+// Filters the strings in matches based on the glob patterns in excludes. Hierarchical (a/*) and
+// recursive (**) glob patterns are supported.
+func filterExcludes(matches []string, excludes []string) ([]string, error) {
+	// Fast path: nothing to exclude.
+	if len(excludes) == 0 {
+		return matches, nil
+	}
+
+	var ret []string
+matchLoop:
+	for _, m := range matches {
+		for _, e := range excludes {
+			exclude, err := match(e, m)
+			if err != nil {
+				return nil, err
+			}
+			if exclude {
+				// Any matching exclude pattern drops the entry entirely.
+				continue matchLoop
+			}
+		}
+		ret = append(ret, m)
+	}
+
+	return ret, nil
+}
+
+// match returns true if name matches pattern using the same rules as filepath.Match, but supporting
+// hierarchical patterns (a/*) and recursive globs (**).
+func match(pattern, name string) (bool, error) {
+	if filepath.Base(pattern) == "**" {
+		return false, GlobLastRecursiveErr
+	}
+
+	// Walk both paths from the rightmost element leftward, matching one
+	// element per iteration.
+	for {
+		var patternFile, nameFile string
+		pattern, patternFile = saneSplit(pattern)
+		name, nameFile = saneSplit(name)
+
+		if patternFile == "**" {
+			// ** mid-pattern: the remaining pattern must match a prefix of
+			// the remaining name.
+			return matchPrefix(pattern, filepath.Join(name, nameFile))
+		}
+
+		if nameFile == "" && patternFile == "" {
+			// Both exhausted together: full match.
+			return true, nil
+		} else if nameFile == "" || patternFile == "" {
+			// Different depths: no match.
+			return false, nil
+		}
+
+		match, err := filepath.Match(patternFile, nameFile)
+		if err != nil || !match {
+			return match, err
+		}
+	}
+}
+
+// matchPrefix returns true if the beginning of name matches pattern using the same rules as
+// filepath.Match, but supporting hierarchical patterns (a/*). Recursive globs (**) are not
+// supported, they should have been handled in match().
+func matchPrefix(pattern, name string) (bool, error) {
+	// Absolute patterns can only match absolute names.
+	if len(pattern) > 0 && pattern[0] == '/' {
+		if len(name) > 0 && name[0] == '/' {
+			pattern = pattern[1:]
+			name = name[1:]
+		} else {
+			return false, nil
+		}
+	}
+
+	// Walk both paths left to right, one element per iteration.
+	for {
+		var patternElem, nameElem string
+		patternElem, pattern = saneSplitFirst(pattern)
+		nameElem, name = saneSplitFirst(name)
+
+		// Treat "." elements as empty (exhausted).
+		if patternElem == "." {
+			patternElem = ""
+		}
+		if nameElem == "." {
+			nameElem = ""
+		}
+
+		if patternElem == "**" {
+			return false, GlobMultipleRecursiveErr
+		}
+
+		if patternElem == "" {
+			// Pattern exhausted: the prefix matched.
+			return true, nil
+		} else if nameElem == "" {
+			// Name exhausted before the pattern: no match.
+			return false, nil
+		}
+
+		match, err := filepath.Match(patternElem, nameElem)
+		if err != nil || !match {
+			return match, err
+		}
+	}
+}
+
+// saneSplitFirst splits path at its first separator, returning the leading
+// element and the remainder. A path without a separator returns (path, "").
+func saneSplitFirst(path string) (string, string) {
+	if i := strings.IndexRune(path, filepath.Separator); i >= 0 {
+		return path[:i], path[i+1:]
+	}
+	return path, ""
+}
+
+// GlobPatternList joins each pattern with prefix and expands the ones that
+// contain glob metacharacters, returning the combined match list plus the
+// directories searched (for dependency tracking). Non-wild patterns are
+// passed through after joining, without checking that they exist.
+func GlobPatternList(patterns []string, prefix string) (globedList []string, depDirs []string, err error) {
+	var (
+		matches []string
+		deps []string
+	)
+
+	// Non-nil empty slices so callers always get valid (possibly empty) lists.
+	globedList = make([]string, 0)
+	depDirs = make([]string, 0)
+
+	for _, pattern := range patterns {
+		if isWild(pattern) {
+			matches, deps, err = Glob(filepath.Join(prefix, pattern))
+			if err != nil {
+				return nil, nil, err
+			}
+			globedList = append(globedList, matches...)
+			depDirs = append(depDirs, deps...)
+		} else {
+			// Not a glob: include verbatim.
+			globedList = append(globedList, filepath.Join(prefix, pattern))
+		}
+	}
+	return globedList, depDirs, nil
+}
diff --git a/blueprint/pathtools/glob_test.go b/blueprint/pathtools/glob_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..b082267dc9fa4419206d1ca67b5da0d32572dbf3
--- /dev/null
+++ b/blueprint/pathtools/glob_test.go
@@ -0,0 +1,455 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pathtools
+
+import (
+ "os"
+ "path/filepath"
+ "reflect"
+ "testing"
+)
+
+var pwd, _ = os.Getwd()
+
+var globTestCases = []struct {
+ pattern string
+ matches []string
+ excludes []string
+ dirs []string
+ err error
+}{
+ // Current directory tests
+ {
+ pattern: "*",
+ matches: []string{"a", "b", "c", "d.ext", "e.ext"},
+ dirs: []string{"."},
+ },
+ {
+ pattern: "*.ext",
+ matches: []string{"d.ext", "e.ext"},
+ dirs: []string{"."},
+ },
+ {
+ pattern: "*/a",
+ matches: []string{"a/a", "b/a"},
+ dirs: []string{".", "a", "b", "c"},
+ },
+ {
+ pattern: "*/*/a",
+ matches: []string{"a/a/a"},
+ dirs: []string{".", "a", "b", "c", "a/a", "a/b", "c/f", "c/g", "c/h"},
+ },
+ {
+ pattern: "*/a/a",
+ matches: []string{"a/a/a"},
+ dirs: []string{".", "a", "b", "c", "a/a"},
+ },
+
+ // ./ directory tests
+ {
+ pattern: "./*",
+ matches: []string{"a", "b", "c", "d.ext", "e.ext"},
+ dirs: []string{"."},
+ },
+ {
+ pattern: "./*.ext",
+ matches: []string{"d.ext", "e.ext"},
+ dirs: []string{"."},
+ },
+ {
+ pattern: "./*/a",
+ matches: []string{"a/a", "b/a"},
+ dirs: []string{".", "a", "b", "c"},
+ },
+ {
+ pattern: "./[ac]/a",
+ matches: []string{"a/a"},
+ dirs: []string{".", "a", "c"},
+ },
+
+ // subdirectory tests
+ {
+ pattern: "c/*/*.ext",
+ matches: []string{"c/f/f.ext", "c/g/g.ext"},
+ dirs: []string{"c", "c/f", "c/g", "c/h"},
+ },
+ {
+ pattern: "a/*/a",
+ matches: []string{"a/a/a"},
+ dirs: []string{"a", "a/a", "a/b"},
+ },
+
+ // absolute tests
+ {
+ pattern: filepath.Join(pwd, "testdata/c/*/*.ext"),
+ matches: []string{
+ filepath.Join(pwd, "testdata/c/f/f.ext"),
+ filepath.Join(pwd, "testdata/c/g/g.ext"),
+ },
+ dirs: []string{
+ filepath.Join(pwd, "testdata/c"),
+ filepath.Join(pwd, "testdata/c/f"),
+ filepath.Join(pwd, "testdata/c/g"),
+ filepath.Join(pwd, "testdata/c/h"),
+ },
+ },
+
+ // no-wild tests
+ {
+ pattern: "a",
+ matches: []string{"a"},
+ dirs: nil,
+ },
+ {
+ pattern: "a/a",
+ matches: []string{"a/a"},
+ dirs: nil,
+ },
+
+ // clean tests
+ {
+ pattern: "./c/*/*.ext",
+ matches: []string{"c/f/f.ext", "c/g/g.ext"},
+ dirs: []string{"c", "c/f", "c/g", "c/h"},
+ },
+ {
+ pattern: "c/../c/*/*.ext",
+ matches: []string{"c/f/f.ext", "c/g/g.ext"},
+ dirs: []string{"c", "c/f", "c/g", "c/h"},
+ },
+
+ // recursive tests
+ {
+ pattern: "**/a",
+ matches: []string{"a", "a/a", "a/a/a", "b/a"},
+ dirs: []string{".", "a", "a/a", "a/b", "b", "c", "c/f", "c/g", "c/h"},
+ },
+ {
+ pattern: "a/**/a",
+ matches: []string{"a/a", "a/a/a"},
+ dirs: []string{"a", "a/a", "a/b"},
+ },
+ {
+ pattern: "a/**/*",
+ matches: []string{"a/a", "a/b", "a/a/a", "a/b/b"},
+ dirs: []string{"a", "a/a", "a/b"},
+ },
+
+ // absolute recursive tests
+ {
+ pattern: filepath.Join(pwd, "testdata/**/*.ext"),
+ matches: []string{
+ filepath.Join(pwd, "testdata/d.ext"),
+ filepath.Join(pwd, "testdata/e.ext"),
+ filepath.Join(pwd, "testdata/c/f/f.ext"),
+ filepath.Join(pwd, "testdata/c/g/g.ext"),
+ },
+ dirs: []string{
+ filepath.Join(pwd, "testdata"),
+ filepath.Join(pwd, "testdata/a"),
+ filepath.Join(pwd, "testdata/a/a"),
+ filepath.Join(pwd, "testdata/a/b"),
+ filepath.Join(pwd, "testdata/b"),
+ filepath.Join(pwd, "testdata/c"),
+ filepath.Join(pwd, "testdata/c/f"),
+ filepath.Join(pwd, "testdata/c/g"),
+ filepath.Join(pwd, "testdata/c/h"),
+ },
+ },
+
+ // recursive error tests
+ {
+ pattern: "**/**/*",
+ err: GlobMultipleRecursiveErr,
+ },
+ {
+ pattern: "a/**/**/*",
+ err: GlobMultipleRecursiveErr,
+ },
+ {
+ pattern: "**/a/**/*",
+ err: GlobMultipleRecursiveErr,
+ },
+ {
+ pattern: "**/**/a/*",
+ err: GlobMultipleRecursiveErr,
+ },
+ {
+ pattern: "a/**",
+ err: GlobLastRecursiveErr,
+ },
+ {
+ pattern: "**/**",
+ err: GlobLastRecursiveErr,
+ },
+
+ // exclude tests
+ {
+ pattern: "*.ext",
+ excludes: []string{"d.ext"},
+ matches: []string{"e.ext"},
+ dirs: []string{"."},
+ },
+ {
+ pattern: "*/*",
+ excludes: []string{"a/b"},
+ matches: []string{"a/a", "b/a", "c/c", "c/f", "c/g", "c/h"},
+ dirs: []string{".", "a", "b", "c"},
+ },
+ {
+ pattern: "*/*",
+ excludes: []string{"a/b", "c/c"},
+ matches: []string{"a/a", "b/a", "c/f", "c/g", "c/h"},
+ dirs: []string{".", "a", "b", "c"},
+ },
+ {
+ pattern: "*/*",
+ excludes: []string{"c/*", "*/a"},
+ matches: []string{"a/b"},
+ dirs: []string{".", "a", "b", "c"},
+ },
+ {
+ pattern: "*/*",
+ excludes: []string{"*/*"},
+ matches: nil,
+ dirs: []string{".", "a", "b", "c"},
+ },
+
+ // absolute exclude tests
+ {
+ pattern: filepath.Join(pwd, "testdata/c/*/*.ext"),
+ excludes: []string{filepath.Join(pwd, "testdata/c/*/f.ext")},
+ matches: []string{
+ filepath.Join(pwd, "testdata/c/g/g.ext"),
+ },
+ dirs: []string{
+ filepath.Join(pwd, "testdata/c"),
+ filepath.Join(pwd, "testdata/c/f"),
+ filepath.Join(pwd, "testdata/c/g"),
+ filepath.Join(pwd, "testdata/c/h"),
+ },
+ },
+ {
+ pattern: filepath.Join(pwd, "testdata/c/*/*.ext"),
+ excludes: []string{filepath.Join(pwd, "testdata/c/f/*.ext")},
+ matches: []string{
+ filepath.Join(pwd, "testdata/c/g/g.ext"),
+ },
+ dirs: []string{
+ filepath.Join(pwd, "testdata/c"),
+ filepath.Join(pwd, "testdata/c/f"),
+ filepath.Join(pwd, "testdata/c/g"),
+ filepath.Join(pwd, "testdata/c/h"),
+ },
+ },
+
+ // recursive exclude tests
+ {
+ pattern: "*.ext",
+ excludes: []string{"**/*.ext"},
+ matches: nil,
+ dirs: []string{"."},
+ },
+ {
+ pattern: "*/*",
+ excludes: []string{"**/b"},
+ matches: []string{"a/a", "b/a", "c/c", "c/f", "c/g", "c/h"},
+ dirs: []string{".", "a", "b", "c"},
+ },
+ {
+ pattern: "*/*",
+ excludes: []string{"a/**/*"},
+ matches: []string{"b/a", "c/c", "c/f", "c/g", "c/h"},
+ dirs: []string{".", "a", "b", "c"},
+ },
+ {
+ pattern: "**/*",
+ excludes: []string{"**/*"},
+ matches: nil,
+ dirs: []string{".", "a", "a/a", "a/b", "b", "c", "c/f", "c/g", "c/h"},
+ },
+ {
+ pattern: "*/*/*",
+ excludes: []string{"a/**/a"},
+ matches: []string{"a/b/b", "c/f/f.ext", "c/g/g.ext", "c/h/h"},
+ dirs: []string{".", "a", "b", "c", "a/a", "a/b", "c/f", "c/g", "c/h"},
+ },
+ {
+ pattern: "*/*/*",
+ excludes: []string{"**/a"},
+ matches: []string{"a/b/b", "c/f/f.ext", "c/g/g.ext", "c/h/h"},
+ dirs: []string{".", "a", "b", "c", "a/a", "a/b", "c/f", "c/g", "c/h"},
+ },
+ {
+ pattern: "c/*/*.ext",
+ excludes: []string{"c/**/f.ext"},
+ matches: []string{"c/g/g.ext"},
+ dirs: []string{"c", "c/f", "c/g", "c/h"},
+ },
+
+	// absolute recursive exclude tests
+ {
+ pattern: filepath.Join(pwd, "testdata/c/*/*.ext"),
+ excludes: []string{filepath.Join(pwd, "testdata/**/f.ext")},
+ matches: []string{
+ filepath.Join(pwd, "testdata/c/g/g.ext"),
+ },
+ dirs: []string{
+ filepath.Join(pwd, "testdata/c"),
+ filepath.Join(pwd, "testdata/c/f"),
+ filepath.Join(pwd, "testdata/c/g"),
+ filepath.Join(pwd, "testdata/c/h"),
+ },
+ },
+
+ // clean exclude tests
+ {
+ pattern: "./c/*/*.ext",
+ excludes: []string{"./c/*/f.ext"},
+ matches: []string{"c/g/g.ext"},
+ dirs: []string{"c", "c/f", "c/g", "c/h"},
+ },
+ {
+ pattern: "c/*/*.ext",
+ excludes: []string{"./c/*/f.ext"},
+ matches: []string{"c/g/g.ext"},
+ dirs: []string{"c", "c/f", "c/g", "c/h"},
+ },
+ {
+ pattern: "./c/*/*.ext",
+ excludes: []string{"c/*/f.ext"},
+ matches: []string{"c/g/g.ext"},
+ dirs: []string{"c", "c/f", "c/g", "c/h"},
+ },
+
+	// non-existent non-wild path tests
+ {
+ pattern: "d/*",
+ matches: nil,
+ dirs: []string{"."},
+ },
+ {
+ pattern: "d",
+ matches: nil,
+ dirs: []string{"."},
+ },
+ {
+ pattern: "a/d/*",
+ matches: nil,
+ dirs: []string{"a"},
+ },
+ {
+ pattern: "a/d",
+ matches: nil,
+ dirs: []string{"a"},
+ },
+ {
+ pattern: "a/a/d/*",
+ matches: nil,
+ dirs: []string{"a/a"},
+ },
+ {
+ pattern: "a/a/d",
+ matches: nil,
+ dirs: []string{"a/a"},
+ },
+ {
+ pattern: "a/d/a/*",
+ matches: nil,
+ dirs: []string{"a"},
+ },
+ {
+ pattern: "a/d/a",
+ matches: nil,
+ dirs: []string{"a"},
+ },
+ {
+ pattern: "a/d/a/*/a",
+ matches: nil,
+ dirs: []string{"a"},
+ },
+ {
+ pattern: "a/d/a/**/a",
+ matches: nil,
+ dirs: []string{"a"},
+ },
+
+ // recursive exclude error tests
+ {
+ pattern: "**/*",
+ excludes: []string{"**/**/*"},
+ err: GlobMultipleRecursiveErr,
+ },
+ {
+ pattern: "**/*",
+ excludes: []string{"a/**/**/*"},
+ err: GlobMultipleRecursiveErr,
+ },
+ {
+ pattern: "**/*",
+ excludes: []string{"**/a/**/*"},
+ err: GlobMultipleRecursiveErr,
+ },
+ {
+ pattern: "**/*",
+ excludes: []string{"**/**/a/*"},
+ err: GlobMultipleRecursiveErr,
+ },
+ {
+ pattern: "**/*",
+ excludes: []string{"a/**"},
+ err: GlobLastRecursiveErr,
+ },
+ {
+ pattern: "**/*",
+ excludes: []string{"**/**"},
+ err: GlobLastRecursiveErr,
+ },
+}
+
+func TestGlob(t *testing.T) {
+ os.Chdir("testdata")
+ defer os.Chdir("..")
+ for _, testCase := range globTestCases {
+ matches, dirs, err := GlobWithExcludes(testCase.pattern, testCase.excludes)
+ if err != testCase.err {
+ t.Errorf(" pattern: %q", testCase.pattern)
+ if testCase.excludes != nil {
+ t.Errorf("excludes: %q", testCase.excludes)
+ }
+ t.Errorf(" error: %s", err)
+ continue
+ }
+
+ if !reflect.DeepEqual(matches, testCase.matches) {
+ t.Errorf("incorrect matches list:")
+ t.Errorf(" pattern: %q", testCase.pattern)
+ if testCase.excludes != nil {
+ t.Errorf("excludes: %q", testCase.excludes)
+ }
+ t.Errorf(" got: %#v", matches)
+ t.Errorf("expected: %#v", testCase.matches)
+ }
+ if !reflect.DeepEqual(dirs, testCase.dirs) {
+ t.Errorf("incorrect dirs list:")
+ t.Errorf(" pattern: %q", testCase.pattern)
+ if testCase.excludes != nil {
+ t.Errorf("excludes: %q", testCase.excludes)
+ }
+ t.Errorf(" got: %#v", dirs)
+ t.Errorf("expected: %#v", testCase.dirs)
+ }
+ }
+}
diff --git a/blueprint/pathtools/lists.go b/blueprint/pathtools/lists.go
new file mode 100644
index 0000000000000000000000000000000000000000..fbde88a243437caa7525ece6bcb3b612ea4b7d85
--- /dev/null
+++ b/blueprint/pathtools/lists.go
@@ -0,0 +1,47 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pathtools
+
+import (
+ "path/filepath"
+ "strings"
+)
+
+// PrefixPaths returns a list of paths consisting of prefix joined with each
+// element of paths. The resulting paths are "clean" in the filepath.Clean
+// sense.
+func PrefixPaths(paths []string, prefix string) []string {
+ result := make([]string, len(paths))
+ for i, path := range paths {
+ result[i] = filepath.Join(prefix, path)
+ }
+ return result
+}
+
+func ReplaceExtensions(paths []string, extension string) []string {
+ result := make([]string, len(paths))
+ for i, path := range paths {
+ result[i] = ReplaceExtension(path, extension)
+ }
+ return result
+}
+
+func ReplaceExtension(path string, extension string) string {
+ dot := strings.LastIndex(path, ".")
+ if dot == -1 {
+ return path
+ }
+ return path[:dot+1] + extension
+}
diff --git a/blueprint/pathtools/testdata/a/a/a b/blueprint/pathtools/testdata/a/a/a
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/blueprint/pathtools/testdata/a/b/b b/blueprint/pathtools/testdata/a/b/b
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/blueprint/pathtools/testdata/b/a b/blueprint/pathtools/testdata/b/a
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/blueprint/pathtools/testdata/c/c b/blueprint/pathtools/testdata/c/c
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/blueprint/pathtools/testdata/c/f/f.ext b/blueprint/pathtools/testdata/c/f/f.ext
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/blueprint/pathtools/testdata/c/g/g.ext b/blueprint/pathtools/testdata/c/g/g.ext
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/blueprint/pathtools/testdata/c/h/h b/blueprint/pathtools/testdata/c/h/h
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/blueprint/pathtools/testdata/d.ext b/blueprint/pathtools/testdata/d.ext
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/blueprint/pathtools/testdata/e.ext b/blueprint/pathtools/testdata/e.ext
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/blueprint/proptools/clone.go b/blueprint/proptools/clone.go
new file mode 100644
index 0000000000000000000000000000000000000000..3b23a5706d4cd858588b79141e7d0a42064c3546
--- /dev/null
+++ b/blueprint/proptools/clone.go
@@ -0,0 +1,252 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proptools
+
+import (
+ "fmt"
+ "reflect"
+)
+
+func CloneProperties(structValue reflect.Value) reflect.Value {
+ result := reflect.New(structValue.Type())
+ CopyProperties(result.Elem(), structValue)
+ return result
+}
+
+func CopyProperties(dstValue, srcValue reflect.Value) {
+ typ := dstValue.Type()
+ if srcValue.Type() != typ {
+ panic(fmt.Errorf("can't copy mismatching types (%s <- %s)",
+ dstValue.Kind(), srcValue.Kind()))
+ }
+
+ for i := 0; i < srcValue.NumField(); i++ {
+ field := typ.Field(i)
+ if field.PkgPath != "" {
+ // The field is not exported so just skip it.
+ continue
+ }
+
+ srcFieldValue := srcValue.Field(i)
+ dstFieldValue := dstValue.Field(i)
+ dstFieldInterfaceValue := reflect.Value{}
+
+ switch srcFieldValue.Kind() {
+ case reflect.Bool, reflect.String, reflect.Int, reflect.Uint:
+ dstFieldValue.Set(srcFieldValue)
+ case reflect.Struct:
+ CopyProperties(dstFieldValue, srcFieldValue)
+ case reflect.Slice:
+ if !srcFieldValue.IsNil() {
+ if field.Type.Elem().Kind() != reflect.String {
+ panic(fmt.Errorf("can't copy field %q: slice elements are not strings", field.Name))
+ }
+ if srcFieldValue != dstFieldValue {
+ newSlice := reflect.MakeSlice(field.Type, srcFieldValue.Len(),
+ srcFieldValue.Len())
+ reflect.Copy(newSlice, srcFieldValue)
+ dstFieldValue.Set(newSlice)
+ }
+ } else {
+ dstFieldValue.Set(srcFieldValue)
+ }
+ case reflect.Interface:
+ if srcFieldValue.IsNil() {
+ dstFieldValue.Set(srcFieldValue)
+ break
+ }
+
+ srcFieldValue = srcFieldValue.Elem()
+
+ if srcFieldValue.Kind() != reflect.Ptr {
+ panic(fmt.Errorf("can't clone field %q: interface refers to a non-pointer",
+ field.Name))
+ }
+ if srcFieldValue.Type().Elem().Kind() != reflect.Struct {
+ panic(fmt.Errorf("can't clone field %q: interface points to a non-struct",
+ field.Name))
+ }
+
+ if dstFieldValue.IsNil() || dstFieldValue.Elem().Type() != srcFieldValue.Type() {
+ // We can't use the existing destination allocation, so
+ // clone a new one.
+ newValue := reflect.New(srcFieldValue.Type()).Elem()
+ dstFieldValue.Set(newValue)
+ dstFieldInterfaceValue = dstFieldValue
+ dstFieldValue = newValue
+ } else {
+ dstFieldValue = dstFieldValue.Elem()
+ }
+ fallthrough
+ case reflect.Ptr:
+ if srcFieldValue.IsNil() {
+ dstFieldValue.Set(srcFieldValue)
+ break
+ }
+
+ srcFieldValue := srcFieldValue.Elem()
+
+ switch srcFieldValue.Kind() {
+ case reflect.Struct:
+ if !dstFieldValue.IsNil() {
+ // Re-use the existing allocation.
+ CopyProperties(dstFieldValue.Elem(), srcFieldValue)
+ break
+ } else {
+ newValue := CloneProperties(srcFieldValue)
+ if dstFieldInterfaceValue.IsValid() {
+ dstFieldInterfaceValue.Set(newValue)
+ } else {
+ dstFieldValue.Set(newValue)
+ }
+ }
+ case reflect.Bool, reflect.String:
+ newValue := reflect.New(srcFieldValue.Type())
+ newValue.Elem().Set(srcFieldValue)
+ dstFieldValue.Set(newValue)
+ default:
+ panic(fmt.Errorf("can't clone field %q: points to a %s",
+ field.Name, srcFieldValue.Kind()))
+ }
+ default:
+ panic(fmt.Errorf("unexpected kind for property struct field %q: %s",
+ field.Name, srcFieldValue.Kind()))
+ }
+ }
+}
+
+func ZeroProperties(structValue reflect.Value) {
+ typ := structValue.Type()
+
+ for i := 0; i < structValue.NumField(); i++ {
+ field := typ.Field(i)
+ if field.PkgPath != "" {
+ // The field is not exported so just skip it.
+ continue
+ }
+
+ fieldValue := structValue.Field(i)
+
+ switch fieldValue.Kind() {
+ case reflect.Bool, reflect.String, reflect.Slice, reflect.Int, reflect.Uint:
+ fieldValue.Set(reflect.Zero(fieldValue.Type()))
+ case reflect.Interface:
+ if fieldValue.IsNil() {
+ break
+ }
+
+ // We leave the pointer intact and zero out the struct that's
+ // pointed to.
+ fieldValue = fieldValue.Elem()
+ if fieldValue.Kind() != reflect.Ptr {
+ panic(fmt.Errorf("can't zero field %q: interface refers to a non-pointer",
+ field.Name))
+ }
+ if fieldValue.Type().Elem().Kind() != reflect.Struct {
+ panic(fmt.Errorf("can't zero field %q: interface points to a non-struct",
+ field.Name))
+ }
+ fallthrough
+ case reflect.Ptr:
+ switch fieldValue.Type().Elem().Kind() {
+ case reflect.Struct:
+ if fieldValue.IsNil() {
+ break
+ }
+ ZeroProperties(fieldValue.Elem())
+ case reflect.Bool, reflect.String:
+ fieldValue.Set(reflect.Zero(fieldValue.Type()))
+ default:
+ panic(fmt.Errorf("can't zero field %q: points to a %s",
+ field.Name, fieldValue.Elem().Kind()))
+ }
+ case reflect.Struct:
+ ZeroProperties(fieldValue)
+ default:
+ panic(fmt.Errorf("unexpected kind for property struct field %q: %s",
+ field.Name, fieldValue.Kind()))
+ }
+ }
+}
+
+func CloneEmptyProperties(structValue reflect.Value) reflect.Value {
+ result := reflect.New(structValue.Type())
+ cloneEmptyProperties(result.Elem(), structValue)
+ return result
+}
+
+func cloneEmptyProperties(dstValue, srcValue reflect.Value) {
+ typ := srcValue.Type()
+ for i := 0; i < srcValue.NumField(); i++ {
+ field := typ.Field(i)
+ if field.PkgPath != "" {
+ // The field is not exported so just skip it.
+ continue
+ }
+
+ srcFieldValue := srcValue.Field(i)
+ dstFieldValue := dstValue.Field(i)
+ dstFieldInterfaceValue := reflect.Value{}
+
+ switch srcFieldValue.Kind() {
+ case reflect.Bool, reflect.String, reflect.Slice, reflect.Int, reflect.Uint:
+ // Nothing
+ case reflect.Struct:
+ cloneEmptyProperties(dstFieldValue, srcFieldValue)
+ case reflect.Interface:
+ if srcFieldValue.IsNil() {
+ break
+ }
+
+ srcFieldValue = srcFieldValue.Elem()
+ if srcFieldValue.Kind() != reflect.Ptr {
+ panic(fmt.Errorf("can't clone empty field %q: interface refers to a non-pointer",
+ field.Name))
+ }
+ if srcFieldValue.Type().Elem().Kind() != reflect.Struct {
+ panic(fmt.Errorf("can't clone empty field %q: interface points to a non-struct",
+ field.Name))
+ }
+
+ newValue := reflect.New(srcFieldValue.Type()).Elem()
+ dstFieldValue.Set(newValue)
+ dstFieldInterfaceValue = dstFieldValue
+ dstFieldValue = newValue
+ fallthrough
+ case reflect.Ptr:
+ switch srcFieldValue.Type().Elem().Kind() {
+ case reflect.Struct:
+ if srcFieldValue.IsNil() {
+ break
+ }
+ newValue := CloneEmptyProperties(srcFieldValue.Elem())
+ if dstFieldInterfaceValue.IsValid() {
+ dstFieldInterfaceValue.Set(newValue)
+ } else {
+ dstFieldValue.Set(newValue)
+ }
+ case reflect.Bool, reflect.String:
+ // Nothing
+ default:
+ panic(fmt.Errorf("can't clone empty field %q: points to a %s",
+ field.Name, srcFieldValue.Elem().Kind()))
+ }
+
+ default:
+ panic(fmt.Errorf("unexpected kind for property struct field %q: %s",
+ field.Name, srcFieldValue.Kind()))
+ }
+ }
+}
diff --git a/blueprint/proptools/clone_test.go b/blueprint/proptools/clone_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..0679c124c5e440327f9460bd7579b663b80f6a66
--- /dev/null
+++ b/blueprint/proptools/clone_test.go
@@ -0,0 +1,479 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proptools
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+)
+
+var clonePropertiesTestCases = []struct {
+ in interface{}
+ out interface{}
+ err error
+}{
+ // Valid inputs
+
+ {
+ // Clone bool
+ in: &struct{ B1, B2 bool }{
+ B1: true,
+ B2: false,
+ },
+ out: &struct{ B1, B2 bool }{
+ B1: true,
+ B2: false,
+ },
+ },
+ {
+ // Clone strings
+ in: &struct{ S string }{
+ S: "string1",
+ },
+ out: &struct{ S string }{
+ S: "string1",
+ },
+ },
+ {
+ // Clone slice
+ in: &struct{ S []string }{
+ S: []string{"string1"},
+ },
+ out: &struct{ S []string }{
+ S: []string{"string1"},
+ },
+ },
+ {
+ // Clone empty slice
+ in: &struct{ S []string }{
+ S: []string{},
+ },
+ out: &struct{ S []string }{
+ S: []string{},
+ },
+ },
+ {
+ // Clone nil slice
+ in: &struct{ S []string }{},
+ out: &struct{ S []string }{},
+ },
+ {
+ // Clone pointer to bool
+ in: &struct{ B1, B2 *bool }{
+ B1: BoolPtr(true),
+ B2: BoolPtr(false),
+ },
+ out: &struct{ B1, B2 *bool }{
+ B1: BoolPtr(true),
+ B2: BoolPtr(false),
+ },
+ },
+ {
+ // Clone pointer to string
+ in: &struct{ S *string }{
+ S: StringPtr("string1"),
+ },
+ out: &struct{ S *string }{
+ S: StringPtr("string1"),
+ },
+ },
+ {
+ // Clone struct
+ in: &struct{ S struct{ S string } }{
+ S: struct{ S string }{
+ S: "string1",
+ },
+ },
+ out: &struct{ S struct{ S string } }{
+ S: struct{ S string }{
+ S: "string1",
+ },
+ },
+ },
+ {
+ // Clone struct pointer
+ in: &struct{ S *struct{ S string } }{
+ S: &struct{ S string }{
+ S: "string1",
+ },
+ },
+ out: &struct{ S *struct{ S string } }{
+ S: &struct{ S string }{
+ S: "string1",
+ },
+ },
+ },
+ {
+ // Clone interface
+ in: &struct{ S interface{} }{
+ S: &struct{ S string }{
+ S: "string1",
+ },
+ },
+ out: &struct{ S interface{} }{
+ S: &struct{ S string }{
+ S: "string1",
+ },
+ },
+ },
+ {
+ // Clone nested interface
+ in: &struct {
+ Nested struct{ S interface{} }
+ }{
+ Nested: struct{ S interface{} }{
+ S: &struct{ S string }{
+ S: "string1",
+ },
+ },
+ },
+ out: &struct {
+ Nested struct{ S interface{} }
+ }{
+ Nested: struct{ S interface{} }{
+ S: &struct{ S string }{
+ S: "string1",
+ },
+ },
+ },
+ }, {
+ // Empty struct
+ in: &struct{}{},
+ out: &struct{}{},
+ },
+ {
+ // Interface nil
+ in: &struct{ S interface{} }{
+ S: nil,
+ },
+ out: &struct{ S interface{} }{
+ S: nil,
+ },
+ },
+ {
+ // Interface pointer to nil
+ in: &struct{ S interface{} }{
+ S: (*struct{ S string })(nil),
+ },
+ out: &struct{ S interface{} }{
+ S: (*struct{ S string })(nil),
+ },
+ },
+ {
+ // Pointer nil
+ in: &struct{ S *struct{} }{
+ S: nil,
+ },
+ out: &struct{ S *struct{} }{
+ S: nil,
+ },
+ },
+ {
+ // Anonymous struct
+ in: &struct {
+ EmbeddedStruct
+ Nested struct{ EmbeddedStruct }
+ }{
+ EmbeddedStruct: EmbeddedStruct{
+ S: "string1",
+ },
+ Nested: struct{ EmbeddedStruct }{
+ EmbeddedStruct: EmbeddedStruct{
+ S: "string2",
+ },
+ },
+ },
+ out: &struct {
+ EmbeddedStruct
+ Nested struct{ EmbeddedStruct }
+ }{
+ EmbeddedStruct: EmbeddedStruct{
+ S: "string1",
+ },
+ Nested: struct{ EmbeddedStruct }{
+ EmbeddedStruct: EmbeddedStruct{
+ S: "string2",
+ },
+ },
+ },
+ },
+ {
+ // Anonymous interface
+ in: &struct {
+ EmbeddedInterface
+ Nested struct{ EmbeddedInterface }
+ }{
+ EmbeddedInterface: &struct{ S string }{
+ S: "string1",
+ },
+ Nested: struct{ EmbeddedInterface }{
+ EmbeddedInterface: &struct{ S string }{
+ S: "string2",
+ },
+ },
+ },
+ out: &struct {
+ EmbeddedInterface
+ Nested struct{ EmbeddedInterface }
+ }{
+ EmbeddedInterface: &struct{ S string }{
+ S: "string1",
+ },
+ Nested: struct{ EmbeddedInterface }{
+ EmbeddedInterface: &struct{ S string }{
+ S: "string2",
+ },
+ },
+ },
+ },
+}
+
+type EmbeddedStruct struct{ S string }
+type EmbeddedInterface interface{}
+
+func TestCloneProperties(t *testing.T) {
+ for _, testCase := range clonePropertiesTestCases {
+ testString := fmt.Sprintf("%s", testCase.in)
+
+ got := CloneProperties(reflect.ValueOf(testCase.in).Elem()).Interface()
+
+ if !reflect.DeepEqual(testCase.out, got) {
+ t.Errorf("test case %s", testString)
+ t.Errorf("incorrect output")
+ t.Errorf(" expected: %#v", testCase.out)
+ t.Errorf(" got: %#v", got)
+ }
+ }
+}
+
+var cloneEmptyPropertiesTestCases = []struct {
+ in interface{}
+ out interface{}
+ err error
+}{
+ // Valid inputs
+
+ {
+ // Clone bool
+ in: &struct{ B1, B2 bool }{
+ B1: true,
+ B2: false,
+ },
+ out: &struct{ B1, B2 bool }{},
+ },
+ {
+ // Clone strings
+ in: &struct{ S string }{
+ S: "string1",
+ },
+ out: &struct{ S string }{},
+ },
+ {
+ // Clone slice
+ in: &struct{ S []string }{
+ S: []string{"string1"},
+ },
+ out: &struct{ S []string }{},
+ },
+ {
+ // Clone empty slice
+ in: &struct{ S []string }{
+ S: []string{},
+ },
+ out: &struct{ S []string }{},
+ },
+ {
+ // Clone nil slice
+ in: &struct{ S []string }{},
+ out: &struct{ S []string }{},
+ },
+ {
+ // Clone pointer to bool
+ in: &struct{ B1, B2 *bool }{
+ B1: BoolPtr(true),
+ B2: BoolPtr(false),
+ },
+ out: &struct{ B1, B2 *bool }{},
+ },
+ {
+ // Clone pointer to string
+ in: &struct{ S *string }{
+ S: StringPtr("string1"),
+ },
+ out: &struct{ S *string }{},
+ },
+ {
+ // Clone struct
+ in: &struct{ S struct{ S string } }{
+ S: struct{ S string }{
+ S: "string1",
+ },
+ },
+ out: &struct{ S struct{ S string } }{
+ S: struct{ S string }{},
+ },
+ },
+ {
+ // Clone struct pointer
+ in: &struct{ S *struct{ S string } }{
+ S: &struct{ S string }{
+ S: "string1",
+ },
+ },
+ out: &struct{ S *struct{ S string } }{
+ S: &struct{ S string }{},
+ },
+ },
+ {
+ // Clone interface
+ in: &struct{ S interface{} }{
+ S: &struct{ S string }{
+ S: "string1",
+ },
+ },
+ out: &struct{ S interface{} }{
+ S: &struct{ S string }{},
+ },
+ },
+ {
+ // Clone nested interface
+ in: &struct {
+ Nested struct{ S interface{} }
+ }{
+ Nested: struct{ S interface{} }{
+ S: &struct{ S string }{
+ S: "string1",
+ },
+ },
+ },
+ out: &struct {
+ Nested struct{ S interface{} }
+ }{
+ Nested: struct{ S interface{} }{
+ S: &struct{ S string }{},
+ },
+ },
+ },
+ {
+ // Empty struct
+ in: &struct{}{},
+ out: &struct{}{},
+ },
+ {
+ // Interface nil
+ in: &struct{ S interface{} }{
+ S: nil,
+ },
+ out: &struct{ S interface{} }{},
+ },
+ {
+ // Interface pointer to nil
+ in: &struct{ S interface{} }{
+ S: (*struct{ S string })(nil),
+ },
+ out: &struct{ S interface{} }{
+ S: (*struct{ S string })(nil),
+ },
+ },
+ {
+ // Pointer nil
+ in: &struct{ S *struct{} }{
+ S: nil,
+ },
+ out: &struct{ S *struct{} }{},
+ },
+ {
+ // Anonymous struct
+ in: &struct {
+ EmbeddedStruct
+ Nested struct{ EmbeddedStruct }
+ }{
+ EmbeddedStruct: EmbeddedStruct{
+ S: "string1",
+ },
+ Nested: struct{ EmbeddedStruct }{
+ EmbeddedStruct: EmbeddedStruct{
+ S: "string2",
+ },
+ },
+ },
+ out: &struct {
+ EmbeddedStruct
+ Nested struct{ EmbeddedStruct }
+ }{
+ EmbeddedStruct: EmbeddedStruct{},
+ Nested: struct{ EmbeddedStruct }{
+ EmbeddedStruct: EmbeddedStruct{},
+ },
+ },
+ },
+ {
+ // Anonymous interface
+ in: &struct {
+ EmbeddedInterface
+ Nested struct{ EmbeddedInterface }
+ }{
+ EmbeddedInterface: &struct{ S string }{
+ S: "string1",
+ },
+ Nested: struct{ EmbeddedInterface }{
+ EmbeddedInterface: &struct{ S string }{
+ S: "string2",
+ },
+ },
+ },
+ out: &struct {
+ EmbeddedInterface
+ Nested struct{ EmbeddedInterface }
+ }{
+ EmbeddedInterface: &struct{ S string }{},
+ Nested: struct{ EmbeddedInterface }{
+ EmbeddedInterface: &struct{ S string }{},
+ },
+ },
+ },
+}
+
+func TestCloneEmptyProperties(t *testing.T) {
+ for _, testCase := range cloneEmptyPropertiesTestCases {
+ testString := fmt.Sprintf("%#v", testCase.in)
+
+ got := CloneEmptyProperties(reflect.ValueOf(testCase.in).Elem()).Interface()
+
+ if !reflect.DeepEqual(testCase.out, got) {
+ t.Errorf("test case %s", testString)
+ t.Errorf("incorrect output")
+ t.Errorf(" expected: %#v", testCase.out)
+ t.Errorf(" got: %#v", got)
+ }
+ }
+}
+
+func TestZeroProperties(t *testing.T) {
+ for _, testCase := range cloneEmptyPropertiesTestCases {
+ testString := fmt.Sprintf("%#v", testCase.in)
+
+ got := CloneProperties(reflect.ValueOf(testCase.in).Elem()).Interface()
+ ZeroProperties(reflect.ValueOf(got).Elem())
+
+ if !reflect.DeepEqual(testCase.out, got) {
+ t.Errorf("test case %s", testString)
+ t.Errorf("incorrect output")
+ t.Errorf(" expected: %#v", testCase.out)
+ t.Errorf(" got: %#v", got)
+ }
+ }
+}
diff --git a/blueprint/proptools/extend.go b/blueprint/proptools/extend.go
new file mode 100644
index 0000000000000000000000000000000000000000..6ebbff3e6bfc7d5532c8fe7d7955c2df0f0cb119
--- /dev/null
+++ b/blueprint/proptools/extend.go
@@ -0,0 +1,355 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proptools
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// AppendProperties appends the values of properties in the property struct src to the property
+// struct dst. dst and src must be the same type, and both must be pointers to structs.
+//
+// The filter function can prevent individual properties from being appended by returning false, or
+// abort AppendProperties with an error by returning an error. Passing nil for filter will append
+// all properties.
+//
+// An error returned by AppendProperties that applies to a specific property will be an
+// *ExtendPropertyError, and can have the property name and error extracted from it.
+//
+// The append operation is defined as appending strings and slices of strings normally, OR-ing bool
+// values, replacing non-nil pointers to booleans or strings, and recursing into
+// embedded structs, pointers to structs, and interfaces containing
+// pointers to structs. Appending the zero value of a property will always be a no-op.
+func AppendProperties(dst interface{}, src interface{}, filter ExtendPropertyFilterFunc) error {
+	// prepend=false selects append semantics in the shared implementation.
+	return extendProperties(dst, src, filter, false)
+}
+
+// PrependProperties prepends the values of properties in the property struct src to the property
+// struct dst. dst and src must be the same type, and both must be pointers to structs.
+//
+// The filter function can prevent individual properties from being prepended by returning false, or
+// abort PrependProperties with an error by returning an error. Passing nil for filter will prepend
+// all properties.
+//
+// An error returned by PrependProperties that applies to a specific property will be an
+// *ExtendPropertyError, and can have the property name and error extracted from it.
+//
+// The prepend operation is defined as prepending strings, and slices of strings normally, OR-ing
+// bool values, replacing non-nil pointers to booleans or strings, and recursing into
+// embedded structs, pointers to structs, and interfaces containing
+// pointers to structs. Prepending the zero value of a property will always be a no-op.
+func PrependProperties(dst interface{}, src interface{}, filter ExtendPropertyFilterFunc) error {
+	// prepend=true selects prepend semantics in the shared implementation.
+	return extendProperties(dst, src, filter, true)
+}
+
+// AppendMatchingProperties appends the values of properties in the property struct src to the
+// property structs in dst. dst and src do not have to be the same type, but every property in src
+// must be found in at least one property in dst. dst must be a slice of pointers to structs, and
+// src must be a pointer to a struct.
+//
+// The filter function can prevent individual properties from being appended by returning false, or
+// abort AppendProperties with an error by returning an error. Passing nil for filter will append
+// all properties.
+//
+// An error returned by AppendMatchingProperties that applies to a specific property will be an
+// *ExtendPropertyError, and can have the property name and error extracted from it.
+//
+// The append operation is defined as appending strings, and slices of strings normally, OR-ing bool
+// values, replacing non-nil pointers to booleans or strings, and recursing into
+// embedded structs, pointers to structs, and interfaces containing
+// pointers to structs. Appending the zero value of a property will always be a no-op.
+func AppendMatchingProperties(dst []interface{}, src interface{},
+	filter ExtendPropertyFilterFunc) error {
+	// prepend=false selects append semantics in the shared implementation.
+	return extendMatchingProperties(dst, src, filter, false)
+}
+
+// PrependMatchingProperties prepends the values of properties in the property struct src to the
+// property structs in dst. dst and src do not have to be the same type, but every property in src
+// must be found in at least one property in dst. dst must be a slice of pointers to structs, and
+// src must be a pointer to a struct.
+//
+// The filter function can prevent individual properties from being prepended by returning false, or
+// abort PrependProperties with an error by returning an error. Passing nil for filter will prepend
+// all properties.
+//
+// An error returned by PrependProperties that applies to a specific property will be an
+// *ExtendPropertyError, and can have the property name and error extracted from it.
+//
+// The prepend operation is defined as prepending strings, and slices of strings normally, OR-ing
+// bool values, replacing non-nil pointers to booleans or strings, and recursing into
+// embedded structs, pointers to structs, and interfaces containing
+// pointers to structs. Prepending the zero value of a property will always be a no-op.
+func PrependMatchingProperties(dst []interface{}, src interface{},
+	filter ExtendPropertyFilterFunc) error {
+	// prepend=true selects prepend semantics in the shared implementation.
+	return extendMatchingProperties(dst, src, filter, true)
+}
+
+// ExtendPropertyFilterFunc decides whether a single property should be
+// extended.  It receives the dotted property name, the dst and src struct
+// fields, and their current values.  Returning false skips the property;
+// returning a non-nil error aborts the whole extend operation.
+type ExtendPropertyFilterFunc func(property string,
+	dstField, srcField reflect.StructField,
+	dstValue, srcValue interface{}) (bool, error)
+
+// ExtendPropertyError is the error type returned for failures that apply to
+// one specific property; Property holds the dotted property name.
+type ExtendPropertyError struct {
+	Err      error
+	Property string
+}
+
+// Error implements the error interface, naming the offending property.
+func (e *ExtendPropertyError) Error() string {
+	return fmt.Sprintf("can't extend property %q: %s", e.Property, e.Err)
+}
+
+// extendPropertyErrorf builds an *ExtendPropertyError whose cause is the
+// formatted message.
+func extendPropertyErrorf(property string, format string, a ...interface{}) *ExtendPropertyError {
+	return &ExtendPropertyError{
+		Err:      fmt.Errorf(format, a...),
+		Property: property,
+	}
+}
+
+// extendProperties appends or prepends the properties of src onto dst.  Both
+// must be pointers to structs of exactly the same type; because the types
+// match, the recursion can match fields positionally (sameTypes=true).
+func extendProperties(dst interface{}, src interface{}, filter ExtendPropertyFilterFunc,
+	prepend bool) error {
+
+	dstValue, err := getStruct(dst)
+	if err != nil {
+		return err
+	}
+	srcValue, err := getStruct(src)
+	if err != nil {
+		return err
+	}
+
+	if dstValue.Type() != srcValue.Type() {
+		return fmt.Errorf("expected matching types for dst and src, got %T and %T", dst, src)
+	}
+
+	// The recursive worker operates on a list of dst structs; here there is
+	// exactly one.
+	dstValues := []reflect.Value{dstValue}
+
+	return extendPropertiesRecursive(dstValues, srcValue, "", filter, true, prepend)
+}
+
+// extendMatchingProperties appends or prepends the properties of src onto the
+// matching fields of each struct in dst.  The dst structs may have different
+// types from src, so fields are matched by name (sameTypes=false), and the
+// recursion reports an error if a src property matches no dst field at all.
+func extendMatchingProperties(dst []interface{}, src interface{}, filter ExtendPropertyFilterFunc,
+	prepend bool) error {
+
+	dstValues := make([]reflect.Value, len(dst))
+	for i := range dst {
+		var err error
+		dstValues[i], err = getStruct(dst[i])
+		if err != nil {
+			return err
+		}
+	}
+
+	srcValue, err := getStruct(src)
+	if err != nil {
+		return err
+	}
+
+	return extendPropertiesRecursive(dstValues, srcValue, "", filter, false, prepend)
+}
+
+// extendPropertiesRecursive applies the append/prepend operation for every
+// exported, non-"mutated" field of srcValue against each struct in dstValues.
+// prefix is the dotted property-name path used in error messages.  When
+// sameTypes is true the dst structs are known to have the same type as src,
+// so fields are matched by index; otherwise they are matched by name and
+// every src field must be matched by at least one dst struct.
+func extendPropertiesRecursive(dstValues []reflect.Value, srcValue reflect.Value,
+	prefix string, filter ExtendPropertyFilterFunc, sameTypes, prepend bool) error {
+
+	srcType := srcValue.Type()
+	for i := 0; i < srcValue.NumField(); i++ {
+		srcField := srcType.Field(i)
+		if srcField.PkgPath != "" {
+			// The field is not exported so just skip it.
+			continue
+		}
+		if HasTag(srcField, "blueprint", "mutated") {
+			// Fields tagged `blueprint:"mutated"` are never extended.
+			continue
+		}
+
+		propertyName := prefix + PropertyNameForField(srcField.Name)
+		srcFieldValue := srcValue.Field(i)
+
+		found := false
+		for _, dstValue := range dstValues {
+			// Shadow srcFieldValue with a per-dst copy.  The Interface and
+			// Ptr cases below overwrite it with its Elem(); without the copy
+			// that dereference would leak into later dstValues iterations and
+			// cause a spurious kind mismatch on the second and later dsts.
+			srcFieldValue := srcFieldValue
+			dstType := dstValue.Type()
+			var dstField reflect.StructField
+
+			if dstType == srcType {
+				// Same type: the field is at the same index.
+				dstField = dstType.Field(i)
+			} else {
+				var ok bool
+				dstField, ok = dstType.FieldByName(srcField.Name)
+				if !ok {
+					continue
+				}
+			}
+
+			found = true
+
+			dstFieldValue := dstValue.FieldByIndex(dstField.Index)
+
+			if srcFieldValue.Kind() != dstFieldValue.Kind() {
+				return extendPropertyErrorf(propertyName, "mismatched types %s and %s",
+					dstFieldValue.Type(), srcFieldValue.Type())
+			}
+
+			// First switch: validate the field kinds, unwrapping interfaces
+			// and struct pointers down to plain structs (which are recursed
+			// into) or to *bool/*string leaves (handled by the second switch).
+			switch srcFieldValue.Kind() {
+			case reflect.Interface:
+				if dstFieldValue.IsNil() != srcFieldValue.IsNil() {
+					return extendPropertyErrorf(propertyName, "nilitude mismatch")
+				}
+				if dstFieldValue.IsNil() {
+					// Both nil: nothing to extend.
+					continue
+				}
+
+				dstFieldValue = dstFieldValue.Elem()
+				srcFieldValue = srcFieldValue.Elem()
+
+				if srcFieldValue.Kind() != reflect.Ptr || dstFieldValue.Kind() != reflect.Ptr {
+					return extendPropertyErrorf(propertyName, "interface not a pointer")
+				}
+
+				fallthrough
+			case reflect.Ptr:
+				ptrKind := srcFieldValue.Type().Elem().Kind()
+				if ptrKind == reflect.Bool || ptrKind == reflect.String {
+					if srcFieldValue.Type() != dstFieldValue.Type() {
+						return extendPropertyErrorf(propertyName, "mismatched pointer types %s and %s",
+							dstFieldValue.Type(), srcFieldValue.Type())
+					}
+					break
+				} else if ptrKind != reflect.Struct {
+					return extendPropertyErrorf(propertyName, "pointer is a %s", ptrKind)
+				}
+
+				// Pointer to a struct
+				if dstFieldValue.IsNil() != srcFieldValue.IsNil() {
+					return extendPropertyErrorf(propertyName, "nilitude mismatch")
+				}
+				if dstFieldValue.IsNil() {
+					continue
+				}
+
+				dstFieldValue = dstFieldValue.Elem()
+				srcFieldValue = srcFieldValue.Elem()
+
+				fallthrough
+			case reflect.Struct:
+				if sameTypes && dstFieldValue.Type() != srcFieldValue.Type() {
+					return extendPropertyErrorf(propertyName, "mismatched types %s and %s",
+						dstFieldValue.Type(), srcFieldValue.Type())
+				}
+
+				// Recursively extend the struct's fields.
+				err := extendPropertiesRecursive([]reflect.Value{dstFieldValue}, srcFieldValue,
+					propertyName+".", filter, sameTypes, prepend)
+				if err != nil {
+					return err
+				}
+				continue
+			case reflect.Bool, reflect.String, reflect.Slice:
+				if srcFieldValue.Type() != dstFieldValue.Type() {
+					return extendPropertyErrorf(propertyName, "mismatched types %s and %s",
+						dstFieldValue.Type(), srcFieldValue.Type())
+				}
+			default:
+				return extendPropertyErrorf(propertyName, "unsupported kind %s",
+					srcFieldValue.Kind())
+			}
+
+			// Give the caller's filter a chance to skip this property or
+			// abort the whole operation.
+			if filter != nil {
+				b, err := filter(propertyName, dstField, srcField,
+					dstFieldValue.Interface(), srcFieldValue.Interface())
+				if err != nil {
+					return &ExtendPropertyError{
+						Property: propertyName,
+						Err:      err,
+					}
+				}
+				if !b {
+					continue
+				}
+			}
+
+			// Second switch: apply the extension to the leaf value.
+			switch srcFieldValue.Kind() {
+			case reflect.Bool:
+				// Boolean OR
+				dstFieldValue.Set(reflect.ValueOf(srcFieldValue.Bool() || dstFieldValue.Bool()))
+			case reflect.String:
+				// Append the extension string.
+				if prepend {
+					dstFieldValue.SetString(srcFieldValue.String() +
+						dstFieldValue.String())
+				} else {
+					dstFieldValue.SetString(dstFieldValue.String() +
+						srcFieldValue.String())
+				}
+			case reflect.Slice:
+				if srcFieldValue.IsNil() {
+					break
+				}
+
+				// Always build a fresh backing array so neither input slice
+				// is mutated through shared storage.
+				newSlice := reflect.MakeSlice(srcFieldValue.Type(), 0,
+					dstFieldValue.Len()+srcFieldValue.Len())
+				if prepend {
+					newSlice = reflect.AppendSlice(newSlice, srcFieldValue)
+					newSlice = reflect.AppendSlice(newSlice, dstFieldValue)
+				} else {
+					newSlice = reflect.AppendSlice(newSlice, dstFieldValue)
+					newSlice = reflect.AppendSlice(newSlice, srcFieldValue)
+				}
+				dstFieldValue.Set(newSlice)
+			case reflect.Ptr:
+				if srcFieldValue.IsNil() {
+					break
+				}
+
+				switch ptrKind := srcFieldValue.Type().Elem().Kind(); ptrKind {
+				case reflect.Bool:
+					if prepend {
+						// For prepend, an existing value wins.
+						if dstFieldValue.IsNil() {
+							dstFieldValue.Set(reflect.ValueOf(BoolPtr(srcFieldValue.Elem().Bool())))
+						}
+					} else {
+						// For append, replace the original value.
+						dstFieldValue.Set(reflect.ValueOf(BoolPtr(srcFieldValue.Elem().Bool())))
+					}
+				case reflect.String:
+					if prepend {
+						// For prepend, an existing value wins.
+						if dstFieldValue.IsNil() {
+							dstFieldValue.Set(reflect.ValueOf(StringPtr(srcFieldValue.Elem().String())))
+						}
+					} else {
+						// For append, replace the original value.
+						dstFieldValue.Set(reflect.ValueOf(StringPtr(srcFieldValue.Elem().String())))
+					}
+				default:
+					// Unreachable: the first switch only lets bool/string
+					// pointers through to this point.
+					panic(fmt.Errorf("unexpected pointer kind %s", ptrKind))
+				}
+			}
+		}
+		if !found {
+			return extendPropertyErrorf(propertyName, "failed to find property to extend")
+		}
+	}
+
+	return nil
+}
+
+// getStruct unwraps in, which must be a pointer to a struct, and returns the
+// addressable struct value; any other type yields an error naming the actual
+// type that was passed.
+func getStruct(in interface{}) (reflect.Value, error) {
+	value := reflect.ValueOf(in)
+	if value.Kind() != reflect.Ptr {
+		return reflect.Value{}, fmt.Errorf("expected pointer to struct, got %T", in)
+	}
+	value = value.Elem()
+	if value.Kind() != reflect.Struct {
+		return reflect.Value{}, fmt.Errorf("expected pointer to struct, got %T", in)
+	}
+	return value, nil
+}
diff --git a/blueprint/proptools/extend_test.go b/blueprint/proptools/extend_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..edf28fd5a6541a3db13154486984bcb5876dd8d2
--- /dev/null
+++ b/blueprint/proptools/extend_test.go
@@ -0,0 +1,1016 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proptools
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+var appendPropertiesTestCases = []struct {
+ in1 interface{}
+ in2 interface{}
+ out interface{}
+ prepend bool
+ filter ExtendPropertyFilterFunc
+ err error
+}{
+ // Valid inputs
+
+ {
+ // Append bool
+ in1: &struct{ B1, B2, B3, B4 bool }{
+ B1: true,
+ B2: false,
+ B3: true,
+ B4: false,
+ },
+ in2: &struct{ B1, B2, B3, B4 bool }{
+ B1: true,
+ B2: true,
+ B3: false,
+ B4: false,
+ },
+ out: &struct{ B1, B2, B3, B4 bool }{
+ B1: true,
+ B2: true,
+ B3: true,
+ B4: false,
+ },
+ },
+ {
+ // Prepend bool
+ in1: &struct{ B1, B2, B3, B4 bool }{
+ B1: true,
+ B2: false,
+ B3: true,
+ B4: false,
+ },
+ in2: &struct{ B1, B2, B3, B4 bool }{
+ B1: true,
+ B2: true,
+ B3: false,
+ B4: false,
+ },
+ out: &struct{ B1, B2, B3, B4 bool }{
+ B1: true,
+ B2: true,
+ B3: true,
+ B4: false,
+ },
+ prepend: true,
+ },
+ {
+ // Append strings
+ in1: &struct{ S string }{
+ S: "string1",
+ },
+ in2: &struct{ S string }{
+ S: "string2",
+ },
+ out: &struct{ S string }{
+ S: "string1string2",
+ },
+ },
+ {
+ // Prepend strings
+ in1: &struct{ S string }{
+ S: "string1",
+ },
+ in2: &struct{ S string }{
+ S: "string2",
+ },
+ out: &struct{ S string }{
+ S: "string2string1",
+ },
+ prepend: true,
+ },
+ {
+ // Append pointer to bool
+ in1: &struct{ B1, B2, B3, B4, B5, B6, B7, B8, B9 *bool }{
+ B1: BoolPtr(true),
+ B2: BoolPtr(false),
+ B3: nil,
+ B4: BoolPtr(true),
+ B5: BoolPtr(false),
+ B6: nil,
+ B7: BoolPtr(true),
+ B8: BoolPtr(false),
+ B9: nil,
+ },
+ in2: &struct{ B1, B2, B3, B4, B5, B6, B7, B8, B9 *bool }{
+ B1: nil,
+ B2: nil,
+ B3: nil,
+ B4: BoolPtr(true),
+ B5: BoolPtr(true),
+ B6: BoolPtr(true),
+ B7: BoolPtr(false),
+ B8: BoolPtr(false),
+ B9: BoolPtr(false),
+ },
+ out: &struct{ B1, B2, B3, B4, B5, B6, B7, B8, B9 *bool }{
+ B1: BoolPtr(true),
+ B2: BoolPtr(false),
+ B3: nil,
+ B4: BoolPtr(true),
+ B5: BoolPtr(true),
+ B6: BoolPtr(true),
+ B7: BoolPtr(false),
+ B8: BoolPtr(false),
+ B9: BoolPtr(false),
+ },
+ },
+ {
+ // Prepend pointer to bool
+ in1: &struct{ B1, B2, B3, B4, B5, B6, B7, B8, B9 *bool }{
+ B1: BoolPtr(true),
+ B2: BoolPtr(false),
+ B3: nil,
+ B4: BoolPtr(true),
+ B5: BoolPtr(false),
+ B6: nil,
+ B7: BoolPtr(true),
+ B8: BoolPtr(false),
+ B9: nil,
+ },
+ in2: &struct{ B1, B2, B3, B4, B5, B6, B7, B8, B9 *bool }{
+ B1: nil,
+ B2: nil,
+ B3: nil,
+ B4: BoolPtr(true),
+ B5: BoolPtr(true),
+ B6: BoolPtr(true),
+ B7: BoolPtr(false),
+ B8: BoolPtr(false),
+ B9: BoolPtr(false),
+ },
+ out: &struct{ B1, B2, B3, B4, B5, B6, B7, B8, B9 *bool }{
+ B1: BoolPtr(true),
+ B2: BoolPtr(false),
+ B3: nil,
+ B4: BoolPtr(true),
+ B5: BoolPtr(false),
+ B6: BoolPtr(true),
+ B7: BoolPtr(true),
+ B8: BoolPtr(false),
+ B9: BoolPtr(false),
+ },
+ prepend: true,
+ },
+ {
+ // Append pointer to strings
+ in1: &struct{ S1, S2, S3, S4 *string }{
+ S1: StringPtr("string1"),
+ S2: StringPtr("string2"),
+ },
+ in2: &struct{ S1, S2, S3, S4 *string }{
+ S1: StringPtr("string3"),
+ S3: StringPtr("string4"),
+ },
+ out: &struct{ S1, S2, S3, S4 *string }{
+ S1: StringPtr("string3"),
+ S2: StringPtr("string2"),
+ S3: StringPtr("string4"),
+ S4: nil,
+ },
+ },
+ {
+ // Prepend pointer to strings
+ in1: &struct{ S1, S2, S3, S4 *string }{
+ S1: StringPtr("string1"),
+ S2: StringPtr("string2"),
+ },
+ in2: &struct{ S1, S2, S3, S4 *string }{
+ S1: StringPtr("string3"),
+ S3: StringPtr("string4"),
+ },
+ out: &struct{ S1, S2, S3, S4 *string }{
+ S1: StringPtr("string1"),
+ S2: StringPtr("string2"),
+ S3: StringPtr("string4"),
+ S4: nil,
+ },
+ prepend: true,
+ },
+ {
+ // Append slice
+ in1: &struct{ S []string }{
+ S: []string{"string1"},
+ },
+ in2: &struct{ S []string }{
+ S: []string{"string2"},
+ },
+ out: &struct{ S []string }{
+ S: []string{"string1", "string2"},
+ },
+ },
+ {
+ // Prepend slice
+ in1: &struct{ S []string }{
+ S: []string{"string1"},
+ },
+ in2: &struct{ S []string }{
+ S: []string{"string2"},
+ },
+ out: &struct{ S []string }{
+ S: []string{"string2", "string1"},
+ },
+ prepend: true,
+ },
+ {
+ // Append empty slice
+ in1: &struct{ S1, S2 []string }{
+ S1: []string{"string1"},
+ S2: []string{},
+ },
+ in2: &struct{ S1, S2 []string }{
+ S1: []string{},
+ S2: []string{"string2"},
+ },
+ out: &struct{ S1, S2 []string }{
+ S1: []string{"string1"},
+ S2: []string{"string2"},
+ },
+ },
+ {
+ // Prepend empty slice
+ in1: &struct{ S1, S2 []string }{
+ S1: []string{"string1"},
+ S2: []string{},
+ },
+ in2: &struct{ S1, S2 []string }{
+ S1: []string{},
+ S2: []string{"string2"},
+ },
+ out: &struct{ S1, S2 []string }{
+ S1: []string{"string1"},
+ S2: []string{"string2"},
+ },
+ prepend: true,
+ },
+ {
+ // Append nil slice
+ in1: &struct{ S1, S2, S3 []string }{
+ S1: []string{"string1"},
+ },
+ in2: &struct{ S1, S2, S3 []string }{
+ S2: []string{"string2"},
+ },
+ out: &struct{ S1, S2, S3 []string }{
+ S1: []string{"string1"},
+ S2: []string{"string2"},
+ S3: nil,
+ },
+ },
+ {
+ // Prepend nil slice
+ in1: &struct{ S1, S2, S3 []string }{
+ S1: []string{"string1"},
+ },
+ in2: &struct{ S1, S2, S3 []string }{
+ S2: []string{"string2"},
+ },
+ out: &struct{ S1, S2, S3 []string }{
+ S1: []string{"string1"},
+ S2: []string{"string2"},
+ S3: nil,
+ },
+ prepend: true,
+ },
+ {
+ // Append pointer
+ in1: &struct{ S *struct{ S string } }{
+ S: &struct{ S string }{
+ S: "string1",
+ },
+ },
+ in2: &struct{ S *struct{ S string } }{
+ S: &struct{ S string }{
+ S: "string2",
+ },
+ },
+ out: &struct{ S *struct{ S string } }{
+ S: &struct{ S string }{
+ S: "string1string2",
+ },
+ },
+ },
+ {
+ // Prepend pointer
+ in1: &struct{ S *struct{ S string } }{
+ S: &struct{ S string }{
+ S: "string1",
+ },
+ },
+ in2: &struct{ S *struct{ S string } }{
+ S: &struct{ S string }{
+ S: "string2",
+ },
+ },
+ out: &struct{ S *struct{ S string } }{
+ S: &struct{ S string }{
+ S: "string2string1",
+ },
+ },
+ prepend: true,
+ },
+ {
+ // Append interface
+ in1: &struct{ S interface{} }{
+ S: &struct{ S string }{
+ S: "string1",
+ },
+ },
+ in2: &struct{ S interface{} }{
+ S: &struct{ S string }{
+ S: "string2",
+ },
+ },
+ out: &struct{ S interface{} }{
+ S: &struct{ S string }{
+ S: "string1string2",
+ },
+ },
+ },
+ {
+ // Prepend interface
+ in1: &struct{ S interface{} }{
+ S: &struct{ S string }{
+ S: "string1",
+ },
+ },
+ in2: &struct{ S interface{} }{
+ S: &struct{ S string }{
+ S: "string2",
+ },
+ },
+ out: &struct{ S interface{} }{
+ S: &struct{ S string }{
+ S: "string2string1",
+ },
+ },
+ prepend: true,
+ },
+ {
+ // Unexported field
+ in1: &struct{ s string }{
+ s: "string1",
+ },
+ in2: &struct{ s string }{
+ s: "string2",
+ },
+ out: &struct{ s string }{
+ s: "string1",
+ },
+ },
+ {
+ // Empty struct
+ in1: &struct{}{},
+ in2: &struct{}{},
+ out: &struct{}{},
+ },
+ {
+ // Interface nil
+ in1: &struct{ S interface{} }{
+ S: nil,
+ },
+ in2: &struct{ S interface{} }{
+ S: nil,
+ },
+ out: &struct{ S interface{} }{
+ S: nil,
+ },
+ },
+ {
+ // Pointer nil
+ in1: &struct{ S *struct{} }{
+ S: nil,
+ },
+ in2: &struct{ S *struct{} }{
+ S: nil,
+ },
+ out: &struct{ S *struct{} }{
+ S: nil,
+ },
+ },
+ {
+ // Anonymous struct
+ in1: &struct {
+ EmbeddedStruct
+ Nested struct{ EmbeddedStruct }
+ }{
+ EmbeddedStruct: EmbeddedStruct{
+ S: "string1",
+ },
+ Nested: struct{ EmbeddedStruct }{
+ EmbeddedStruct: EmbeddedStruct{
+ S: "string2",
+ },
+ },
+ },
+ in2: &struct {
+ EmbeddedStruct
+ Nested struct{ EmbeddedStruct }
+ }{
+ EmbeddedStruct: EmbeddedStruct{
+ S: "string3",
+ },
+ Nested: struct{ EmbeddedStruct }{
+ EmbeddedStruct: EmbeddedStruct{
+ S: "string4",
+ },
+ },
+ },
+ out: &struct {
+ EmbeddedStruct
+ Nested struct{ EmbeddedStruct }
+ }{
+ EmbeddedStruct: EmbeddedStruct{
+ S: "string1string3",
+ },
+ Nested: struct{ EmbeddedStruct }{
+ EmbeddedStruct: EmbeddedStruct{
+ S: "string2string4",
+ },
+ },
+ },
+ },
+ {
+ // Anonymous interface
+ in1: &struct {
+ EmbeddedInterface
+ Nested struct{ EmbeddedInterface }
+ }{
+ EmbeddedInterface: &struct{ S string }{
+ S: "string1",
+ },
+ Nested: struct{ EmbeddedInterface }{
+ EmbeddedInterface: &struct{ S string }{
+ S: "string2",
+ },
+ },
+ },
+ in2: &struct {
+ EmbeddedInterface
+ Nested struct{ EmbeddedInterface }
+ }{
+ EmbeddedInterface: &struct{ S string }{
+ S: "string3",
+ },
+ Nested: struct{ EmbeddedInterface }{
+ EmbeddedInterface: &struct{ S string }{
+ S: "string4",
+ },
+ },
+ },
+ out: &struct {
+ EmbeddedInterface
+ Nested struct{ EmbeddedInterface }
+ }{
+ EmbeddedInterface: &struct{ S string }{
+ S: "string1string3",
+ },
+ Nested: struct{ EmbeddedInterface }{
+ EmbeddedInterface: &struct{ S string }{
+ S: "string2string4",
+ },
+ },
+ },
+ },
+
+ // Errors
+
+ {
+ // Non-pointer in1
+ in1: struct{}{},
+ err: errors.New("expected pointer to struct, got struct {}"),
+ out: struct{}{},
+ },
+ {
+ // Non-pointer in2
+ in1: &struct{}{},
+ in2: struct{}{},
+ err: errors.New("expected pointer to struct, got struct {}"),
+ out: &struct{}{},
+ },
+ {
+ // Non-struct in1
+ in1: &[]string{"bad"},
+ err: errors.New("expected pointer to struct, got *[]string"),
+ out: &[]string{"bad"},
+ },
+ {
+ // Non-struct in2
+ in1: &struct{}{},
+ in2: &[]string{"bad"},
+ err: errors.New("expected pointer to struct, got *[]string"),
+ out: &struct{}{},
+ },
+ {
+ // Mismatched types
+ in1: &struct{ A string }{
+ A: "string1",
+ },
+ in2: &struct{ B string }{
+ B: "string2",
+ },
+ out: &struct{ A string }{
+ A: "string1",
+ },
+ err: errors.New("expected matching types for dst and src, got *struct { A string } and *struct { B string }"),
+ },
+ {
+ // Unsupported kind
+ in1: &struct{ I int }{
+ I: 1,
+ },
+ in2: &struct{ I int }{
+ I: 2,
+ },
+ out: &struct{ I int }{
+ I: 1,
+ },
+ err: extendPropertyErrorf("i", "unsupported kind int"),
+ },
+ {
+ // Interface nilitude mismatch
+ in1: &struct{ S interface{} }{
+ S: &struct{ S string }{
+ S: "string1",
+ },
+ },
+ in2: &struct{ S interface{} }{
+ S: nil,
+ },
+ out: &struct{ S interface{} }{
+ S: &struct{ S string }{
+ S: "string1",
+ },
+ },
+ err: extendPropertyErrorf("s", "nilitude mismatch"),
+ },
+ {
+ // Interface type mismatch
+ in1: &struct{ S interface{} }{
+ S: &struct{ A string }{
+ A: "string1",
+ },
+ },
+ in2: &struct{ S interface{} }{
+ S: &struct{ B string }{
+ B: "string2",
+ },
+ },
+ out: &struct{ S interface{} }{
+ S: &struct{ A string }{
+ A: "string1",
+ },
+ },
+ err: extendPropertyErrorf("s", "mismatched types struct { A string } and struct { B string }"),
+ },
+ {
+ // Interface not a pointer
+ in1: &struct{ S interface{} }{
+ S: struct{ S string }{
+ S: "string1",
+ },
+ },
+ in2: &struct{ S interface{} }{
+ S: struct{ S string }{
+ S: "string2",
+ },
+ },
+ out: &struct{ S interface{} }{
+ S: struct{ S string }{
+ S: "string1",
+ },
+ },
+ err: extendPropertyErrorf("s", "interface not a pointer"),
+ },
+ {
+ // Pointer nilitude mismatch
+ in1: &struct{ S *struct{ S string } }{
+ S: &struct{ S string }{
+ S: "string1",
+ },
+ },
+ in2: &struct{ S *struct{ S string } }{
+ S: nil,
+ },
+ out: &struct{ S *struct{ S string } }{
+ S: &struct{ S string }{
+ S: "string1",
+ },
+ },
+ err: extendPropertyErrorf("s", "nilitude mismatch"),
+ },
+ {
+ // Pointer not a struct
+ in1: &struct{ S *[]string }{
+ S: &[]string{"string1"},
+ },
+ in2: &struct{ S *[]string }{
+ S: &[]string{"string2"},
+ },
+ out: &struct{ S *[]string }{
+ S: &[]string{"string1"},
+ },
+ err: extendPropertyErrorf("s", "pointer is a slice"),
+ },
+ {
+ // Error in nested struct
+ in1: &struct{ S interface{} }{
+ S: &struct{ I int }{
+ I: 1,
+ },
+ },
+ in2: &struct{ S interface{} }{
+ S: &struct{ I int }{
+ I: 2,
+ },
+ },
+ out: &struct{ S interface{} }{
+ S: &struct{ I int }{
+ I: 1,
+ },
+ },
+ err: extendPropertyErrorf("s.i", "unsupported kind int"),
+ },
+
+ // Filters
+
+ {
+ // Filter true
+ in1: &struct{ S string }{
+ S: "string1",
+ },
+ in2: &struct{ S string }{
+ S: "string2",
+ },
+ out: &struct{ S string }{
+ S: "string1string2",
+ },
+ filter: func(property string,
+ dstField, srcField reflect.StructField,
+ dstValue, srcValue interface{}) (bool, error) {
+ return true, nil
+ },
+ },
+ {
+ // Filter false
+ in1: &struct{ S string }{
+ S: "string1",
+ },
+ in2: &struct{ S string }{
+ S: "string2",
+ },
+ out: &struct{ S string }{
+ S: "string1",
+ },
+ filter: func(property string,
+ dstField, srcField reflect.StructField,
+ dstValue, srcValue interface{}) (bool, error) {
+ return false, nil
+ },
+ },
+ {
+ // Filter check args
+ in1: &struct{ S string }{
+ S: "string1",
+ },
+ in2: &struct{ S string }{
+ S: "string2",
+ },
+ out: &struct{ S string }{
+ S: "string1string2",
+ },
+ filter: func(property string,
+ dstField, srcField reflect.StructField,
+ dstValue, srcValue interface{}) (bool, error) {
+ return property == "s" &&
+ dstField.Name == "S" && srcField.Name == "S" &&
+ dstValue.(string) == "string1" && srcValue.(string) == "string2", nil
+ },
+ },
+ {
+ // Filter mutated
+ in1: &struct {
+ S string `blueprint:"mutated"`
+ }{
+ S: "string1",
+ },
+ in2: &struct {
+ S string `blueprint:"mutated"`
+ }{
+ S: "string2",
+ },
+ out: &struct {
+ S string `blueprint:"mutated"`
+ }{
+ S: "string1",
+ },
+ },
+ {
+ // Filter error
+ in1: &struct{ S string }{
+ S: "string1",
+ },
+ in2: &struct{ S string }{
+ S: "string2",
+ },
+ out: &struct{ S string }{
+ S: "string1",
+ },
+ filter: func(property string,
+ dstField, srcField reflect.StructField,
+ dstValue, srcValue interface{}) (bool, error) {
+ return true, fmt.Errorf("filter error")
+ },
+ err: extendPropertyErrorf("s", "filter error"),
+ },
+}
+
+// TestAppendProperties runs every appendPropertiesTestCases entry, extending
+// in1 in place with AppendProperties or PrependProperties (per the prepend
+// flag) and comparing the result and error against out/err via check.
+func TestAppendProperties(t *testing.T) {
+	for _, testCase := range appendPropertiesTestCases {
+		// Describe the case before in1 is mutated by the extend call.
+		testString := fmt.Sprintf("%v, %v -> %v", testCase.in1, testCase.in2, testCase.out)
+
+		got := testCase.in1
+		var err error
+		var testType string
+
+		if testCase.prepend {
+			testType = "prepend"
+			err = PrependProperties(got, testCase.in2, testCase.filter)
+		} else {
+			testType = "append"
+			err = AppendProperties(got, testCase.in2, testCase.filter)
+		}
+
+		check(t, testType, testString, got, err, testCase.out, testCase.err)
+	}
+}
+
+var appendMatchingPropertiesTestCases = []struct {
+ in1 []interface{}
+ in2 interface{}
+ out []interface{}
+ prepend bool
+ filter ExtendPropertyFilterFunc
+ err error
+}{
+ {
+ // Append strings
+ in1: []interface{}{&struct{ S string }{
+ S: "string1",
+ }},
+ in2: &struct{ S string }{
+ S: "string2",
+ },
+ out: []interface{}{&struct{ S string }{
+ S: "string1string2",
+ }},
+ },
+ {
+ // Prepend strings
+ in1: []interface{}{&struct{ S string }{
+ S: "string1",
+ }},
+ in2: &struct{ S string }{
+ S: "string2",
+ },
+ out: []interface{}{&struct{ S string }{
+ S: "string2string1",
+ }},
+ prepend: true,
+ },
+ {
+ // Append all
+ in1: []interface{}{
+ &struct{ S, A string }{
+ S: "string1",
+ },
+ &struct{ S, B string }{
+ S: "string2",
+ },
+ },
+ in2: &struct{ S string }{
+ S: "string3",
+ },
+ out: []interface{}{
+ &struct{ S, A string }{
+ S: "string1string3",
+ },
+ &struct{ S, B string }{
+ S: "string2string3",
+ },
+ },
+ },
+ {
+ // Append some
+ in1: []interface{}{
+ &struct{ S, A string }{
+ S: "string1",
+ },
+ &struct{ B string }{},
+ },
+ in2: &struct{ S string }{
+ S: "string2",
+ },
+ out: []interface{}{
+ &struct{ S, A string }{
+ S: "string1string2",
+ },
+ &struct{ B string }{},
+ },
+ },
+ {
+ // Append mismatched structs
+ in1: []interface{}{&struct{ S, A string }{
+ S: "string1",
+ }},
+ in2: &struct{ S string }{
+ S: "string2",
+ },
+ out: []interface{}{&struct{ S, A string }{
+ S: "string1string2",
+ }},
+ },
+ {
+ // Append mismatched pointer structs
+ in1: []interface{}{&struct{ S *struct{ S, A string } }{
+ S: &struct{ S, A string }{
+ S: "string1",
+ },
+ }},
+ in2: &struct{ S *struct{ S string } }{
+ S: &struct{ S string }{
+ S: "string2",
+ },
+ },
+ out: []interface{}{&struct{ S *struct{ S, A string } }{
+ S: &struct{ S, A string }{
+ S: "string1string2",
+ },
+ }},
+ },
+
+ // Errors
+
+ {
+ // Non-pointer in1
+ in1: []interface{}{struct{}{}},
+ err: errors.New("expected pointer to struct, got struct {}"),
+ out: []interface{}{struct{}{}},
+ },
+ {
+ // Non-pointer in2
+ in1: []interface{}{&struct{}{}},
+ in2: struct{}{},
+ err: errors.New("expected pointer to struct, got struct {}"),
+ out: []interface{}{&struct{}{}},
+ },
+ {
+ // Non-struct in1
+ in1: []interface{}{&[]string{"bad"}},
+ err: errors.New("expected pointer to struct, got *[]string"),
+ out: []interface{}{&[]string{"bad"}},
+ },
+ {
+ // Non-struct in2
+ in1: []interface{}{&struct{}{}},
+ in2: &[]string{"bad"},
+ err: errors.New("expected pointer to struct, got *[]string"),
+ out: []interface{}{&struct{}{}},
+ },
+ {
+ // Append none
+ in1: []interface{}{
+ &struct{ A string }{},
+ &struct{ B string }{},
+ },
+ in2: &struct{ S string }{
+ S: "string1",
+ },
+ out: []interface{}{
+ &struct{ A string }{},
+ &struct{ B string }{},
+ },
+ err: extendPropertyErrorf("s", "failed to find property to extend"),
+ },
+ {
+ // Append mismatched kinds
+ in1: []interface{}{
+ &struct{ S string }{
+ S: "string1",
+ },
+ },
+ in2: &struct{ S []string }{
+ S: []string{"string2"},
+ },
+ out: []interface{}{
+ &struct{ S string }{
+ S: "string1",
+ },
+ },
+ err: extendPropertyErrorf("s", "mismatched types string and []string"),
+ },
+ {
+ // Append mismatched types
+ in1: []interface{}{
+ &struct{ S []int }{
+ S: []int{1},
+ },
+ },
+ in2: &struct{ S []string }{
+ S: []string{"string2"},
+ },
+ out: []interface{}{
+ &struct{ S []int }{
+ S: []int{1},
+ },
+ },
+ err: extendPropertyErrorf("s", "mismatched types []int and []string"),
+ },
+}
+
+// TestAppendMatchingProperties runs every appendMatchingPropertiesTestCases
+// entry, extending the slice of dst structs in1 in place with
+// AppendMatchingProperties or PrependMatchingProperties and comparing the
+// result and error against out/err via check.
+func TestAppendMatchingProperties(t *testing.T) {
+	for _, testCase := range appendMatchingPropertiesTestCases {
+		// Describe the case before in1 is mutated by the extend call.
+		testString := fmt.Sprintf("%s, %s -> %s", p(testCase.in1), p(testCase.in2), p(testCase.out))
+
+		got := testCase.in1
+		var err error
+		var testType string
+
+		if testCase.prepend {
+			testType = "prepend matching"
+			err = PrependMatchingProperties(got, testCase.in2, testCase.filter)
+		} else {
+			testType = "append matching"
+			err = AppendMatchingProperties(got, testCase.in2, testCase.filter)
+		}
+
+		check(t, testType, testString, got, err, testCase.out, testCase.err)
+	}
+}
+
+// check reports test failures when err does not match expectedErr (compared
+// by message) or when got does not DeepEqual expected.  The test-case
+// description is printed at most once, the first time a mismatch is reported.
+func check(t *testing.T, testType, testString string,
+	got interface{}, err error,
+	expected interface{}, expectedErr error) {
+
+	printedTestCase := false
+	// e reports one mismatch, lazily printing the test-case header first.
+	e := func(s string, expected, got interface{}) {
+		if !printedTestCase {
+			t.Errorf("test case %s: %s", testType, testString)
+			printedTestCase = true
+		}
+		t.Errorf("incorrect %s", s)
+		t.Errorf("  expected: %s", p(expected))
+		t.Errorf("  got:      %s", p(got))
+	}
+
+	if err != nil {
+		if expectedErr != nil {
+			// Errors are compared by message, not identity.
+			if err.Error() != expectedErr.Error() {
+				e("unexpected error", expectedErr.Error(), err.Error())
+			}
+		} else {
+			e("unexpected error", nil, err.Error())
+		}
+	} else {
+		if expectedErr != nil {
+			e("missing error", expectedErr, nil)
+		}
+	}
+
+	if !reflect.DeepEqual(expected, got) {
+		e("output:", expected, got)
+	}
+}
+
+// p formats a value with %#v for test messages; a []interface{} is expanded
+// element-wise into a bracketed, comma-separated list.
+func p(in interface{}) string {
+	if v, ok := in.([]interface{}); ok {
+		s := make([]string, len(v))
+		for i := range v {
+			s[i] = fmt.Sprintf("%#v", v[i])
+		}
+		return "[" + strings.Join(s, ", ") + "]"
+	} else {
+		return fmt.Sprintf("%#v", in)
+	}
+}
diff --git a/blueprint/proptools/proptools.go b/blueprint/proptools/proptools.go
new file mode 100644
index 0000000000000000000000000000000000000000..690d3847e9a46831b7f2dee62b48a890af6ac4f1
--- /dev/null
+++ b/blueprint/proptools/proptools.go
@@ -0,0 +1,79 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proptools
+
+import (
+ "reflect"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// PropertyNameForField converts a Go struct field name to a property name by
+// lower-casing its first rune (e.g. "Srcs" -> "srcs"); the rest of the name
+// is kept unchanged.
+func PropertyNameForField(fieldName string) string {
+ r, size := utf8.DecodeRuneInString(fieldName)
+ propertyName := string(unicode.ToLower(r))
+ if len(fieldName) > size {
+ propertyName += fieldName[size:]
+ }
+ return propertyName
+}
+
+// FieldNameForProperty is the inverse of PropertyNameForField: it converts a
+// property name to an exported Go field name by upper-casing its first rune
+// (e.g. "srcs" -> "Srcs"); the rest of the name is kept unchanged.
+func FieldNameForProperty(propertyName string) string {
+ r, size := utf8.DecodeRuneInString(propertyName)
+ fieldName := string(unicode.ToUpper(r))
+ if len(propertyName) > size {
+ fieldName += propertyName[size:]
+ }
+ return fieldName
+}
+
+// HasTag reports whether the struct field's tag named name contains value as
+// one of its comma-separated entries, e.g. HasTag(f, "blueprint", "mutated")
+// matches `blueprint:"mutated"` and `blueprint:"a,mutated"`.
+func HasTag(field reflect.StructField, name, value string) bool {
+ tag := field.Tag.Get(name)
+ for _, entry := range strings.Split(tag, ",") {
+ if entry == value {
+ return true
+ }
+ }
+
+ return false
+}
+
+// BoolPtr returns a pointer to a new bool containing the given value.
+func BoolPtr(b bool) *bool {
+ return &b
+}
+
+// StringPtr returns a pointer to a new string containing the given value.
+func StringPtr(s string) *string {
+ return &s
+}
+
+// Bool takes a pointer to a bool and returns true iff the pointer is non-nil and points to a true
+// value.
+func Bool(b *bool) bool {
+ if b != nil {
+ return *b
+ }
+ return false
+}
+
+// String takes a pointer to a string and returns the value of the string if the pointer is non-nil,
+// or an empty string.
+func String(s *string) string {
+ if s != nil {
+ return *s
+ }
+ return ""
+}
diff --git a/blueprint/proptools/typeequal.go b/blueprint/proptools/typeequal.go
new file mode 100644
index 0000000000000000000000000000000000000000..e68f91a7c235a472a66c879f760a8e9626a6d081
--- /dev/null
+++ b/blueprint/proptools/typeequal.go
@@ -0,0 +1,76 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proptools
+
+import "reflect"
+
+// TypeEqual takes two property structs, and returns true if they are of equal type, any embedded
+// pointers to structs or interfaces having matching nilitude, and any interface{} values in any
+// embedded structs, pointers to structs, or interfaces are also of equal type.
+func TypeEqual(s1, s2 interface{}) bool {
+ return typeEqual(reflect.ValueOf(s1), reflect.ValueOf(s2))
+}
+
+// typeEqual is the recursive implementation behind TypeEqual, operating on
+// reflect.Values. It returns false as soon as a type mismatch, a nilitude
+// mismatch, or a dynamic-interface-type mismatch is found.
+func typeEqual(v1, v2 reflect.Value) bool {
+ if v1.Type() != v2.Type() {
+ return false
+ }
+
+ // Interfaces must agree on nilitude; two non-nil interfaces are unwrapped
+ // and must hold values of the same dynamic type.
+ if v1.Kind() == reflect.Interface {
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ if v1.IsNil() {
+ return true
+ }
+ v1 = v1.Elem()
+ v2 = v2.Elem()
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ }
+
+ if v1.Kind() == reflect.Ptr {
+ // Pointers to non-structs already have equal types; nilitude only
+ // matters for pointers to structs, which are then dereferenced.
+ if v1.Type().Elem().Kind() != reflect.Struct {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ if v1.IsNil() {
+ return true
+ }
+ v1 = v1.Elem()
+ v2 = v2.Elem()
+ }
+
+ // Non-struct values with matching types need no further inspection.
+ if v1.Kind() != reflect.Struct {
+ return true
+ }
+
+ // Recurse into the field kinds that can carry nested type differences.
+ for i := 0; i < v1.NumField(); i++ {
+ v1 := v1.Field(i)
+ v2 := v2.Field(i)
+
+ switch kind := v1.Kind(); kind {
+ case reflect.Interface, reflect.Ptr, reflect.Struct:
+ if !typeEqual(v1, v2) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
diff --git a/blueprint/proptools/typeequal_test.go b/blueprint/proptools/typeequal_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..d3746099b75d3c6b6b9a473c137390164127dc72
--- /dev/null
+++ b/blueprint/proptools/typeequal_test.go
@@ -0,0 +1,150 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proptools
+
+import (
+ "fmt"
+ "testing"
+)
+
+var typeEqualTestCases = []struct {
+ in1 interface{}
+ in2 interface{}
+ out bool
+}{
+ {
+ // Matching structs
+ in1: struct{ S1 string }{},
+ in2: struct{ S1 string }{},
+ out: true,
+ },
+ {
+ // Mismatching structs
+ in1: struct{ S1 string }{},
+ in2: struct{ S2 string }{},
+ out: false,
+ },
+ {
+ // Matching pointer to struct
+ in1: &struct{ S1 string }{},
+ in2: &struct{ S1 string }{},
+ out: true,
+ },
+ {
+ // Mismatching pointer to struct
+ in1: &struct{ S1 string }{},
+ in2: &struct{ S2 string }{},
+ out: false,
+ },
+ {
+ // Matching embedded structs
+ in1: struct{ S struct{ S1 string } }{},
+ in2: struct{ S struct{ S1 string } }{},
+ out: true,
+ },
+ {
+ // Mismatching embedded structs
+ in1: struct{ S struct{ S1 string } }{},
+ in2: struct{ S struct{ S2 string } }{},
+ out: false,
+ },
+ {
+ // Matching embedded pointer to struct
+ in1: &struct{ S *struct{ S1 string } }{S: &struct{ S1 string }{}},
+ in2: &struct{ S *struct{ S1 string } }{S: &struct{ S1 string }{}},
+ out: true,
+ },
+ {
+ // Mismatching embedded pointer to struct
+ in1: &struct{ S *struct{ S1 string } }{S: &struct{ S1 string }{}},
+ in2: &struct{ S *struct{ S2 string } }{S: &struct{ S2 string }{}},
+ out: false,
+ },
+ {
+ // Matching embedded nil pointer to struct
+ in1: &struct{ S *struct{ S1 string } }{},
+ in2: &struct{ S *struct{ S1 string } }{},
+ out: true,
+ },
+ {
+ // Mismatching embedded nil pointer to struct
+ in1: &struct{ S *struct{ S1 string } }{},
+ in2: &struct{ S *struct{ S2 string } }{},
+ out: false,
+ },
+ {
+ // Mismatching nilitude embedded pointer to struct
+ in1: &struct{ S *struct{ S1 string } }{S: &struct{ S1 string }{}},
+ in2: &struct{ S *struct{ S1 string } }{},
+ out: false,
+ },
+ {
+ // Matching embedded interface to pointer to struct
+ in1: &struct{ S interface{} }{S: &struct{ S1 string }{}},
+ in2: &struct{ S interface{} }{S: &struct{ S1 string }{}},
+ out: true,
+ },
+ {
+ // Mismatching embedded interface to pointer to struct
+ in1: &struct{ S interface{} }{S: &struct{ S1 string }{}},
+ in2: &struct{ S interface{} }{S: &struct{ S2 string }{}},
+ out: false,
+ },
+ {
+ // Matching embedded nil interface to pointer to struct
+ in1: &struct{ S interface{} }{},
+ in2: &struct{ S interface{} }{},
+ out: true,
+ },
+ {
+ // Mismatching nilitude embedded interface to pointer to struct
+ in1: &struct{ S interface{} }{S: &struct{ S1 string }{}},
+ in2: &struct{ S interface{} }{},
+ out: false,
+ },
+ {
+ // Matching pointer to non-struct
+ in1: struct{ S1 *string }{ S1: StringPtr("test1") },
+ in2: struct{ S1 *string }{ S1: StringPtr("test2") },
+ out: true,
+ },
+ {
+ // Matching nilitude pointer to non-struct
+ in1: struct{ S1 *string }{ S1: StringPtr("test1") },
+ in2: struct{ S1 *string }{},
+ out: true,
+ },
+ {
+ // Mismatching pointer to non-struct
+ in1: struct{ S1 *string }{},
+ in2: struct{ S2 *string }{},
+ out: false,
+ },
+}
+
+// TestTypeEqualProperties checks TypeEqual against each table-driven case in
+// typeEqualTestCases, reporting the inputs and the expected/actual boolean on
+// any mismatch.
+func TestTypeEqualProperties(t *testing.T) {
+ for _, testCase := range typeEqualTestCases {
+ testString := fmt.Sprintf("%#v, %#v -> %t", testCase.in1, testCase.in2, testCase.out)
+
+ got := TypeEqual(testCase.in1, testCase.in2)
+
+ if got != testCase.out {
+ t.Errorf("test case: %s", testString)
+ t.Errorf("incorrect output")
+ t.Errorf(" expected: %t", testCase.out)
+ t.Errorf(" got: %t", got)
+ }
+ }
+}
diff --git a/blueprint/scope.go b/blueprint/scope.go
new file mode 100644
index 0000000000000000000000000000000000000000..84db0cf7d3ef29ccd91ff9298fa454e46a3d4bea
--- /dev/null
+++ b/blueprint/scope.go
@@ -0,0 +1,411 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package blueprint
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// A Variable represents a global Ninja variable definition that will be written
+// to the output .ninja file. A variable may contain references to other global
+// Ninja variables, but circular variable references are not allowed.
+type Variable interface {
+ packageContext() *packageContext
+ name() string // "foo"
+ fullName(pkgNames map[*packageContext]string) string // "pkg.foo" or "path.to.pkg.foo"
+ value(config interface{}) (*ninjaString, error)
+ String() string
+}
+
+// A Pool represents a Ninja pool that will be written to the output .ninja
+// file.
+type Pool interface {
+ packageContext() *packageContext
+ name() string // "foo"
+ fullName(pkgNames map[*packageContext]string) string // "pkg.foo" or "path.to.pkg.foo"
+ def(config interface{}) (*poolDef, error)
+ String() string
+}
+
+// A Rule represents a Ninja build rule that will be written to the output
+// .ninja file.
+type Rule interface {
+ packageContext() *packageContext
+ name() string // "foo"
+ fullName(pkgNames map[*packageContext]string) string // "pkg.foo" or "path.to.pkg.foo"
+ def(config interface{}) (*ruleDef, error)
+ scope() *basicScope
+ isArg(argName string) bool
+ String() string
+}
+
+type basicScope struct {
+ parent *basicScope
+ variables map[string]Variable
+ pools map[string]Pool
+ rules map[string]Rule
+ imports map[string]*basicScope
+}
+
+func newScope(parent *basicScope) *basicScope {
+ return &basicScope{
+ parent: parent,
+ variables: make(map[string]Variable),
+ pools: make(map[string]Pool),
+ rules: make(map[string]Rule),
+ imports: make(map[string]*basicScope),
+ }
+}
+
+func makeRuleScope(parent *basicScope, argNames map[string]bool) *basicScope {
+ scope := newScope(parent)
+ for argName := range argNames {
+ _, err := scope.LookupVariable(argName)
+ if err != nil {
+ arg := &argVariable{argName}
+ err = scope.AddVariable(arg)
+ if err != nil {
+ // This should not happen. We should have already checked that
+ // the name is valid and that the scope doesn't have a variable
+ // with this name.
+ panic(err)
+ }
+ }
+ }
+
+ // We treat built-in variables like arguments for the purpose of this scope.
+ for _, builtin := range builtinRuleArgs {
+ arg := &argVariable{builtin}
+ err := scope.AddVariable(arg)
+ if err != nil {
+ panic(err)
+ }
+ }
+
+ return scope
+}
+
+// LookupVariable resolves a variable name in this scope chain. A dotted name
+// of the form "pkg.Var" is looked up in the named imported package's scope
+// and must refer to an exported (upper-case first rune) variable; a bare
+// name is searched for in this scope and then in each parent scope in turn.
+// An error is returned for malformed names, unknown imported packages, and
+// undefined variables.
+func (s *basicScope) LookupVariable(name string) (Variable, error) {
+ dotIndex := strings.IndexRune(name, '.')
+ if dotIndex >= 0 {
+ // The variable name looks like "pkg.var"
+ if dotIndex+1 == len(name) {
+ return nil, fmt.Errorf("variable name %q ends with a '.'", name)
+ }
+ if strings.ContainsRune(name[dotIndex+1:], '.') {
+ return nil, fmt.Errorf("variable name %q contains multiple '.' "+
+ "characters", name)
+ }
+
+ pkgName := name[:dotIndex]
+ varName := name[dotIndex+1:]
+
+ first, _ := utf8.DecodeRuneInString(varName)
+ if !unicode.IsUpper(first) {
+ return nil, fmt.Errorf("cannot refer to unexported name %q", name)
+ }
+
+ importedScope, err := s.lookupImportedScope(pkgName)
+ if err != nil {
+ return nil, err
+ }
+
+ v, ok := importedScope.variables[varName]
+ if !ok {
+ return nil, fmt.Errorf("package %q does not contain variable %q",
+ pkgName, varName)
+ }
+
+ return v, nil
+ } else {
+ // The variable name has no package part; just "var"
+ for ; s != nil; s = s.parent {
+ v, ok := s.variables[name]
+ if ok {
+ return v, nil
+ }
+ }
+ return nil, fmt.Errorf("undefined variable %q", name)
+ }
+}
+
+// IsRuleVisible reports whether rule may be referenced from this scope.
+// Built-in rules are always visible; any other rule must be defined in this
+// scope, in one of its parent scopes, or in a package imported by any scope
+// in the chain.
+func (s *basicScope) IsRuleVisible(rule Rule) bool {
+ _, isBuiltin := rule.(*builtinRule)
+ if isBuiltin {
+ return true
+ }
+
+ name := rule.name()
+
+ for s != nil {
+ if s.rules[name] == rule {
+ return true
+ }
+
+ for _, import_ := range s.imports {
+ if import_.rules[name] == rule {
+ return true
+ }
+ }
+
+ s = s.parent
+ }
+
+ return false
+}
+
+func (s *basicScope) IsPoolVisible(pool Pool) bool {
+ _, isBuiltin := pool.(*builtinPool)
+ if isBuiltin {
+ return true
+ }
+
+ name := pool.name()
+
+ for s != nil {
+ if s.pools[name] == pool {
+ return true
+ }
+
+ for _, import_ := range s.imports {
+ if import_.pools[name] == pool {
+ return true
+ }
+ }
+
+ s = s.parent
+ }
+
+ return false
+}
+
+func (s *basicScope) lookupImportedScope(pkgName string) (*basicScope, error) {
+ for ; s != nil; s = s.parent {
+ importedScope, ok := s.imports[pkgName]
+ if ok {
+ return importedScope, nil
+ }
+ }
+ return nil, fmt.Errorf("unknown imported package %q (missing call to "+
+ "blueprint.Import()?)", pkgName)
+}
+
+func (s *basicScope) AddImport(name string, importedScope *basicScope) error {
+ _, present := s.imports[name]
+ if present {
+ return fmt.Errorf("import %q is already defined in this scope", name)
+ }
+ s.imports[name] = importedScope
+ return nil
+}
+
+func (s *basicScope) AddVariable(v Variable) error {
+ name := v.name()
+ _, present := s.variables[name]
+ if present {
+ return fmt.Errorf("variable %q is already defined in this scope", name)
+ }
+ s.variables[name] = v
+ return nil
+}
+
+func (s *basicScope) AddPool(p Pool) error {
+ name := p.name()
+ _, present := s.pools[name]
+ if present {
+ return fmt.Errorf("pool %q is already defined in this scope", name)
+ }
+ s.pools[name] = p
+ return nil
+}
+
+func (s *basicScope) AddRule(r Rule) error {
+ name := r.name()
+ _, present := s.rules[name]
+ if present {
+ return fmt.Errorf("rule %q is already defined in this scope", name)
+ }
+ s.rules[name] = r
+ return nil
+}
+
+type localScope struct {
+ namePrefix string
+ scope *basicScope
+}
+
+func newLocalScope(parent *basicScope, namePrefix string) *localScope {
+ return &localScope{
+ namePrefix: namePrefix,
+ scope: newScope(parent),
+ }
+}
+
+// ReparentTo sets the localScope's parent scope to the scope of the given
+// package context. This allows a ModuleContext and SingletonContext to call
+// a function defined in a different Go package and have that function retain
+// access to all of the package-scoped variables of its own package.
+func (s *localScope) ReparentTo(pctx PackageContext) {
+ s.scope.parent = pctx.getScope()
+}
+
+func (s *localScope) LookupVariable(name string) (Variable, error) {
+ return s.scope.LookupVariable(name)
+}
+
+func (s *localScope) IsRuleVisible(rule Rule) bool {
+ return s.scope.IsRuleVisible(rule)
+}
+
+func (s *localScope) IsPoolVisible(pool Pool) bool {
+ return s.scope.IsPoolVisible(pool)
+}
+
+func (s *localScope) AddLocalVariable(name, value string) (*localVariable,
+ error) {
+
+ err := validateNinjaName(name)
+ if err != nil {
+ return nil, err
+ }
+
+ if strings.ContainsRune(name, '.') {
+ return nil, fmt.Errorf("local variable name %q contains '.'", name)
+ }
+
+ ninjaValue, err := parseNinjaString(s.scope, value)
+ if err != nil {
+ return nil, err
+ }
+
+ v := &localVariable{
+ namePrefix: s.namePrefix,
+ name_: name,
+ value_: ninjaValue,
+ }
+
+ err = s.scope.AddVariable(v)
+ if err != nil {
+ return nil, err
+ }
+
+ return v, nil
+}
+
+func (s *localScope) AddLocalRule(name string, params *RuleParams,
+ argNames ...string) (*localRule, error) {
+
+ err := validateNinjaName(name)
+ if err != nil {
+ return nil, err
+ }
+
+ err = validateArgNames(argNames)
+ if err != nil {
+ return nil, fmt.Errorf("invalid argument name: %s", err)
+ }
+
+ argNamesSet := make(map[string]bool)
+ for _, argName := range argNames {
+ argNamesSet[argName] = true
+ }
+
+ ruleScope := makeRuleScope(s.scope, argNamesSet)
+
+ def, err := parseRuleParams(ruleScope, params)
+ if err != nil {
+ return nil, err
+ }
+
+ r := &localRule{
+ namePrefix: s.namePrefix,
+ name_: name,
+ def_: def,
+ argNames: argNamesSet,
+ scope_: ruleScope,
+ }
+
+ err = s.scope.AddRule(r)
+ if err != nil {
+ return nil, err
+ }
+
+ return r, nil
+}
+
+type localVariable struct {
+ namePrefix string
+ name_ string
+ value_ *ninjaString
+}
+
+func (l *localVariable) packageContext() *packageContext {
+ return nil
+}
+
+func (l *localVariable) name() string {
+ return l.name_
+}
+
+func (l *localVariable) fullName(pkgNames map[*packageContext]string) string {
+ return l.namePrefix + l.name_
+}
+
+func (l *localVariable) value(interface{}) (*ninjaString, error) {
+ return l.value_, nil
+}
+
+func (l *localVariable) String() string {
+ return ":" + l.namePrefix + l.name_
+}
+
+type localRule struct {
+ namePrefix string
+ name_ string
+ def_ *ruleDef
+ argNames map[string]bool
+ scope_ *basicScope
+}
+
+func (l *localRule) packageContext() *packageContext {
+ return nil
+}
+
+func (l *localRule) name() string {
+ return l.name_
+}
+
+func (l *localRule) fullName(pkgNames map[*packageContext]string) string {
+ return l.namePrefix + l.name_
+}
+
+func (l *localRule) def(interface{}) (*ruleDef, error) {
+ return l.def_, nil
+}
+
+func (r *localRule) scope() *basicScope {
+ return r.scope_
+}
+
+func (r *localRule) isArg(argName string) bool {
+ return r.argNames[argName]
+}
+
+func (r *localRule) String() string {
+ return ":" + r.namePrefix + r.name_
+}
diff --git a/blueprint/singleton_ctx.go b/blueprint/singleton_ctx.go
new file mode 100644
index 0000000000000000000000000000000000000000..a2d6109e864c394aa479433d10d8559e94c947bc
--- /dev/null
+++ b/blueprint/singleton_ctx.go
@@ -0,0 +1,203 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package blueprint
+
+import (
+ "fmt"
+)
+
+type Singleton interface {
+ GenerateBuildActions(SingletonContext)
+}
+
+type SingletonContext interface {
+ Config() interface{}
+
+ ModuleName(module Module) string
+ ModuleDir(module Module) string
+ ModuleSubDir(module Module) string
+ BlueprintFile(module Module) string
+
+ ModuleErrorf(module Module, format string, args ...interface{})
+ Errorf(format string, args ...interface{})
+ Failed() bool
+
+ Variable(pctx PackageContext, name, value string)
+ Rule(pctx PackageContext, name string, params RuleParams, argNames ...string) Rule
+ Build(pctx PackageContext, params BuildParams)
+ RequireNinjaVersion(major, minor, micro int)
+
+ // SetNinjaBuildDir sets the value of the top-level "builddir" Ninja variable
+ // that controls where Ninja stores its build log files. This value can be
+ // set at most one time for a single build, later calls are ignored.
+ SetNinjaBuildDir(pctx PackageContext, value string)
+
+ VisitAllModules(visit func(Module))
+ VisitAllModulesIf(pred func(Module) bool, visit func(Module))
+ VisitDepsDepthFirst(module Module, visit func(Module))
+ VisitDepsDepthFirstIf(module Module, pred func(Module) bool,
+ visit func(Module))
+
+ VisitAllModuleVariants(module Module, visit func(Module))
+
+ PrimaryModule(module Module) Module
+ FinalModule(module Module) Module
+
+ AddNinjaFileDeps(deps ...string)
+}
+
+var _ SingletonContext = (*singletonContext)(nil)
+
+type singletonContext struct {
+ context *Context
+ config interface{}
+ scope *localScope
+
+ ninjaFileDeps []string
+ errs []error
+
+ actionDefs localBuildActions
+}
+
+func (s *singletonContext) Config() interface{} {
+ return s.config
+}
+
+func (s *singletonContext) ModuleName(logicModule Module) string {
+ return s.context.ModuleName(logicModule)
+}
+
+func (s *singletonContext) ModuleDir(logicModule Module) string {
+ return s.context.ModuleDir(logicModule)
+}
+
+func (s *singletonContext) ModuleSubDir(logicModule Module) string {
+ return s.context.ModuleSubDir(logicModule)
+}
+
+func (s *singletonContext) BlueprintFile(logicModule Module) string {
+ return s.context.BlueprintFile(logicModule)
+}
+
+func (s *singletonContext) error(err error) {
+ if err != nil {
+ s.errs = append(s.errs, err)
+ }
+}
+
+func (s *singletonContext) ModuleErrorf(logicModule Module, format string,
+ args ...interface{}) {
+
+ s.error(s.context.ModuleErrorf(logicModule, format, args...))
+}
+
+func (s *singletonContext) Errorf(format string, args ...interface{}) {
+ // TODO: Make this not result in the error being printed as "internal error"
+ s.error(fmt.Errorf(format, args...))
+}
+
+func (s *singletonContext) Failed() bool {
+ return len(s.errs) > 0
+}
+
+func (s *singletonContext) Variable(pctx PackageContext, name, value string) {
+ s.scope.ReparentTo(pctx)
+
+ v, err := s.scope.AddLocalVariable(name, value)
+ if err != nil {
+ panic(err)
+ }
+
+ s.actionDefs.variables = append(s.actionDefs.variables, v)
+}
+
+func (s *singletonContext) Rule(pctx PackageContext, name string,
+ params RuleParams, argNames ...string) Rule {
+
+ s.scope.ReparentTo(pctx)
+
+ r, err := s.scope.AddLocalRule(name, ¶ms, argNames...)
+ if err != nil {
+ panic(err)
+ }
+
+ s.actionDefs.rules = append(s.actionDefs.rules, r)
+
+ return r
+}
+
+func (s *singletonContext) Build(pctx PackageContext, params BuildParams) {
+ s.scope.ReparentTo(pctx)
+
+ def, err := parseBuildParams(s.scope, ¶ms)
+ if err != nil {
+ panic(err)
+ }
+
+ s.actionDefs.buildDefs = append(s.actionDefs.buildDefs, def)
+}
+
+func (s *singletonContext) RequireNinjaVersion(major, minor, micro int) {
+ s.context.requireNinjaVersion(major, minor, micro)
+}
+
+func (s *singletonContext) SetNinjaBuildDir(pctx PackageContext, value string) {
+ s.scope.ReparentTo(pctx)
+
+ ninjaValue, err := parseNinjaString(s.scope, value)
+ if err != nil {
+ panic(err)
+ }
+
+ s.context.setNinjaBuildDir(ninjaValue)
+}
+
+func (s *singletonContext) VisitAllModules(visit func(Module)) {
+ s.context.VisitAllModules(visit)
+}
+
+func (s *singletonContext) VisitAllModulesIf(pred func(Module) bool,
+ visit func(Module)) {
+
+ s.context.VisitAllModulesIf(pred, visit)
+}
+
+func (s *singletonContext) VisitDepsDepthFirst(module Module,
+ visit func(Module)) {
+
+ s.context.VisitDepsDepthFirst(module, visit)
+}
+
+func (s *singletonContext) VisitDepsDepthFirstIf(module Module,
+ pred func(Module) bool, visit func(Module)) {
+
+ s.context.VisitDepsDepthFirstIf(module, pred, visit)
+}
+
+func (s *singletonContext) PrimaryModule(module Module) Module {
+ return s.context.PrimaryModule(module)
+}
+
+func (s *singletonContext) FinalModule(module Module) Module {
+ return s.context.FinalModule(module)
+}
+
+func (s *singletonContext) VisitAllModuleVariants(module Module, visit func(Module)) {
+ s.context.VisitAllModuleVariants(module, visit)
+}
+
+func (s *singletonContext) AddNinjaFileDeps(deps ...string) {
+ s.ninjaFileDeps = append(s.ninjaFileDeps, deps...)
+}
diff --git a/blueprint/splice_modules_test.go b/blueprint/splice_modules_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..cfe905a66eb01d3ee55a030e72f638f4380c8848
--- /dev/null
+++ b/blueprint/splice_modules_test.go
@@ -0,0 +1,129 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package blueprint
+
+import (
+ "reflect"
+ "testing"
+)
+
+var (
+ testModuleA = &moduleInfo{variantName: "testModuleA"}
+ testModuleB = &moduleInfo{variantName: "testModuleB"}
+ testModuleC = &moduleInfo{variantName: "testModuleC"}
+ testModuleD = &moduleInfo{variantName: "testModuleD"}
+ testModuleE = &moduleInfo{variantName: "testModuleE"}
+ testModuleF = &moduleInfo{variantName: "testModuleF"}
+)
+
+var spliceModulesTestCases = []struct {
+ in []*moduleInfo
+ replace *moduleInfo
+ with []*moduleInfo
+ out []*moduleInfo
+ reallocate bool
+}{
+ {
+ // Insert at the beginning
+ in: []*moduleInfo{testModuleA, testModuleB, testModuleC},
+ replace: testModuleA,
+ with: []*moduleInfo{testModuleD, testModuleE},
+ out: []*moduleInfo{testModuleD, testModuleE, testModuleB, testModuleC},
+ reallocate: true,
+ },
+ {
+ // Insert in the middle
+ in: []*moduleInfo{testModuleA, testModuleB, testModuleC},
+ replace: testModuleB,
+ with: []*moduleInfo{testModuleD, testModuleE},
+ out: []*moduleInfo{testModuleA, testModuleD, testModuleE, testModuleC},
+ reallocate: true,
+ },
+ {
+ // Insert at the end
+ in: []*moduleInfo{testModuleA, testModuleB, testModuleC},
+ replace: testModuleC,
+ with: []*moduleInfo{testModuleD, testModuleE},
+ out: []*moduleInfo{testModuleA, testModuleB, testModuleD, testModuleE},
+ reallocate: true,
+ },
+ {
+ // Insert over a single element
+ in: []*moduleInfo{testModuleA},
+ replace: testModuleA,
+ with: []*moduleInfo{testModuleD, testModuleE},
+ out: []*moduleInfo{testModuleD, testModuleE},
+ reallocate: true,
+ },
+ {
+ // Insert at the beginning without reallocating
+ in: []*moduleInfo{testModuleA, testModuleB, testModuleC, nil}[0:3],
+ replace: testModuleA,
+ with: []*moduleInfo{testModuleD, testModuleE},
+ out: []*moduleInfo{testModuleD, testModuleE, testModuleB, testModuleC},
+ reallocate: false,
+ },
+ {
+ // Insert in the middle without reallocating
+ in: []*moduleInfo{testModuleA, testModuleB, testModuleC, nil}[0:3],
+ replace: testModuleB,
+ with: []*moduleInfo{testModuleD, testModuleE},
+ out: []*moduleInfo{testModuleA, testModuleD, testModuleE, testModuleC},
+ reallocate: false,
+ },
+ {
+ // Insert at the end without reallocating
+ in: []*moduleInfo{testModuleA, testModuleB, testModuleC, nil}[0:3],
+ replace: testModuleC,
+ with: []*moduleInfo{testModuleD, testModuleE},
+ out: []*moduleInfo{testModuleA, testModuleB, testModuleD, testModuleE},
+ reallocate: false,
+ },
+ {
+ // Insert over a single element without reallocating
+ in: []*moduleInfo{testModuleA, nil}[0:1],
+ replace: testModuleA,
+ with: []*moduleInfo{testModuleD, testModuleE},
+ out: []*moduleInfo{testModuleD, testModuleE},
+ reallocate: false,
+ },
+}
+
+// TestSpliceModules verifies spliceModules against each table-driven case:
+// the spliced slice must equal the expected slice, and whether the backing
+// array was reallocated (checked via sameArray) must match the case's
+// reallocate flag.
+func TestSpliceModules(t *testing.T) {
+ for _, testCase := range spliceModulesTestCases {
+ // Copy the input, preserving its capacity, so spliceModules cannot
+ // mutate the shared test data between cases.
+ in := make([]*moduleInfo, len(testCase.in), cap(testCase.in))
+ copy(in, testCase.in)
+ origIn := in
+ got := spliceModules(in, testCase.replace, testCase.with)
+ if !reflect.DeepEqual(got, testCase.out) {
+ t.Errorf("test case: %v, %v -> %v", testCase.in, testCase.replace, testCase.with)
+ t.Errorf("incorrect output:")
+ t.Errorf(" expected: %v", testCase.out)
+ t.Errorf(" got: %v", got)
+ }
+ if sameArray(origIn, got) != !testCase.reallocate {
+ t.Errorf("test case: %v, %v -> %v", testCase.in, testCase.replace, testCase.with)
+ not := ""
+ if !testCase.reallocate {
+ not = " not"
+ }
+ t.Errorf(" expected to%s reallocate", not)
+ }
+ }
+}
+
+// sameArray reports whether a and b share the same backing array, by
+// extending each slice to its full capacity and comparing the addresses of
+// the final elements. Both slices must have non-zero capacity.
+func sameArray(a, b []*moduleInfo) bool {
+ return &a[0:cap(a)][cap(a)-1] == &b[0:cap(b)][cap(b)-1]
+}
diff --git a/blueprint/tests/bootstrap.bash b/blueprint/tests/bootstrap.bash
new file mode 100755
index 0000000000000000000000000000000000000000..4b58b198bd81f6c51ba128775d9b0e59c8ef78fe
--- /dev/null
+++ b/blueprint/tests/bootstrap.bash
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+export BOOTSTRAP="${BASH_SOURCE[0]}"
+export SRCDIR=".."
+export BOOTSTRAP_MANIFEST="src.build.ninja.in"
+
+../bootstrap.bash "$@"
diff --git a/blueprint/tests/expected_all b/blueprint/tests/expected_all
new file mode 100644
index 0000000000000000000000000000000000000000..b16fc78f03f3ba5b0ac5bcd7a3f59bad01e754b0
--- /dev/null
+++ b/blueprint/tests/expected_all
@@ -0,0 +1,3 @@
+Choosing bootstrap.ninja.in for next stage
+Choosing primary.ninja.in for next stage
+Choosing main.ninja.in for next stage
diff --git a/blueprint/tests/expected_manifest b/blueprint/tests/expected_manifest
new file mode 100644
index 0000000000000000000000000000000000000000..3970edb2dd472ec792e9d8b5e03af963d4d37fc9
--- /dev/null
+++ b/blueprint/tests/expected_manifest
@@ -0,0 +1,4 @@
+Newer source version of build.ninja.in. Copying to bootstrap.ninja.in
+Choosing bootstrap.ninja.in for next stage
+Choosing primary.ninja.in for next stage
+Choosing main.ninja.in for next stage
diff --git a/blueprint/tests/expected_none b/blueprint/tests/expected_none
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/blueprint/tests/expected_primary b/blueprint/tests/expected_primary
new file mode 100644
index 0000000000000000000000000000000000000000..43f2d354840182173e005f778b9e023e0ff6e364
--- /dev/null
+++ b/blueprint/tests/expected_primary
@@ -0,0 +1,2 @@
+Choosing primary.ninja.in for next stage
+Choosing main.ninja.in for next stage
diff --git a/blueprint/tests/expected_rebuild_test b/blueprint/tests/expected_rebuild_test
new file mode 100644
index 0000000000000000000000000000000000000000..b16fc78f03f3ba5b0ac5bcd7a3f59bad01e754b0
--- /dev/null
+++ b/blueprint/tests/expected_rebuild_test
@@ -0,0 +1,3 @@
+Choosing bootstrap.ninja.in for next stage
+Choosing primary.ninja.in for next stage
+Choosing main.ninja.in for next stage
diff --git a/blueprint/tests/expected_regen b/blueprint/tests/expected_regen
new file mode 100644
index 0000000000000000000000000000000000000000..e2e10b8610eda25ab2ad733ab0eef219f67be292
--- /dev/null
+++ b/blueprint/tests/expected_regen
@@ -0,0 +1,6 @@
+Newer source version of src.build.ninja.in. Copying to bootstrap.ninja.in
+Choosing bootstrap.ninja.in for next stage
+Stage bootstrap.ninja.in has changed, restarting
+Choosing bootstrap.ninja.in for next stage
+Choosing primary.ninja.in for next stage
+Choosing main.ninja.in for next stage
diff --git a/blueprint/tests/expected_start b/blueprint/tests/expected_start
new file mode 100644
index 0000000000000000000000000000000000000000..43f2d354840182173e005f778b9e023e0ff6e364
--- /dev/null
+++ b/blueprint/tests/expected_start
@@ -0,0 +1,2 @@
+Choosing primary.ninja.in for next stage
+Choosing main.ninja.in for next stage
diff --git a/blueprint/tests/expected_start2 b/blueprint/tests/expected_start2
new file mode 100644
index 0000000000000000000000000000000000000000..4c339e2a5099e8a9e64e8ee6f58dff4d66c80618
--- /dev/null
+++ b/blueprint/tests/expected_start2
@@ -0,0 +1,4 @@
+Stage bootstrap.ninja.in has changed, restarting
+Choosing bootstrap.ninja.in for next stage
+Choosing primary.ninja.in for next stage
+Choosing main.ninja.in for next stage
diff --git a/blueprint/tests/expected_start_add_tests b/blueprint/tests/expected_start_add_tests
new file mode 100644
index 0000000000000000000000000000000000000000..4c339e2a5099e8a9e64e8ee6f58dff4d66c80618
--- /dev/null
+++ b/blueprint/tests/expected_start_add_tests
@@ -0,0 +1,4 @@
+Stage bootstrap.ninja.in has changed, restarting
+Choosing bootstrap.ninja.in for next stage
+Choosing primary.ninja.in for next stage
+Choosing main.ninja.in for next stage
diff --git a/blueprint/tests/expected_wrapper_all b/blueprint/tests/expected_wrapper_all
new file mode 100644
index 0000000000000000000000000000000000000000..43f2d354840182173e005f778b9e023e0ff6e364
--- /dev/null
+++ b/blueprint/tests/expected_wrapper_all
@@ -0,0 +1,2 @@
+Choosing primary.ninja.in for next stage
+Choosing main.ninja.in for next stage
diff --git a/blueprint/tests/expected_wrapper_regen b/blueprint/tests/expected_wrapper_regen
new file mode 100644
index 0000000000000000000000000000000000000000..4c339e2a5099e8a9e64e8ee6f58dff4d66c80618
--- /dev/null
+++ b/blueprint/tests/expected_wrapper_regen
@@ -0,0 +1,4 @@
+Stage bootstrap.ninja.in has changed, restarting
+Choosing bootstrap.ninja.in for next stage
+Choosing primary.ninja.in for next stage
+Choosing main.ninja.in for next stage
diff --git a/blueprint/tests/expected_wrapper_start b/blueprint/tests/expected_wrapper_start
new file mode 100644
index 0000000000000000000000000000000000000000..43f2d354840182173e005f778b9e023e0ff6e364
--- /dev/null
+++ b/blueprint/tests/expected_wrapper_start
@@ -0,0 +1,2 @@
+Choosing primary.ninja.in for next stage
+Choosing main.ninja.in for next stage
diff --git a/blueprint/tests/expected_wrapper_start2 b/blueprint/tests/expected_wrapper_start2
new file mode 100644
index 0000000000000000000000000000000000000000..43f2d354840182173e005f778b9e023e0ff6e364
--- /dev/null
+++ b/blueprint/tests/expected_wrapper_start2
@@ -0,0 +1,2 @@
+Choosing primary.ninja.in for next stage
+Choosing main.ninja.in for next stage
diff --git a/blueprint/tests/test.sh b/blueprint/tests/test.sh
new file mode 100755
index 0000000000000000000000000000000000000000..8880a86c6901b15425592d9bf2af58fd859a8483
--- /dev/null
+++ b/blueprint/tests/test.sh
@@ -0,0 +1,141 @@
+#!/bin/bash
+
+# Go to srcdir
+cd $(dirname ${BASH_SOURCE[0]})/..
+
+rm -rf out.test
+mkdir out.test
+cd out.test
+../bootstrap.bash
+
+# Run ninja, filter the output, and compare against expectations
+# $1: Name of test
+function testcase()
+{
+ echo -n "Running $1..."
+ if ! ninja -v -d explain >log_$1 2>&1; then
+ echo " Failed."
+ echo "Test $1 Failed:" >>failed
+ tail log_$1 >>failed
+ return
+ fi
+ grep -E "^(Choosing|Newer|Stage)" log_$1 >test_$1
+ if ! cmp -s test_$1 ../tests/expected_$1; then
+ echo " Failed."
+ echo "Test $1 Failed:" >>failed
+ diff -u ../tests/expected_$1 test_$1 >>failed
+ else
+ echo " Passed."
+ fi
+}
+
+# Run wrapper, filter the output, and compare against expectations
+# $1: Name of test
+function testcase_wrapper()
+{
+ echo -n "Running wrapper_$1..."
+ if ! ./blueprint.bash -v -d explain >log_wrapper_$1 2>&1; then
+ echo " Failed."
+ echo "Test wrapper_$1 Failed:" >>failed
+ tail log_wrapper_$1 >>failed
+ return
+ fi
+ grep -E "^(Choosing|Newer|Stage)" log_wrapper_$1 >test_wrapper_$1
+ if ! cmp -s test_wrapper_$1 ../tests/expected_wrapper_$1; then
+ echo " Failed."
+ echo "Test wrapper_$1 Failed:" >>failed
+ diff -u ../tests/expected_wrapper_$1 test_wrapper_$1 >>failed
+ else
+ echo " Passed."
+ fi
+}
+
+
+testcase start
+
+# The 2 second sleeps are needed until ninja understands sub-second timestamps
+# https://github.com/martine/ninja/issues/371
+
+# This test affects all bootstrap stages
+sleep 2
+touch ../Blueprints
+testcase all
+
+# This test affects only the primary bootstrap stage
+sleep 2
+touch ../bpmodify/bpmodify.go
+testcase primary
+
+# This test affects nothing, nothing should be done
+sleep 2
+testcase none
+
+# This test will cause the source build.ninja.in to be copied into the first
+# stage.
+sleep 2
+touch ../build.ninja.in
+testcase manifest
+
+# From now on, we're going to be modifying the build.ninja.in, so let's make our
+# own copy
+sleep 2
+../tests/bootstrap.bash -r
+
+sleep 2
+testcase start2
+
+# This is similar to the last test, but incorporates a change into the source
+# build.ninja.in, so that we'll restart into the new version created by the
+# build.
+sleep 2
+echo "# test" >>src.build.ninja.in
+testcase regen
+
+# Add tests to our build by using '-t'
+sleep 2
+../tests/bootstrap.bash -r -t
+
+sleep 2
+testcase start_add_tests
+
+# Make sure that updating a test file causes us to go back to the bootstrap
+# stage
+sleep 2
+touch ../parser/parser_test.go
+testcase rebuild_test
+
+# Restart testing using the wrapper instead of going straight to ninja. This
+# will force each test to start in the correct bootstrap stage, so there are
+# fewer cases to test.
+cd ..
+rm -rf out.test
+mkdir -p out.test
+cd out.test
+../bootstrap.bash
+
+testcase_wrapper start
+
+# This test affects all bootstrap stages
+sleep 2
+touch ../Blueprints
+testcase_wrapper all
+
+# From now on, we're going to be modifying the build.ninja.in, so let's make our
+# own copy
+sleep 2
+../tests/bootstrap.bash -r
+
+sleep 2
+testcase_wrapper start2
+
+# This is similar to the last test, but incorporates a change into the source
+# build.ninja.in, so that we'll restart into the new version created by the
+# build.
+sleep 2
+echo "# test" >>src.build.ninja.in
+testcase_wrapper regen
+
+if [ -f failed ]; then
+ cat failed
+ exit 1
+fi
diff --git a/blueprint/unpack.go b/blueprint/unpack.go
new file mode 100644
index 0000000000000000000000000000000000000000..b023956eef5a57333b150a4f355dd29090d9d8a0
--- /dev/null
+++ b/blueprint/unpack.go
@@ -0,0 +1,370 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package blueprint
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/google/blueprint/parser"
+ "github.com/google/blueprint/proptools"
+)
+
+type packedProperty struct {
+ property *parser.Property
+ unpacked bool
+}
+
+func unpackProperties(propertyDefs []*parser.Property,
+ propertiesStructs ...interface{}) (map[string]*parser.Property, []error) {
+
+ propertyMap := make(map[string]*packedProperty)
+ errs := buildPropertyMap("", propertyDefs, propertyMap)
+ if len(errs) > 0 {
+ return nil, errs
+ }
+
+ for _, properties := range propertiesStructs {
+ propertiesValue := reflect.ValueOf(properties)
+ if propertiesValue.Kind() != reflect.Ptr {
+ panic("properties must be a pointer to a struct")
+ }
+
+ propertiesValue = propertiesValue.Elem()
+ if propertiesValue.Kind() != reflect.Struct {
+ panic("properties must be a pointer to a struct")
+ }
+
+ newErrs := unpackStructValue("", propertiesValue, propertyMap, "", "")
+ errs = append(errs, newErrs...)
+
+ if len(errs) >= maxErrors {
+ return nil, errs
+ }
+ }
+
+ // Report any properties that didn't have corresponding struct fields as
+ // errors.
+ result := make(map[string]*parser.Property)
+ for name, packedProperty := range propertyMap {
+ result[name] = packedProperty.property
+ if !packedProperty.unpacked {
+ err := &Error{
+ Err: fmt.Errorf("unrecognized property %q", name),
+ Pos: packedProperty.property.Pos,
+ }
+ errs = append(errs, err)
+ }
+ }
+
+ if len(errs) > 0 {
+ return nil, errs
+ }
+
+ return result, nil
+}
+
+func buildPropertyMap(namePrefix string, propertyDefs []*parser.Property,
+ propertyMap map[string]*packedProperty) (errs []error) {
+
+ for _, propertyDef := range propertyDefs {
+ name := namePrefix + propertyDef.Name.Name
+ if first, present := propertyMap[name]; present {
+ if first.property == propertyDef {
+ // We've already added this property.
+ continue
+ }
+
+ errs = append(errs, &Error{
+ Err: fmt.Errorf("property %q already defined", name),
+ Pos: propertyDef.Pos,
+ })
+ errs = append(errs, &Error{
+ Err: fmt.Errorf("<-- previous definition here"),
+ Pos: first.property.Pos,
+ })
+ if len(errs) >= maxErrors {
+ return errs
+ }
+ continue
+ }
+
+ propertyMap[name] = &packedProperty{
+ property: propertyDef,
+ unpacked: false,
+ }
+
+		// We intentionally do not recursively add MapValue properties to the
+ // property map here. Instead we add them when we encounter a struct
+ // into which they can be unpacked. We do this so that if we never
+ // encounter such a struct then the "unrecognized property" error will
+ // be reported only once for the map property and not for each of its
+ // sub-properties.
+ }
+
+ return
+}
+
+func unpackStructValue(namePrefix string, structValue reflect.Value,
+ propertyMap map[string]*packedProperty, filterKey, filterValue string) []error {
+
+ structType := structValue.Type()
+
+ var errs []error
+ for i := 0; i < structValue.NumField(); i++ {
+ fieldValue := structValue.Field(i)
+ field := structType.Field(i)
+
+ if field.PkgPath != "" {
+ // This is an unexported field, so just skip it.
+ continue
+ }
+
+ propertyName := namePrefix + proptools.PropertyNameForField(field.Name)
+
+ if !fieldValue.CanSet() {
+ panic(fmt.Errorf("field %s is not settable", propertyName))
+ }
+
+ // To make testing easier we validate the struct field's type regardless
+ // of whether or not the property was specified in the parsed string.
+ switch kind := fieldValue.Kind(); kind {
+ case reflect.Bool, reflect.String, reflect.Struct:
+ // Do nothing
+ case reflect.Slice:
+ elemType := field.Type.Elem()
+ if elemType.Kind() != reflect.String {
+ panic(fmt.Errorf("field %s is a non-string slice", propertyName))
+ }
+ case reflect.Interface:
+ if fieldValue.IsNil() {
+ panic(fmt.Errorf("field %s contains a nil interface", propertyName))
+ }
+ fieldValue = fieldValue.Elem()
+ elemType := fieldValue.Type()
+ if elemType.Kind() != reflect.Ptr {
+ panic(fmt.Errorf("field %s contains a non-pointer interface", propertyName))
+ }
+ fallthrough
+ case reflect.Ptr:
+ switch ptrKind := fieldValue.Type().Elem().Kind(); ptrKind {
+ case reflect.Struct:
+ if fieldValue.IsNil() {
+ panic(fmt.Errorf("field %s contains a nil pointer", propertyName))
+ }
+ fieldValue = fieldValue.Elem()
+ case reflect.Bool, reflect.String:
+ // Nothing
+ default:
+ panic(fmt.Errorf("field %s contains a pointer to %s", propertyName, ptrKind))
+ }
+
+ case reflect.Int, reflect.Uint:
+ if !proptools.HasTag(field, "blueprint", "mutated") {
+ panic(fmt.Errorf(`int field %s must be tagged blueprint:"mutated"`, propertyName))
+ }
+
+ default:
+ panic(fmt.Errorf("unsupported kind for field %s: %s", propertyName, kind))
+ }
+
+ if field.Anonymous && fieldValue.Kind() == reflect.Struct {
+ newErrs := unpackStructValue(namePrefix, fieldValue, propertyMap, filterKey, filterValue)
+ errs = append(errs, newErrs...)
+ continue
+ }
+
+ // Get the property value if it was specified.
+ packedProperty, ok := propertyMap[propertyName]
+ if !ok {
+ // This property wasn't specified.
+ continue
+ }
+
+ packedProperty.unpacked = true
+
+ if proptools.HasTag(field, "blueprint", "mutated") {
+ errs = append(errs,
+ &Error{
+ Err: fmt.Errorf("mutated field %s cannot be set in a Blueprint file", propertyName),
+ Pos: packedProperty.property.Pos,
+ })
+ if len(errs) >= maxErrors {
+ return errs
+ }
+ continue
+ }
+
+ if filterKey != "" && !proptools.HasTag(field, filterKey, filterValue) {
+ errs = append(errs,
+ &Error{
+ Err: fmt.Errorf("filtered field %s cannot be set in a Blueprint file", propertyName),
+ Pos: packedProperty.property.Pos,
+ })
+ if len(errs) >= maxErrors {
+ return errs
+ }
+ continue
+ }
+
+ var newErrs []error
+
+ switch kind := fieldValue.Kind(); kind {
+ case reflect.Bool:
+ newErrs = unpackBool(fieldValue, packedProperty.property)
+ case reflect.String:
+ newErrs = unpackString(fieldValue, packedProperty.property)
+ case reflect.Slice:
+ newErrs = unpackSlice(fieldValue, packedProperty.property)
+ case reflect.Ptr:
+ switch ptrKind := fieldValue.Type().Elem().Kind(); ptrKind {
+ case reflect.Bool:
+ newValue := reflect.New(fieldValue.Type().Elem())
+ newErrs = unpackBool(newValue.Elem(), packedProperty.property)
+ fieldValue.Set(newValue)
+ case reflect.String:
+ newValue := reflect.New(fieldValue.Type().Elem())
+ newErrs = unpackString(newValue.Elem(), packedProperty.property)
+ fieldValue.Set(newValue)
+ default:
+ panic(fmt.Errorf("unexpected pointer kind %s", ptrKind))
+ }
+ case reflect.Struct:
+ localFilterKey, localFilterValue := filterKey, filterValue
+ if k, v, err := HasFilter(field.Tag); err != nil {
+ errs = append(errs, err)
+ if len(errs) >= maxErrors {
+ return errs
+ }
+ } else if k != "" {
+ if filterKey != "" {
+ errs = append(errs, fmt.Errorf("nested filter tag not supported on field %q",
+ field.Name))
+ if len(errs) >= maxErrors {
+ return errs
+ }
+ } else {
+ localFilterKey, localFilterValue = k, v
+ }
+ }
+ newErrs = unpackStruct(propertyName+".", fieldValue,
+ packedProperty.property, propertyMap, localFilterKey, localFilterValue)
+ default:
+ panic(fmt.Errorf("unexpected kind %s", kind))
+ }
+ errs = append(errs, newErrs...)
+ if len(errs) >= maxErrors {
+ return errs
+ }
+ }
+
+ return errs
+}
+
+func unpackBool(boolValue reflect.Value, property *parser.Property) []error {
+ if property.Value.Type != parser.Bool {
+ return []error{
+ fmt.Errorf("%s: can't assign %s value to %s property %q",
+ property.Value.Pos, property.Value.Type, parser.Bool,
+ property.Name),
+ }
+ }
+ boolValue.SetBool(property.Value.BoolValue)
+ return nil
+}
+
+func unpackString(stringValue reflect.Value,
+ property *parser.Property) []error {
+
+ if property.Value.Type != parser.String {
+ return []error{
+ fmt.Errorf("%s: can't assign %s value to %s property %q",
+ property.Value.Pos, property.Value.Type, parser.String,
+ property.Name),
+ }
+ }
+ stringValue.SetString(property.Value.StringValue)
+ return nil
+}
+
+func unpackSlice(sliceValue reflect.Value, property *parser.Property) []error {
+ if property.Value.Type != parser.List {
+ return []error{
+ fmt.Errorf("%s: can't assign %s value to %s property %q",
+ property.Value.Pos, property.Value.Type, parser.List,
+ property.Name),
+ }
+ }
+
+ list := []string{}
+ for _, value := range property.Value.ListValue {
+ if value.Type != parser.String {
+ // The parser should not produce this.
+ panic("non-string value found in list")
+ }
+ list = append(list, value.StringValue)
+ }
+
+ sliceValue.Set(reflect.ValueOf(list))
+ return nil
+}
+
+func unpackStruct(namePrefix string, structValue reflect.Value,
+ property *parser.Property, propertyMap map[string]*packedProperty,
+ filterKey, filterValue string) []error {
+
+ if property.Value.Type != parser.Map {
+ return []error{
+ fmt.Errorf("%s: can't assign %s value to %s property %q",
+ property.Value.Pos, property.Value.Type, parser.Map,
+ property.Name),
+ }
+ }
+
+ errs := buildPropertyMap(namePrefix, property.Value.MapValue, propertyMap)
+ if len(errs) > 0 {
+ return errs
+ }
+
+ return unpackStructValue(namePrefix, structValue, propertyMap, filterKey, filterValue)
+}
+
+func HasFilter(field reflect.StructTag) (k, v string, err error) {
+ tag := field.Get("blueprint")
+ for _, entry := range strings.Split(tag, ",") {
+ if strings.HasPrefix(entry, "filter") {
+ if !strings.HasPrefix(entry, "filter(") || !strings.HasSuffix(entry, ")") {
+ return "", "", fmt.Errorf("unexpected format for filter %q: missing ()", entry)
+ }
+ entry = strings.TrimPrefix(entry, "filter(")
+ entry = strings.TrimSuffix(entry, ")")
+
+ s := strings.Split(entry, ":")
+ if len(s) != 2 {
+ return "", "", fmt.Errorf("unexpected format for filter %q: expected single ':'", entry)
+ }
+ k = s[0]
+ v, err = strconv.Unquote(s[1])
+ if err != nil {
+ return "", "", fmt.Errorf("unexpected format for filter %q: %s", entry, err.Error())
+ }
+ return k, v, nil
+ }
+ }
+
+ return "", "", nil
+}
diff --git a/blueprint/unpack_test.go b/blueprint/unpack_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..b33ae7905679ccd1773d7625215c59399032694b
--- /dev/null
+++ b/blueprint/unpack_test.go
@@ -0,0 +1,401 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package blueprint
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "testing"
+ "text/scanner"
+
+ "github.com/google/blueprint/parser"
+ "github.com/google/blueprint/proptools"
+)
+
+var validUnpackTestCases = []struct {
+ input string
+ output interface{}
+ errs []error
+}{
+ {`
+ m {
+ name: "abc",
+ blank: "",
+ }
+ `,
+ struct {
+ Name *string
+ Blank *string
+ Unset *string
+ }{
+ Name: proptools.StringPtr("abc"),
+ Blank: proptools.StringPtr(""),
+ Unset: nil,
+ },
+ nil,
+ },
+
+ {`
+ m {
+ name: "abc",
+ }
+ `,
+ struct {
+ Name string
+ }{
+ Name: "abc",
+ },
+ nil,
+ },
+
+ {`
+ m {
+ isGood: true,
+ }
+ `,
+ struct {
+ IsGood bool
+ }{
+ IsGood: true,
+ },
+ nil,
+ },
+
+ {`
+ m {
+ isGood: true,
+ isBad: false,
+ }
+ `,
+ struct {
+ IsGood *bool
+ IsBad *bool
+ IsUgly *bool
+ }{
+ IsGood: proptools.BoolPtr(true),
+ IsBad: proptools.BoolPtr(false),
+ IsUgly: nil,
+ },
+ nil,
+ },
+
+ {`
+ m {
+ stuff: ["asdf", "jkl;", "qwert",
+ "uiop", "bnm,"],
+ empty: []
+ }
+ `,
+ struct {
+ Stuff []string
+ Empty []string
+ Nil []string
+ }{
+ Stuff: []string{"asdf", "jkl;", "qwert", "uiop", "bnm,"},
+ Empty: []string{},
+ Nil: nil,
+ },
+ nil,
+ },
+
+ {`
+ m {
+ nested: {
+ name: "abc",
+ }
+ }
+ `,
+ struct {
+ Nested struct {
+ Name string
+ }
+ }{
+ Nested: struct{ Name string }{
+ Name: "abc",
+ },
+ },
+ nil,
+ },
+
+ {`
+ m {
+ nested: {
+ name: "def",
+ }
+ }
+ `,
+ struct {
+ Nested interface{}
+ }{
+ Nested: &struct{ Name string }{
+ Name: "def",
+ },
+ },
+ nil,
+ },
+
+ {`
+ m {
+ nested: {
+ foo: "abc",
+ },
+ bar: false,
+ baz: ["def", "ghi"],
+ }
+ `,
+ struct {
+ Nested struct {
+ Foo string
+ }
+ Bar bool
+ Baz []string
+ }{
+ Nested: struct{ Foo string }{
+ Foo: "abc",
+ },
+ Bar: false,
+ Baz: []string{"def", "ghi"},
+ },
+ nil,
+ },
+
+ {`
+ m {
+ nested: {
+ foo: "abc",
+ },
+ bar: false,
+ baz: ["def", "ghi"],
+ }
+ `,
+ struct {
+ Nested struct {
+ Foo string `allowNested:"true"`
+ } `blueprint:"filter(allowNested:\"true\")"`
+ Bar bool
+ Baz []string
+ }{
+ Nested: struct {
+ Foo string `allowNested:"true"`
+ }{
+ Foo: "abc",
+ },
+ Bar: false,
+ Baz: []string{"def", "ghi"},
+ },
+ nil,
+ },
+
+ {`
+ m {
+ nested: {
+ foo: "abc",
+ },
+ bar: false,
+ baz: ["def", "ghi"],
+ }
+ `,
+ struct {
+ Nested struct {
+ Foo string
+ } `blueprint:"filter(allowNested:\"true\")"`
+ Bar bool
+ Baz []string
+ }{
+ Nested: struct{ Foo string }{
+ Foo: "",
+ },
+ Bar: false,
+ Baz: []string{"def", "ghi"},
+ },
+ []error{
+ &Error{
+ Err: fmt.Errorf("filtered field nested.foo cannot be set in a Blueprint file"),
+ Pos: scanner.Position{"", 27, 4, 8},
+ },
+ },
+ },
+
+ // Anonymous struct
+ {`
+ m {
+ name: "abc",
+ nested: {
+ name: "def",
+ },
+ }
+ `,
+ struct {
+ EmbeddedStruct
+ Nested struct {
+ EmbeddedStruct
+ }
+ }{
+ EmbeddedStruct: EmbeddedStruct{
+ Name: "abc",
+ },
+ Nested: struct {
+ EmbeddedStruct
+ }{
+ EmbeddedStruct: EmbeddedStruct{
+ Name: "def",
+ },
+ },
+ },
+ nil,
+ },
+
+ // Anonymous interface
+ {`
+ m {
+ name: "abc",
+ nested: {
+ name: "def",
+ },
+ }
+ `,
+ struct {
+ EmbeddedInterface
+ Nested struct {
+ EmbeddedInterface
+ }
+ }{
+ EmbeddedInterface: &struct{ Name string }{
+ Name: "abc",
+ },
+ Nested: struct {
+ EmbeddedInterface
+ }{
+ EmbeddedInterface: &struct{ Name string }{
+ Name: "def",
+ },
+ },
+ },
+ nil,
+ },
+
+ // Anonymous struct with name collision
+ {`
+ m {
+ name: "abc",
+ nested: {
+ name: "def",
+ },
+ }
+ `,
+ struct {
+ Name string
+ EmbeddedStruct
+ Nested struct {
+ Name string
+ EmbeddedStruct
+ }
+ }{
+ Name: "abc",
+ EmbeddedStruct: EmbeddedStruct{
+ Name: "abc",
+ },
+ Nested: struct {
+ Name string
+ EmbeddedStruct
+ }{
+ Name: "def",
+ EmbeddedStruct: EmbeddedStruct{
+ Name: "def",
+ },
+ },
+ },
+ nil,
+ },
+
+ // Anonymous interface with name collision
+ {`
+ m {
+ name: "abc",
+ nested: {
+ name: "def",
+ },
+ }
+ `,
+ struct {
+ Name string
+ EmbeddedInterface
+ Nested struct {
+ Name string
+ EmbeddedInterface
+ }
+ }{
+ Name: "abc",
+ EmbeddedInterface: &struct{ Name string }{
+ Name: "abc",
+ },
+ Nested: struct {
+ Name string
+ EmbeddedInterface
+ }{
+ Name: "def",
+ EmbeddedInterface: &struct{ Name string }{
+ Name: "def",
+ },
+ },
+ },
+ nil,
+ },
+}
+
+type EmbeddedStruct struct{ Name string }
+type EmbeddedInterface interface{}
+
+func TestUnpackProperties(t *testing.T) {
+ for _, testCase := range validUnpackTestCases {
+ r := bytes.NewBufferString(testCase.input)
+ file, errs := parser.Parse("", r, nil)
+ if len(errs) != 0 {
+ t.Errorf("test case: %s", testCase.input)
+ t.Errorf("unexpected parse errors:")
+ for _, err := range errs {
+ t.Errorf(" %s", err)
+ }
+ t.FailNow()
+ }
+
+ module := file.Defs[0].(*parser.Module)
+ properties := proptools.CloneProperties(reflect.ValueOf(testCase.output))
+ proptools.ZeroProperties(properties.Elem())
+ _, errs = unpackProperties(module.Properties, properties.Interface())
+ if len(errs) != 0 && len(testCase.errs) == 0 {
+ t.Errorf("test case: %s", testCase.input)
+ t.Errorf("unexpected unpack errors:")
+ for _, err := range errs {
+ t.Errorf(" %s", err)
+ }
+ t.FailNow()
+ } else if !reflect.DeepEqual(errs, testCase.errs) {
+ t.Errorf("test case: %s", testCase.input)
+ t.Errorf("incorrect errors:")
+ t.Errorf(" expected: %+v", testCase.errs)
+ t.Errorf(" got: %+v", errs)
+ }
+
+ output := properties.Elem().Interface()
+ if !reflect.DeepEqual(output, testCase.output) {
+ t.Errorf("test case: %s", testCase.input)
+ t.Errorf("incorrect output:")
+ t.Errorf(" expected: %+v", testCase.output)
+ t.Errorf(" got: %+v", output)
+ }
+ }
+}
diff --git a/buildspec.mk.default b/buildspec.mk.default
new file mode 100644
index 0000000000000000000000000000000000000000..d14208e09994a1a7619d08a4c3ca0e78d4fd6e11
--- /dev/null
+++ b/buildspec.mk.default
@@ -0,0 +1,111 @@
+#
+# Copyright (C) 2007 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+######################################################################
+# This is a do-nothing template file. To use it, copy it to a file
+# named "buildspec.mk" in the root directory, and uncomment or change
+# the variables necessary for your desired configuration. The file
+# "buildspec.mk" should never be checked in to source control.
+######################################################################
+
+# Choose a product to build for. Look in the products directory for ones
+# that work.
+ifndef TARGET_PRODUCT
+#TARGET_PRODUCT:=generic
+endif
+
+# Choose a variant to build. If you don't pick one, the default is eng.
+# User is what we ship. Userdebug is that, with a few flags turned on
+# for debugging. Eng has lots of extra tools for development.
+ifndef TARGET_BUILD_VARIANT
+#TARGET_BUILD_VARIANT:=user
+#TARGET_BUILD_VARIANT:=userdebug
+#TARGET_BUILD_VARIANT:=eng
+endif
+
+# Choose additional targets to always install, even when building
+# minimal targets like "make droid". This takes simple target names
+# like "Browser" or "MyApp", the names used by LOCAL_MODULE or
+# LOCAL_PACKAGE_NAME. Modules listed here will always be installed in
+# /system, even if they'd usually go in /data.
+ifndef CUSTOM_MODULES
+#CUSTOM_MODULES:=
+endif
+
+# Set this to debug or release if you care. Otherwise, it defaults to release.
+ifndef TARGET_BUILD_TYPE
+#TARGET_BUILD_TYPE:=release
+endif
+
+# Uncomment this if you want the host tools built in debug mode. Otherwise
+# it defaults to release.
+ifndef HOST_BUILD_TYPE
+#HOST_BUILD_TYPE:=debug
+endif
+
+# Turn on debugging for selected modules. If DEBUG_MODULE_ is set
+# to a non-empty value, the appropriate HOST_/TARGET_CUSTOM_DEBUG_CFLAGS
+# will be added to LOCAL_CFLAGS when building the module.
+#DEBUG_MODULE_ModuleName:=true
+
+# Specify an alternative tool chain prefix if needed.
+#TARGET_TOOLS_PREFIX:=
+
+# Specify the extra CFLAGS to use when building a module whose
+# DEBUG_MODULE_ variable is set. Host and device flags are handled
+# separately.
+#HOST_CUSTOM_DEBUG_CFLAGS:=
+#TARGET_CUSTOM_DEBUG_CFLAGS:=
+
+# Choose additional locales, like "en_US" or "it_IT", to add to any
+# built product. Any locales that appear in CUSTOM_LOCALES but not in
+# the locale list for the selected product will be added to the end
+# of PRODUCT_LOCALES.
+ifndef CUSTOM_LOCALES
+#CUSTOM_LOCALES:=
+endif
+
+# If you have a special place to put your output files, set this, otherwise
+# it goes to /out
+#OUT_DIR:=/tmp/stuff
+
+# If you want to always set certain system properties, add them to this list.
+# E.g., "ADDITIONAL_BUILD_PROPERTIES += ro.prop1=5 prop2=value"
+# This mechanism does not currently support values containing spaces.
+#ADDITIONAL_BUILD_PROPERTIES +=
+
+# If you want to reduce the system.img size by several meg, and are willing to
+# lose access to CJK (and other) character sets, define NO_FALLBACK_FONT:=true
+ifndef NO_FALLBACK_FONT
+#NO_FALLBACK_FONT:=true
+endif
+
+# OVERRIDE_RUNTIMES allows you to locally override PRODUCT_RUNTIMES.
+#
+# To only build ART, use "runtime_libart_default"
+# To use Dalvik but also include ART, use "runtime_libdvm_default runtime_libart"
+# To use ART but also include Dalvik, use "runtime_libart_default runtime_libdvm"
+ifndef OVERRIDE_RUNTIMES
+#OVERRIDE_RUNTIMES:=runtime_libart_default
+#OVERRIDE_RUNTIMES:=runtime_libdvm_default runtime_libart
+#OVERRIDE_RUNTIMES:=runtime_libart_default runtime_libdvm
+endif
+
+# when the build system changes such that this file must be updated, this
+# variable will be changed. After you have modified this file with the new
+# changes (see buildspec.mk.default), update this to the new value from
+# buildspec.mk.default.
+BUILD_ENV_SEQUENCE_NUMBER := 10
diff --git a/core/LINUX_KERNEL_COPYING b/core/LINUX_KERNEL_COPYING
new file mode 100644
index 0000000000000000000000000000000000000000..ca442d313d86dc67e0a2e5d584b465bd382cbf5c
--- /dev/null
+++ b/core/LINUX_KERNEL_COPYING
@@ -0,0 +1,356 @@
+
+ NOTE! This copyright does *not* cover user programs that use kernel
+ services by normal system calls - this is merely considered normal use
+ of the kernel, and does *not* fall under the heading of "derived work".
+ Also note that the GPL below is copyrighted by the Free Software
+ Foundation, but the instance of code that it refers to (the Linux
+ kernel) is copyrighted by me and others who actually wrote it.
+
+ Also note that the only valid version of the GPL as far as the kernel
+ is concerned is _this_ particular version of the license (ie v2, not
+ v2.2 or v3.x or whatever), unless explicitly otherwise stated.
+
+ Linus Torvalds
+
+----------------------------------------
+
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff --git a/core/Makefile b/core/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..5badde5587efa10f6848d29119d5371ccc257feb
--- /dev/null
+++ b/core/Makefile
@@ -0,0 +1,2262 @@
+# Put some miscellaneous rules here
+
+# HACK: clear LOCAL_PATH from including last build target before calling
+# intermediates-dir-for
+LOCAL_PATH := $(BUILD_SYSTEM)
+
+# Pick a reasonable string to use to identify files.
+ifneq "" "$(filter eng.%,$(BUILD_NUMBER))"
+ # BUILD_NUMBER has a timestamp in it, which means that
+ # it will change every time. Pick a stable value.
+ FILE_NAME_TAG := eng.$(USER)
+else
+ FILE_NAME_TAG := $(BUILD_NUMBER)
+endif
+
+# -----------------------------------------------------------------
+# Define rules to copy PRODUCT_COPY_FILES defined by the product.
+# PRODUCT_COPY_FILES contains words like <source file>:<dest file>[:<owner>].
+# <dest file> is relative to $(PRODUCT_OUT), so it should look like,
+# e.g., "system/etc/file.xml".
+# The filter part means "only eval the copy-one-file rule if this
+# src:dest pair is the first one to match the same dest"
+#$(1): the src:dest pair
+define check-product-copy-files
+$(if $(filter %.apk, $(call word-colon, 2, $(1))),$(error \
+ Prebuilt apk found in PRODUCT_COPY_FILES: $(1), use BUILD_PREBUILT instead!))
+endef
+# filter out the duplicate <source file>:<dest file> pairs.
+unique_product_copy_files_pairs :=
+$(foreach cf,$(PRODUCT_COPY_FILES), \
+ $(if $(filter $(unique_product_copy_files_pairs),$(cf)),,\
+ $(eval unique_product_copy_files_pairs += $(cf))))
+unique_product_copy_files_destinations :=
+$(foreach cf,$(unique_product_copy_files_pairs), \
+ $(eval _src := $(call word-colon,1,$(cf))) \
+ $(eval _dest := $(call word-colon,2,$(cf))) \
+ $(call check-product-copy-files,$(cf)) \
+ $(if $(filter $(unique_product_copy_files_destinations),$(_dest)), \
+ $(info PRODUCT_COPY_FILES $(cf) ignored.), \
+ $(eval _fulldest := $(call append-path,$(PRODUCT_OUT),$(_dest))) \
+ $(if $(filter %.xml,$(_dest)),\
+ $(eval $(call copy-xml-file-checked,$(_src),$(_fulldest))),\
+ $(eval $(call copy-one-file,$(_src),$(_fulldest)))) \
+ $(eval ALL_DEFAULT_INSTALLED_MODULES += $(_fulldest)) \
+ $(eval unique_product_copy_files_destinations += $(_dest))))
+unique_product_copy_files_pairs :=
+unique_product_copy_files_destinations :=
+
+# -----------------------------------------------------------------
+# Define rules to copy headers defined in copy_headers.mk
+# If more than one makefile declared a header, print a warning,
+# then copy the last one defined. This matches the previous make
+# behavior.
+$(foreach dest,$(ALL_COPIED_HEADERS), \
+ $(eval _srcs := $(ALL_COPIED_HEADERS.$(dest).SRC)) \
+ $(eval _src := $(word $(words $(_srcs)),$(_srcs))) \
+ $(if $(call streq,$(_src),$(_srcs)),, \
+ $(warning Duplicate header copy: $(dest)) \
+ $(warning Defined in: $(ALL_COPIED_HEADERS.$(dest).MAKEFILE))) \
+ $(eval $(call copy-one-header,$(_src),$(dest))))
+all_copied_headers: $(ALL_COPIED_HEADERS)
+
+# -----------------------------------------------------------------
+# docs/index.html
+ifeq (,$(TARGET_BUILD_APPS))
+gen := $(OUT_DOCS)/index.html
+ALL_DOCS += $(gen)
+$(gen): frameworks/base/docs/docs-redirect-index.html
+ @mkdir -p $(dir $@)
+ @cp -f $< $@
+endif
+
+# -----------------------------------------------------------------
+# default.prop
+INSTALLED_DEFAULT_PROP_TARGET := $(TARGET_ROOT_OUT)/default.prop
+ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_DEFAULT_PROP_TARGET)
+ADDITIONAL_DEFAULT_PROPERTIES := \
+ $(call collapse-pairs, $(ADDITIONAL_DEFAULT_PROPERTIES))
+ADDITIONAL_DEFAULT_PROPERTIES += \
+ $(call collapse-pairs, $(PRODUCT_DEFAULT_PROPERTY_OVERRIDES))
+ADDITIONAL_DEFAULT_PROPERTIES := $(call uniq-pairs-by-first-component, \
+ $(ADDITIONAL_DEFAULT_PROPERTIES),=)
+
+intermediate_system_build_prop := $(call intermediates-dir-for,ETC,system_build_prop)/build.prop
+
+$(INSTALLED_DEFAULT_PROP_TARGET): $(intermediate_system_build_prop)
+ @echo Target buildinfo: $@
+ @mkdir -p $(dir $@)
+ $(hide) echo "#" > $@; \
+ echo "# ADDITIONAL_DEFAULT_PROPERTIES" >> $@; \
+ echo "#" >> $@;
+ $(hide) $(foreach line,$(ADDITIONAL_DEFAULT_PROPERTIES), \
+ echo "$(line)" >> $@;)
+ $(hide) echo "#" >> $@; \
+ echo "# BOOTIMAGE_BUILD_PROPERTIES" >> $@; \
+ echo "#" >> $@;
+ $(hide) echo ro.bootimage.build.date=`$(DATE_FROM_FILE)`>>$@
+ $(hide) echo ro.bootimage.build.date.utc=`$(DATE_FROM_FILE) +%s`>>$@
+ $(hide) echo ro.bootimage.build.fingerprint="$(BUILD_FINGERPRINT_FROM_FILE)">>$@
+ $(hide) build/tools/post_process_props.py $@
+
+# -----------------------------------------------------------------
+# build.prop
+INSTALLED_BUILD_PROP_TARGET := $(TARGET_OUT)/build.prop
+ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_BUILD_PROP_TARGET)
+ADDITIONAL_BUILD_PROPERTIES := \
+ $(call collapse-pairs, $(ADDITIONAL_BUILD_PROPERTIES))
+ADDITIONAL_BUILD_PROPERTIES := $(call uniq-pairs-by-first-component, \
+ $(ADDITIONAL_BUILD_PROPERTIES),=)
+
+# A list of arbitrary tags describing the build configuration.
+# Force ":=" so we can use +=
+BUILD_VERSION_TAGS := $(BUILD_VERSION_TAGS)
+ifeq ($(TARGET_BUILD_TYPE),debug)
+ BUILD_VERSION_TAGS += debug
+endif
+# The "test-keys" tag marks builds signed with the old test keys,
+# which are available in the SDK. "dev-keys" marks builds signed with
+# non-default dev keys (usually private keys from a vendor directory).
+# Both of these tags will be removed and replaced with "release-keys"
+# when the target-files is signed in a post-build step.
+ifeq ($(DEFAULT_SYSTEM_DEV_CERTIFICATE),build/target/product/security/testkey)
+BUILD_KEYS := test-keys
+else
+BUILD_KEYS := dev-keys
+endif
+BUILD_VERSION_TAGS += $(BUILD_KEYS)
+BUILD_VERSION_TAGS := $(subst $(space),$(comma),$(sort $(BUILD_VERSION_TAGS)))
+
+# A human-readable string that describes this build in detail.
+build_desc := $(TARGET_PRODUCT)-$(TARGET_BUILD_VARIANT) $(PLATFORM_VERSION) $(BUILD_ID) $(BUILD_NUMBER_FROM_FILE) $(BUILD_VERSION_TAGS)
+$(intermediate_system_build_prop): PRIVATE_BUILD_DESC := $(build_desc)
+
+# The string used to uniquely identify the combined build and product; used by the OTA server.
+ifeq (,$(strip $(BUILD_FINGERPRINT)))
+ ifneq ($(filter eng.%,$(BUILD_NUMBER)),)
+ # Trim down BUILD_FINGERPRINT: the default BUILD_NUMBER makes it easily exceed
+ # the Android system property length limit (PROPERTY_VALUE_MAX=92).
+ BF_BUILD_NUMBER := $(shell echo $${USER:0:6})$(shell $(DATE) +%m%d%H%M)
+ else
+ BF_BUILD_NUMBER := $(BUILD_NUMBER)
+ endif
+ BUILD_FINGERPRINT := $(PRODUCT_BRAND)/$(TARGET_PRODUCT)/$(TARGET_DEVICE):$(PLATFORM_VERSION)/$(BUILD_ID)/$(BF_BUILD_NUMBER):$(TARGET_BUILD_VARIANT)/$(BUILD_VERSION_TAGS)
+endif
+ifneq ($(words $(BUILD_FINGERPRINT)),1)
+ $(error BUILD_FINGERPRINT cannot contain spaces: "$(BUILD_FINGERPRINT)")
+endif
+
+$(shell mkdir -p $(PRODUCT_OUT) && echo $(BUILD_FINGERPRINT) > $(PRODUCT_OUT)/build_fingerprint.txt)
+BUILD_FINGERPRINT_FROM_FILE := $$(cat $(PRODUCT_OUT)/build_fingerprint.txt)
+
+# The string used to uniquely identify the system build; used by the OTA server.
+# This purposefully excludes any product-specific variables.
+ifeq (,$(strip $(BUILD_THUMBPRINT)))
+ BUILD_THUMBPRINT := $(PLATFORM_VERSION)/$(BUILD_ID)/$(BUILD_NUMBER):$(TARGET_BUILD_VARIANT)/$(BUILD_VERSION_TAGS)
+endif
+ifneq ($(words $(BUILD_THUMBPRINT)),1)
+ $(error BUILD_THUMBPRINT cannot contain spaces: "$(BUILD_THUMBPRINT)")
+endif
+
+KNOWN_OEM_THUMBPRINT_PROPERTIES := \
+ ro.product.brand \
+ ro.product.name \
+ ro.product.device
+OEM_THUMBPRINT_PROPERTIES := $(filter $(KNOWN_OEM_THUMBPRINT_PROPERTIES),\
+ $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_OEM_PROPERTIES))
+
+# Display parameters shown under Settings -> About Phone
+ifeq ($(TARGET_BUILD_VARIANT),user)
+ # User builds should show:
+ # release build number or branch.build_number for non-release builds
+
+ # Dev. branches should have DISPLAY_BUILD_NUMBER set
+ ifeq "true" "$(DISPLAY_BUILD_NUMBER)"
+ BUILD_DISPLAY_ID := $(BUILD_ID).$(BUILD_NUMBER_FROM_FILE) $(BUILD_KEYS)
+ else
+ BUILD_DISPLAY_ID := $(BUILD_ID) $(BUILD_KEYS)
+ endif
+else
+ # Non-user builds should show detailed build information
+ BUILD_DISPLAY_ID := $(build_desc)
+endif
+
+# Accepts a whitespace separated list of product locales such as
+# (en_US en_AU en_GB...) and returns the first locale in the list with
+# underscores replaced with hyphens. In the example above, this will
+# return "en-US".
+define get-default-product-locale
+$(strip $(subst _,-, $(firstword $(1))))
+endef
+
+BUILDINFO_SH := build/tools/buildinfo.sh
+
+# TARGET_BUILD_FLAVOR and ro.build.flavor are used only by the test harness to distinguish builds.
+TARGET_BUILD_FLAVOR := $(TARGET_PRODUCT)-$(TARGET_BUILD_VARIANT)
+ifdef SANITIZE_TARGET
+TARGET_BUILD_FLAVOR := $(TARGET_BUILD_FLAVOR)_asan
+endif
+
+ifdef TARGET_SYSTEM_PROP
+system_prop_file := $(TARGET_SYSTEM_PROP)
+else
+system_prop_file := $(wildcard $(TARGET_DEVICE_DIR)/system.prop)
+endif
+$(intermediate_system_build_prop): $(BUILDINFO_SH) $(INTERNAL_BUILD_ID_MAKEFILE) $(BUILD_SYSTEM)/version_defaults.mk $(system_prop_file) $(INSTALLED_ANDROID_INFO_TXT_TARGET)
+ @echo Target buildinfo: $@
+ @mkdir -p $(dir $@)
+ $(hide) echo > $@
+ifneq ($(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_OEM_PROPERTIES),)
+ $(hide) echo "#" >> $@; \
+ echo "# PRODUCT_OEM_PROPERTIES" >> $@; \
+ echo "#" >> $@;
+ $(hide) $(foreach prop,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_OEM_PROPERTIES), \
+ echo "import /oem/oem.prop $(prop)" >> $@;)
+endif
+ $(hide) TARGET_BUILD_TYPE="$(TARGET_BUILD_VARIANT)" \
+ TARGET_BUILD_FLAVOR="$(TARGET_BUILD_FLAVOR)" \
+ TARGET_DEVICE="$(TARGET_DEVICE)" \
+ PRODUCT_NAME="$(TARGET_PRODUCT)" \
+ PRODUCT_BRAND="$(PRODUCT_BRAND)" \
+ PRODUCT_DEFAULT_LOCALE="$(call get-default-product-locale,$(PRODUCT_LOCALES))" \
+ PRODUCT_DEFAULT_WIFI_CHANNELS="$(PRODUCT_DEFAULT_WIFI_CHANNELS)" \
+ PRODUCT_MODEL="$(PRODUCT_MODEL)" \
+ PRODUCT_MANUFACTURER="$(PRODUCT_MANUFACTURER)" \
+ PRIVATE_BUILD_DESC="$(PRIVATE_BUILD_DESC)" \
+ BUILD_ID="$(BUILD_ID)" \
+ BUILD_DISPLAY_ID="$(BUILD_DISPLAY_ID)" \
+ DATE="$(DATE_FROM_FILE)" \
+ BUILD_NUMBER="$(BUILD_NUMBER_FROM_FILE)" \
+ BOARD_BUILD_SYSTEM_ROOT_IMAGE="$(BOARD_BUILD_SYSTEM_ROOT_IMAGE)" \
+ AB_OTA_UPDATER="$(AB_OTA_UPDATER)" \
+ PLATFORM_VERSION="$(PLATFORM_VERSION)" \
+ PLATFORM_SECURITY_PATCH="$(PLATFORM_SECURITY_PATCH)" \
+ PLATFORM_BASE_OS="$(PLATFORM_BASE_OS)" \
+ PLATFORM_SDK_VERSION="$(PLATFORM_SDK_VERSION)" \
+ PLATFORM_PREVIEW_SDK_VERSION="$(PLATFORM_PREVIEW_SDK_VERSION)" \
+ PLATFORM_VERSION_CODENAME="$(PLATFORM_VERSION_CODENAME)" \
+ PLATFORM_VERSION_ALL_CODENAMES="$(PLATFORM_VERSION_ALL_CODENAMES)" \
+ BUILD_VERSION_TAGS="$(BUILD_VERSION_TAGS)" \
+ TARGET_BOOTLOADER_BOARD_NAME="$(TARGET_BOOTLOADER_BOARD_NAME)" \
+ BUILD_FINGERPRINT="$(BUILD_FINGERPRINT_FROM_FILE)" \
+ $(if $(OEM_THUMBPRINT_PROPERTIES),BUILD_THUMBPRINT="$(BUILD_THUMBPRINT)") \
+ TARGET_BOARD_PLATFORM="$(TARGET_BOARD_PLATFORM)" \
+ TARGET_CPU_ABI_LIST="$(TARGET_CPU_ABI_LIST)" \
+ TARGET_CPU_ABI_LIST_32_BIT="$(TARGET_CPU_ABI_LIST_32_BIT)" \
+ TARGET_CPU_ABI_LIST_64_BIT="$(TARGET_CPU_ABI_LIST_64_BIT)" \
+ TARGET_CPU_ABI="$(TARGET_CPU_ABI)" \
+ TARGET_CPU_ABI2="$(TARGET_CPU_ABI2)" \
+ TARGET_AAPT_CHARACTERISTICS="$(TARGET_AAPT_CHARACTERISTICS)" \
+ bash $(BUILDINFO_SH) >> $@
+ $(hide) $(foreach file,$(system_prop_file), \
+ if [ -f "$(file)" ]; then \
+ echo "#" >> $@; \
+ echo Target buildinfo from: "$(file)"; \
+ echo "# from $(file)" >> $@; \
+ echo "#" >> $@; \
+ cat $(file) >> $@; \
+ fi;)
+ $(if $(ADDITIONAL_BUILD_PROPERTIES), \
+ $(hide) echo >> $@; \
+ echo "#" >> $@; \
+ echo "# ADDITIONAL_BUILD_PROPERTIES" >> $@; \
+ echo "#" >> $@; )
+ $(hide) $(foreach line,$(ADDITIONAL_BUILD_PROPERTIES), \
+ echo "$(line)" >> $@;)
+ $(hide) cat $(INSTALLED_ANDROID_INFO_TXT_TARGET) | grep 'require version-' | sed -e 's/require version-/ro.build.expect./g' >> $@
+ $(hide) build/tools/post_process_props.py $@ $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_PROPERTY_BLACKLIST)
+
+build_desc :=
+
+ifeq (,$(filter true, $(TARGET_NO_KERNEL) $(TARGET_NO_RECOVERY)))
+INSTALLED_RECOVERYIMAGE_TARGET := $(PRODUCT_OUT)/recovery.img
+else
+INSTALLED_RECOVERYIMAGE_TARGET :=
+endif
+
+$(INSTALLED_BUILD_PROP_TARGET): $(intermediate_system_build_prop) $(INSTALLED_RECOVERYIMAGE_TARGET)
+ @echo "Target build info: $@"
+ $(hide) cat $(intermediate_system_build_prop) > $@
+ifdef INSTALLED_RECOVERYIMAGE_TARGET
+ $(hide) echo ro.expect.recovery_id=`cat $(RECOVERYIMAGE_ID_FILE)` >> $@
+endif
+
+# -----------------------------------------------------------------
+# vendor build.prop
+#
+# For verifying that the vendor build is what we think it is
+ifdef BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE
+INSTALLED_VENDOR_BUILD_PROP_TARGET := $(TARGET_OUT_VENDOR)/build.prop
+ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_VENDOR_BUILD_PROP_TARGET)
+$(INSTALLED_VENDOR_BUILD_PROP_TARGET): $(INSTALLED_BUILD_PROP_TARGET)
+ @echo Target vendor buildinfo: $@
+ @mkdir -p $(dir $@)
+ $(hide) echo > $@
+ $(hide) echo ro.vendor.build.date=`$(DATE_FROM_FILE)`>>$@
+ $(hide) echo ro.vendor.build.date.utc=`$(DATE_FROM_FILE) +%s`>>$@
+ $(hide) echo ro.vendor.build.fingerprint="$(BUILD_FINGERPRINT_FROM_FILE)">>$@
+endif
+
+# ----------------------------------------------------------------
+
+# -----------------------------------------------------------------
+# sdk-build.prop
+#
+# There are certain things in build.prop that we don't want to
+# ship with the sdk; remove them.
+
+# This must be a list of entire property keys followed by
+# "=" characters, without any internal spaces.
+sdk_build_prop_remove := \
+ ro.build.user= \
+ ro.build.host= \
+ ro.product.brand= \
+ ro.product.manufacturer= \
+ ro.product.device=
+# TODO: Remove this soon-to-be obsolete property
+sdk_build_prop_remove += ro.build.product=
+INSTALLED_SDK_BUILD_PROP_TARGET := $(PRODUCT_OUT)/sdk/sdk-build.prop
+# Strip the listed keys from the device build.prop (grep -v against a
+# \|-joined pattern), then re-add each key with the neutral value
+# "generic". Writes via $@.tmp so a failed step never leaves a
+# truncated output that looks up to date.
+$(INSTALLED_SDK_BUILD_PROP_TARGET): $(INSTALLED_BUILD_PROP_TARGET)
+	@echo SDK buildinfo: $@
+	@mkdir -p $(dir $@)
+	$(hide) grep -v "$(subst $(space),\|,$(strip \
+	$(sdk_build_prop_remove)))" $< > $@.tmp
+	$(hide) for x in $(sdk_build_prop_remove); do \
+	echo "$$x"generic >> $@.tmp; done
+	$(hide) mv $@.tmp $@
+
+# -----------------------------------------------------------------
+# package stats
+PACKAGE_STATS_FILE := $(PRODUCT_OUT)/package-stats.txt
+# Only installed .jar/.apk files under system/ or data/ are measured.
+PACKAGES_TO_STAT := \
+ $(sort $(filter $(TARGET_OUT)/% $(TARGET_OUT_DATA)/%, \
+ $(filter %.jar %.apk, $(ALL_DEFAULT_INSTALLED_MODULES))))
+$(PACKAGE_STATS_FILE): $(PACKAGES_TO_STAT)
+	@echo Package stats: $@
+	@mkdir -p $(dir $@)
+	$(hide) rm -f $@
+ifeq ($(PACKAGES_TO_STAT),)
+# Create empty package stats file if target builds no jar(s) or apk(s).
+	$(hide) touch $@
+else
+	$(hide) build/tools/dump-package-stats $^ > $@
+endif
+
+.PHONY: package-stats
+package-stats: $(PACKAGE_STATS_FILE)
+
+# -----------------------------------------------------------------
+# Cert-to-package mapping. Used by the post-build signing tools.
+# Use a macro to add newline to each echo command
+# (the blank line before "endef" IS the newline — do not remove it).
+define _apkcerts_echo_with_newline
+$(hide) echo $(1)
+
+endef
+
+name := $(TARGET_PRODUCT)
+ifeq ($(TARGET_BUILD_TYPE),debug)
+ name := $(name)_debug
+endif
+name := $(name)-apkcerts-$(FILE_NAME_TAG)
+intermediates := \
+ $(call intermediates-dir-for,PACKAGING,apkcerts)
+APKCERTS_FILE := $(intermediates)/$(name).txt
+# We don't need to really build all the modules.
+# TODO: rebuild APKCERTS_FILE if any app change its cert.
+# One line per package: packages signed externally get
+# certificate="EXTERNAL" with an empty key; all others record their
+# certificate and private key paths.
+$(APKCERTS_FILE):
+	@echo APK certs list: $@
+	@mkdir -p $(dir $@)
+	@rm -f $@
+	$(foreach p,$(PACKAGES),\
+	$(if $(PACKAGES.$(p).EXTERNAL_KEY),\
+	$(call _apkcerts_echo_with_newline,\
+	'name="$(p).apk" certificate="EXTERNAL" \
+	private_key=""' >> $@),\
+	$(call _apkcerts_echo_with_newline,\
+	'name="$(p).apk" certificate="$(PACKAGES.$(p).CERTIFICATE)" \
+	private_key="$(PACKAGES.$(p).PRIVATE_KEY)"' >> $@)))
+	# In case value of PACKAGES is empty.
+	$(hide) touch $@
+
+.PHONY: apkcerts-list
+apkcerts-list: $(APKCERTS_FILE)
+
+ifneq (,$(TARGET_BUILD_APPS))
+ $(call dist-for-goals, apps_only, $(APKCERTS_FILE):apkcerts.txt)
+endif
+
+# -----------------------------------------------------------------
+
+# The dev key is used to sign this package, and as the key required
+# for future OTA packages installed by this system. Actual product
+# deliverables will be re-signed by hand. We expect this file to
+# exist with the suffixes ".x509.pem" and ".pk8".
+DEFAULT_KEY_CERT_PAIR := $(DEFAULT_SYSTEM_DEV_CERTIFICATE)
+
+
+# Rules that need to be present for the all targets, even
+# if they don't do anything.
+# (Empty recipe keeps "make systemimage" valid on targets that add no
+# real systemimage rule elsewhere.)
+.PHONY: systemimage
+systemimage:
+
+# -----------------------------------------------------------------
+
+.PHONY: event-log-tags
+
+# Produce an event logs tag file for everything we know about, in order
+# to properly allocate numbers. Then produce a file that's filtered
+# for what's going to be installed.
+
+all_event_log_tags_file := $(TARGET_OUT_COMMON_INTERMEDIATES)/all-event-log-tags.txt
+
+event_log_tags_file := $(TARGET_OUT)/etc/event-log-tags
+
+# Include tags from all packages that we know about
+all_event_log_tags_src := \
+ $(sort $(foreach m, $(ALL_MODULES), $(ALL_MODULES.$(m).EVENT_LOG_TAGS)))
+
+# PDK builds will already have a full list of tags that needs to get merged
+# in with the ones from source
+pdk_fusion_log_tags_file := $(patsubst $(PRODUCT_OUT)/%,$(_pdk_fusion_intermediates)/%,$(filter $(event_log_tags_file),$(ALL_PDK_FUSION_FILES)))
+
+# PRIVATE_SRC_FILES snapshots the source list at rule-definition time so
+# the recipe is unaffected by later changes to the variables.
+$(all_event_log_tags_file): PRIVATE_SRC_FILES := $(all_event_log_tags_src) $(pdk_fusion_log_tags_file)
+$(all_event_log_tags_file): $(all_event_log_tags_src) $(pdk_fusion_log_tags_file)
+	$(hide) mkdir -p $(dir $@)
+	$(hide) build/tools/merge-event-log-tags.py -o $@ $(PRIVATE_SRC_FILES)
+
+# Include tags from all packages included in this product, plus all
+# tags that are part of the system (ie, not in a vendor/ or device/
+# directory).
+event_log_tags_src := \
+ $(sort $(foreach m,\
+ $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_PACKAGES) \
+ $(call module-names-for-tag-list,user), \
+ $(ALL_MODULES.$(m).EVENT_LOG_TAGS)) \
+ $(filter-out vendor/% device/% out/%,$(all_event_log_tags_src)))
+
+# The installed file is merged against the full list (-m) so tag numbers
+# stay consistent with the superset file above.
+$(event_log_tags_file): PRIVATE_SRC_FILES := $(event_log_tags_src) $(pdk_fusion_log_tags_file)
+$(event_log_tags_file): PRIVATE_MERGED_FILE := $(all_event_log_tags_file)
+$(event_log_tags_file): $(event_log_tags_src) $(all_event_log_tags_file) $(pdk_fusion_log_tags_file)
+	$(hide) mkdir -p $(dir $@)
+	$(hide) build/tools/merge-event-log-tags.py -o $@ -m $(PRIVATE_MERGED_FILE) $(PRIVATE_SRC_FILES)
+
+event-log-tags: $(event_log_tags_file)
+
+ALL_DEFAULT_INSTALLED_MODULES += $(event_log_tags_file)
+
+
+# #################################################################
+# Targets for boot/OS images
+# #################################################################
+# Resolve which bootloader/kernel artifacts this product installs;
+# boards that ship without them leave the variables empty.
+ifneq ($(strip $(TARGET_NO_BOOTLOADER)),true)
+ INSTALLED_BOOTLOADER_MODULE := $(PRODUCT_OUT)/bootloader
+ ifeq ($(strip $(TARGET_BOOTLOADER_IS_2ND)),true)
+ INSTALLED_2NDBOOTLOADER_TARGET := $(PRODUCT_OUT)/2ndbootloader
+ else
+ INSTALLED_2NDBOOTLOADER_TARGET :=
+ endif
+else
+ INSTALLED_BOOTLOADER_MODULE :=
+ INSTALLED_2NDBOOTLOADER_TARGET :=
+endif # TARGET_NO_BOOTLOADER
+ifneq ($(strip $(TARGET_NO_KERNEL)),true)
+ INSTALLED_KERNEL_TARGET := $(PRODUCT_OUT)/kernel
+else
+ INSTALLED_KERNEL_TARGET :=
+endif
+
+# -----------------------------------------------------------------
+# the ramdisk
+# Only files installed under the root dir go into the ramdisk.
+INTERNAL_RAMDISK_FILES := $(filter $(TARGET_ROOT_OUT)/%, \
+ $(ALL_PREBUILT) \
+ $(ALL_GENERATED_SOURCES) \
+ $(ALL_DEFAULT_INSTALLED_MODULES))
+
+BUILT_RAMDISK_TARGET := $(PRODUCT_OUT)/ramdisk.img
+
+# We just build this directly to the install location.
+INSTALLED_RAMDISK_TARGET := $(BUILT_RAMDISK_TARGET)
+# Packs $(TARGET_ROOT_OUT) into a compressed ramdisk image. minigzip is
+# an order-only prerequisite: a newer compressor alone does not force a
+# ramdisk rebuild.
+$(INSTALLED_RAMDISK_TARGET): $(MKBOOTFS) $(INTERNAL_RAMDISK_FILES) | $(MINIGZIP)
+	$(call pretty,"Target ram disk: $@")
+	$(hide) $(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_ROOT_OUT) | $(MINIGZIP) > $@
+
+# Same command as above, but skips the dependency check entirely.
+.PHONY: ramdisk-nodeps
+ramdisk-nodeps: $(MKBOOTFS) | $(MINIGZIP)
+	@echo "make $@: ignoring dependencies"
+	$(hide) $(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_ROOT_OUT) | $(MINIGZIP) > $(INSTALLED_RAMDISK_TARGET)
+
+ifneq ($(strip $(TARGET_NO_KERNEL)),true)
+
+# -----------------------------------------------------------------
+# the boot image, which is a collection of other images.
+INTERNAL_BOOTIMAGE_ARGS := \
+ $(addprefix --second ,$(INSTALLED_2NDBOOTLOADER_TARGET)) \
+ --kernel $(INSTALLED_KERNEL_TARGET)
+
+# When the system image doubles as the root filesystem there is no
+# separate ramdisk inside the boot image.
+ifneq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
+INTERNAL_BOOTIMAGE_ARGS += --ramdisk $(INSTALLED_RAMDISK_TARGET)
+endif
+
+# File arguments only (the --flags filtered out) for use as prerequisites.
+INTERNAL_BOOTIMAGE_FILES := $(filter-out --%,$(INTERNAL_BOOTIMAGE_ARGS))
+
+BOARD_KERNEL_BASE := $(strip $(BOARD_KERNEL_BASE))
+ifdef BOARD_KERNEL_BASE
+ INTERNAL_BOOTIMAGE_ARGS += --base $(BOARD_KERNEL_BASE)
+endif
+
+BOARD_KERNEL_PAGESIZE := $(strip $(BOARD_KERNEL_PAGESIZE))
+ifdef BOARD_KERNEL_PAGESIZE
+ INTERNAL_BOOTIMAGE_ARGS += --pagesize $(BOARD_KERNEL_PAGESIZE)
+endif
+
+# On verity builds with a system root image, pass the lowercase hex key
+# id of the verity signing certificate to the kernel cmdline
+# (veritykeyid=id:...). The backquoted command runs when the recipe
+# using the cmdline executes, not at parse time.
+ifeq ($(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY),true)
+ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
+VERITY_KEYID := veritykeyid=id:`openssl x509 -in $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).x509.pem -text \
+ | grep keyid | sed 's/://g' | tr -d '[:space:]' | tr '[:upper:]' '[:lower:]' | sed 's/keyid//g'`
+endif
+endif
+
+# buildvariant= is always appended, so BOARD_KERNEL_CMDLINE is never
+# empty here and the ifdef below always holds.
+BOARD_KERNEL_CMDLINE := $(strip $(BOARD_KERNEL_CMDLINE) buildvariant=$(TARGET_BUILD_VARIANT) $(VERITY_KEYID))
+ifdef BOARD_KERNEL_CMDLINE
+INTERNAL_BOOTIMAGE_ARGS += --cmdline "$(BOARD_KERNEL_CMDLINE)"
+endif
+
+INTERNAL_MKBOOTIMG_VERSION_ARGS := \
+ --os_version $(PLATFORM_VERSION) \
+ --os_patch_level $(PLATFORM_SECURITY_PATCH)
+
+INSTALLED_BOOTIMAGE_TARGET := $(PRODUCT_OUT)/boot.img
+
+# BOARD_USES_RECOVERY_AS_BOOT = true must have BOARD_BUILD_SYSTEM_ROOT_IMAGE = true.
+ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
+ifneq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
+ $(error BOARD_BUILD_SYSTEM_ROOT_IMAGE must be enabled for BOARD_USES_RECOVERY_AS_BOOT.)
+endif
+endif
+
+# We build recovery as boot image if BOARD_USES_RECOVERY_AS_BOOT is true.
+# Three flavors below: boot_signer, vboot, or unsigned.
+ifneq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
+ifeq ($(TARGET_BOOTIMAGE_USE_EXT2),true)
+$(error TARGET_BOOTIMAGE_USE_EXT2 is not supported anymore)
+else ifeq (true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_BOOT_SIGNER)) # TARGET_BOOTIMAGE_USE_EXT2 != true
+
+# boot_signer flavor: mkbootimg, then sign in place with the verity key.
+$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(BOOT_SIGNER)
+	$(call pretty,"Target boot image: $@")
+	$(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $@
+	$(BOOT_SIGNER) /boot $@ $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).pk8 $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).x509.pem $@
+	$(hide) $(call assert-max-image-size,$@,$(BOARD_BOOTIMAGE_PARTITION_SIZE))
+
+.PHONY: bootimage-nodeps
+bootimage-nodeps: $(MKBOOTIMG) $(BOOT_SIGNER)
+	@echo "make $@: ignoring dependencies"
+	$(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(INSTALLED_BOOTIMAGE_TARGET)
+	$(BOOT_SIGNER) /boot $(INSTALLED_BOOTIMAGE_TARGET) $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).pk8 $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).x509.pem $(INSTALLED_BOOTIMAGE_TARGET)
+	$(hide) $(call assert-max-image-size,$(INSTALLED_BOOTIMAGE_TARGET),$(BOARD_BOOTIMAGE_PARTITION_SIZE))
+
+else ifeq (true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)) # PRODUCT_SUPPORTS_BOOT_SIGNER != true
+
+# vboot flavor: mkbootimg writes $@.unsigned, vboot_signer produces $@.
+$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(VBOOT_SIGNER)
+	$(call pretty,"Target boot image: $@")
+	$(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $@.unsigned
+	$(VBOOT_SIGNER) $(FUTILITY) $@.unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $@.keyblock $@
+	$(hide) $(call assert-max-image-size,$@,$(BOARD_BOOTIMAGE_PARTITION_SIZE))
+
+.PHONY: bootimage-nodeps
+bootimage-nodeps: $(MKBOOTIMG) $(VBOOT_SIGNER)
+	@echo "make $@: ignoring dependencies"
+	$(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(INSTALLED_BOOTIMAGE_TARGET).unsigned
+	$(VBOOT_SIGNER) $(FUTILITY) $(INSTALLED_BOOTIMAGE_TARGET).unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $(INSTALLED_BOOTIMAGE_TARGET).keyblock $(INSTALLED_BOOTIMAGE_TARGET)
+	$(hide) $(call assert-max-image-size,$(INSTALLED_BOOTIMAGE_TARGET),$(BOARD_BOOTIMAGE_PARTITION_SIZE))
+
+else # PRODUCT_SUPPORTS_VBOOT != true
+
+# Unsigned flavor.
+$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES)
+	$(call pretty,"Target boot image: $@")
+	$(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $@
+	$(hide) $(call assert-max-image-size,$@,$(BOARD_BOOTIMAGE_PARTITION_SIZE))
+
+.PHONY: bootimage-nodeps
+bootimage-nodeps: $(MKBOOTIMG)
+	@echo "make $@: ignoring dependencies"
+	$(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(INSTALLED_BOOTIMAGE_TARGET)
+	$(hide) $(call assert-max-image-size,$(INSTALLED_BOOTIMAGE_TARGET),$(BOARD_BOOTIMAGE_PARTITION_SIZE))
+
+endif # TARGET_BOOTIMAGE_USE_EXT2
+endif # BOARD_USES_RECOVERY_AS_BOOT
+
+else # TARGET_NO_KERNEL
+# HACK: The top-level targets depend on the bootimage. Not all targets
+# can produce a bootimage, though, and emulator targets need the ramdisk
+# instead. Fake it out by calling the ramdisk the bootimage.
+# TODO: make the emulator use bootimages, and make mkbootimg accept
+# kernel-less inputs.
+INSTALLED_BOOTIMAGE_TARGET := $(INSTALLED_RAMDISK_TARGET)
+endif
+
+# -----------------------------------------------------------------
+# NOTICE files
+#
+# We are required to publish the licenses for all code under BSD, GPL and
+# Apache licenses (and possibly other more exotic ones as well). We err on the
+# side of caution, so the licenses for other third-party code are included here
+# too.
+#
+# This needs to be before the systemimage rules, because it adds to
+# ALL_DEFAULT_INSTALLED_MODULES, which those use to pick which files
+# go into the systemimage.
+
+.PHONY: notice_files
+
+# Create the rule to combine the files into text and html forms
+# $(1) - Plain text output file
+# $(2) - HTML output file
+# $(3) - File title
+# $(4) - Directory to use. Notice files are all $(4)/src. Other
+# directories in there will be used for scratch
+# $(5) - Dependencies for the output files
+#
+# The algorithm here is that we go collect a hash for each of the notice
+# files and write the names of the files that match that hash. Then
+# to generate the real files, we go print out all of the files and their
+# hashes.
+#
+# These rules are fairly complex, so they depend on this makefile so if
+# it changes, they'll run again.
+#
+# TODO: We could clean this up so that we just record the locations of the
+# original notice files instead of making rules to copy them somewhere.
+# Then we could traverse that without quite as much bash drama.
+define combine-notice-files
+$(1) $(2): PRIVATE_MESSAGE := $(3)
+$(1) $(2): PRIVATE_DIR := $(4)
+$(1) : $(2)
+$(2) : $(5) $(BUILD_SYSTEM)/Makefile build/tools/generate-notice-files.py
+	build/tools/generate-notice-files.py $(1) $(2) $$(PRIVATE_MESSAGE) $$(PRIVATE_DIR)/src
+notice_files: $(1) $(2)
+endef
+
+# TODO These intermediate NOTICE.txt/NOTICE.html files should go into
+# TARGET_OUT_NOTICE_FILES now that the notice files are gathered from
+# the src subdirectory.
+
+target_notice_file_txt := $(TARGET_OUT_INTERMEDIATES)/NOTICE.txt
+target_notice_file_html := $(TARGET_OUT_INTERMEDIATES)/NOTICE.html
+target_notice_file_html_gz := $(TARGET_OUT_INTERMEDIATES)/NOTICE.html.gz
+tools_notice_file_txt := $(HOST_OUT_INTERMEDIATES)/NOTICE.txt
+tools_notice_file_html := $(HOST_OUT_INTERMEDIATES)/NOTICE.html
+
+ifndef TARGET_BUILD_APPS
+kernel_notice_file := $(TARGET_OUT_NOTICE_FILES)/src/kernel.txt
+pdk_fusion_notice_files := $(filter $(TARGET_OUT_NOTICE_FILES)/%, $(ALL_PDK_FUSION_FILES))
+
+# Instantiate the combine rule for the target (device) notices...
+$(eval $(call combine-notice-files, \
+ $(target_notice_file_txt), \
+ $(target_notice_file_html), \
+ "Notices for files contained in the filesystem images in this directory:", \
+ $(TARGET_OUT_NOTICE_FILES), \
+ $(ALL_DEFAULT_INSTALLED_MODULES) $(kernel_notice_file) $(pdk_fusion_notice_files)))
+
+# ...and again for the host tools.
+$(eval $(call combine-notice-files, \
+ $(tools_notice_file_txt), \
+ $(tools_notice_file_html), \
+ "Notices for files contained in the tools directory:", \
+ $(HOST_OUT_NOTICE_FILES), \
+ $(ALL_DEFAULT_INSTALLED_MODULES)))
+
+# Install the html file at /system/etc/NOTICE.html.gz.
+# This is not ideal, but this is very late in the game, after a lot of
+# the module processing has already been done -- in fact, we used the
+# fact that all that has been done to get the list of modules that we
+# need notice files for.
+$(target_notice_file_html_gz): $(target_notice_file_html) | $(MINIGZIP)
+	$(hide) $(MINIGZIP) -9 < $< > $@
+installed_notice_html_gz := $(TARGET_OUT)/etc/NOTICE.html.gz
+$(installed_notice_html_gz): $(target_notice_file_html_gz) | $(ACP)
+	$(copy-file-to-target)
+
+# if we've been run by mm, mmm, etc, don't reinstall this every time
+ifeq ($(ONE_SHOT_MAKEFILE),)
+ALL_DEFAULT_INSTALLED_MODULES += $(installed_notice_html_gz)
+endif
+endif # TARGET_BUILD_APPS
+
+# The kernel isn't really a module, so to get its module file in there, we
+# make the target NOTICE files depend on this particular file too, which will
+# then be in the right directory for the find in combine-notice-files to work.
+$(kernel_notice_file): \
+ $(BUILD_SYSTEM)/LINUX_KERNEL_COPYING \
+ | $(ACP)
+	@echo Copying: $@
+	$(hide) mkdir -p $(dir $@)
+	$(hide) $(ACP) $< $@
+
+
+# -----------------------------------------------------------------
+# Build a keystore with the authorized keys in it, used to verify the
+# authenticity of downloaded OTA packages.
+#
+# This rule adds to ALL_DEFAULT_INSTALLED_MODULES, so it needs to come
+# before the rules that use that variable to build the image.
+ALL_DEFAULT_INSTALLED_MODULES += $(TARGET_OUT_ETC)/security/otacerts.zip
+$(TARGET_OUT_ETC)/security/otacerts.zip: KEY_CERT_PAIR := $(DEFAULT_KEY_CERT_PAIR)
+# zip -qjX: quiet, junk directory paths, no extra attributes; ziptime
+# then strips timestamps so the archive is deterministic.
+$(TARGET_OUT_ETC)/security/otacerts.zip: $(addsuffix .x509.pem,$(DEFAULT_KEY_CERT_PAIR)) | $(ZIPTIME)
+	$(hide) rm -f $@
+	$(hide) mkdir -p $(dir $@)
+	$(hide) zip -qjX $@ $<
+	$(remove-timestamps-from-package)
+
+# Carry the public key for update_engine if it's a non-Brillo target that
+# uses the AB updater. We use the same key as otacerts but in RSA public key
+# format.
+ifeq ($(AB_OTA_UPDATER),true)
+ifeq ($(BRILLO),)
+ALL_DEFAULT_INSTALLED_MODULES += $(TARGET_OUT_ETC)/update_engine/update-payload-key.pub.pem
+$(TARGET_OUT_ETC)/update_engine/update-payload-key.pub.pem: $(addsuffix .x509.pem,$(DEFAULT_KEY_CERT_PAIR))
+	$(hide) rm -f $@
+	$(hide) mkdir -p $(dir $@)
+	$(hide) openssl x509 -pubkey -noout -in $< > $@
+
+# The recovery ramdisk carries its own copy of the payload key.
+ALL_DEFAULT_INSTALLED_MODULES += $(TARGET_RECOVERY_ROOT_OUT)/etc/update_engine/update-payload-key.pub.pem
+$(TARGET_RECOVERY_ROOT_OUT)/etc/update_engine/update-payload-key.pub.pem: $(TARGET_OUT_ETC)/update_engine/update-payload-key.pub.pem
+	$(hide) cp -f $< $@
+endif
+endif
+
+.PHONY: otacerts
+otacerts: $(TARGET_OUT_ETC)/security/otacerts.zip
+
+
+# #################################################################
+# Targets for user images
+# #################################################################
+
+# Pick the ext filesystem variant for user images; checks run in the
+# order ext2 -> ext3 -> ext4 and the first enabled one wins.
+INTERNAL_USERIMAGES_EXT_VARIANT :=
+ifeq ($(TARGET_USERIMAGES_USE_EXT2),true)
+INTERNAL_USERIMAGES_USE_EXT := true
+INTERNAL_USERIMAGES_EXT_VARIANT := ext2
+else
+ifeq ($(TARGET_USERIMAGES_USE_EXT3),true)
+INTERNAL_USERIMAGES_USE_EXT := true
+INTERNAL_USERIMAGES_EXT_VARIANT := ext3
+else
+ifeq ($(TARGET_USERIMAGES_USE_EXT4),true)
+INTERNAL_USERIMAGES_USE_EXT := true
+INTERNAL_USERIMAGES_EXT_VARIANT := ext4
+endif
+endif
+endif
+
+# These options tell the recovery updater/installer how to mount the partitions writable.
+# <fstype>=<fstype opts>[|<fstype>=<fstype opts>]...
+# fstype_opts := <opt>[,<opt>]...
+#       opt := [<name>[=<value>]]
+# The following worked on Nexus devices with Kernel 3.1, 3.4, 3.10
+DEFAULT_TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS := ext4=max_batch_time=0,commit=1,data=ordered,barrier=1,errors=panic,nodelalloc
+
+ifneq (true,$(TARGET_USERIMAGES_SPARSE_EXT_DISABLED))
+ INTERNAL_USERIMAGES_SPARSE_EXT_FLAG := -s
+endif
+
+# Host tools needed to build ext-based user images (plus f2fs tools if
+# the board enables f2fs).
+ifeq ($(INTERNAL_USERIMAGES_USE_EXT),true)
+INTERNAL_USERIMAGES_DEPS := $(SIMG2IMG)
+INTERNAL_USERIMAGES_DEPS += $(MKEXTUSERIMG) $(MAKE_EXT4FS) $(E2FSCK)
+ifeq ($(TARGET_USERIMAGES_USE_F2FS),true)
+INTERNAL_USERIMAGES_DEPS += $(MKF2FSUSERIMG) $(MAKE_F2FS)
+endif
+endif
+
+ifneq (true,$(TARGET_USERIMAGES_SPARSE_SQUASHFS_DISABLED))
+ INTERNAL_USERIMAGES_SPARSE_SQUASHFS_FLAG := -s
+endif
+# squashfs tooling is needed when either system or vendor uses squashfs.
+ifneq ($(filter $(BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE) $(BOARD_SYSTEMIMAGE_FILE_SYSTEM_TYPE),squashfs),)
+INTERNAL_USERIMAGES_DEPS += $(MAKE_SQUASHFS) $(MKSQUASHFSUSERIMG) $(IMG2SIMG)
+endif
+
+INTERNAL_USERIMAGES_BINARY_PATHS := $(sort $(dir $(INTERNAL_USERIMAGES_DEPS)))
+
+# Verity builds additionally need the hash-tree/signing tools (and FEC
+# when forward error correction is enabled).
+ifeq (true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY))
+INTERNAL_USERIMAGES_DEPS += $(BUILD_VERITY_TREE) $(APPEND2SIMG) $(VERITY_SIGNER)
+ifeq (true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY_FEC))
+INTERNAL_USERIMAGES_DEPS += $(FEC)
+endif
+endif
+
+SELINUX_FC := $(TARGET_ROOT_OUT)/file_contexts.bin
+INTERNAL_USERIMAGES_DEPS += $(SELINUX_FC)
+
+INTERNAL_USERIMAGES_DEPS += $(BLK_ALLOC_TO_BASE_FS)
+
+# $(1): the path of the output dictionary file
+# $(2): additional "key=value" pairs to append to the dictionary file.
+# Emits the key=value property dictionary consumed by the image build
+# tools; every line is guarded so a key is only written when its source
+# variable is set. Expanded inside recipes, so no comment lines may be
+# added within the define body.
+define generate-userimage-prop-dictionary
+$(if $(INTERNAL_USERIMAGES_EXT_VARIANT),$(hide) echo "fs_type=$(INTERNAL_USERIMAGES_EXT_VARIANT)" >> $(1))
+$(if $(BOARD_SYSTEMIMAGE_PARTITION_SIZE),$(hide) echo "system_size=$(BOARD_SYSTEMIMAGE_PARTITION_SIZE)" >> $(1))
+$(if $(BOARD_SYSTEMIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "system_fs_type=$(BOARD_SYSTEMIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
+$(if $(BOARD_SYSTEMIMAGE_JOURNAL_SIZE),$(hide) echo "system_journal_size=$(BOARD_SYSTEMIMAGE_JOURNAL_SIZE)" >> $(1))
+$(if $(BOARD_HAS_EXT4_RESERVED_BLOCKS),$(hide) echo "has_ext4_reserved_blocks=$(BOARD_HAS_EXT4_RESERVED_BLOCKS)" >> $(1))
+$(if $(BOARD_SYSTEMIMAGE_SQUASHFS_COMPRESSOR),$(hide) echo "system_squashfs_compressor=$(BOARD_SYSTEMIMAGE_SQUASHFS_COMPRESSOR)" >> $(1))
+$(if $(BOARD_SYSTEMIMAGE_SQUASHFS_COMPRESSOR_OPT),$(hide) echo "system_squashfs_compressor_opt=$(BOARD_SYSTEMIMAGE_SQUASHFS_COMPRESSOR_OPT)" >> $(1))
+$(if $(BOARD_SYSTEMIMAGE_SQUASHFS_BLOCK_SIZE),$(hide) echo "system_squashfs_block_size=$(BOARD_SYSTEMIMAGE_SQUASHFS_BLOCK_SIZE)" >> $(1))
+$(if $(BOARD_SYSTEMIMAGE_SQUASHFS_DISABLE_4K_ALIGN),$(hide) echo "system_squashfs_disable_4k_align=$(BOARD_SYSTEMIMAGE_SQUASHFS_DISABLE_4K_ALIGN)" >> $(1))
+$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_BASE_FS_PATH),$(hide) echo "system_base_fs_file=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_BASE_FS_PATH)" >> $(1))
+$(if $(BOARD_USERDATAIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "userdata_fs_type=$(BOARD_USERDATAIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
+$(if $(BOARD_USERDATAIMAGE_PARTITION_SIZE),$(hide) echo "userdata_size=$(BOARD_USERDATAIMAGE_PARTITION_SIZE)" >> $(1))
+$(if $(BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "cache_fs_type=$(BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
+$(if $(BOARD_CACHEIMAGE_PARTITION_SIZE),$(hide) echo "cache_size=$(BOARD_CACHEIMAGE_PARTITION_SIZE)" >> $(1))
+$(if $(BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "vendor_fs_type=$(BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
+$(if $(BOARD_VENDORIMAGE_PARTITION_SIZE),$(hide) echo "vendor_size=$(BOARD_VENDORIMAGE_PARTITION_SIZE)" >> $(1))
+$(if $(BOARD_VENDORIMAGE_JOURNAL_SIZE),$(hide) echo "vendor_journal_size=$(BOARD_VENDORIMAGE_JOURNAL_SIZE)" >> $(1))
+$(if $(BOARD_VENDORIMAGE_SQUASHFS_COMPRESSOR),$(hide) echo "vendor_squashfs_compressor=$(BOARD_VENDORIMAGE_SQUASHFS_COMPRESSOR)" >> $(1))
+$(if $(BOARD_VENDORIMAGE_SQUASHFS_COMPRESSOR_OPT),$(hide) echo "vendor_squashfs_compressor_opt=$(BOARD_VENDORIMAGE_SQUASHFS_COMPRESSOR_OPT)" >> $(1))
+$(if $(BOARD_VENDORIMAGE_SQUASHFS_BLOCK_SIZE),$(hide) echo "vendor_squashfs_block_size=$(BOARD_VENDORIMAGE_SQUASHFS_BLOCK_SIZE)" >> $(1))
+$(if $(BOARD_VENDORIMAGE_SQUASHFS_DISABLE_4K_ALIGN),$(hide) echo "vendor_squashfs_disable_4k_align=$(BOARD_VENDORIMAGE_SQUASHFS_DISABLE_4K_ALIGN)" >> $(1))
+$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_BASE_FS_PATH),$(hide) echo "vendor_base_fs_file=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_BASE_FS_PATH)" >> $(1))
+$(if $(BOARD_OEMIMAGE_PARTITION_SIZE),$(hide) echo "oem_size=$(BOARD_OEMIMAGE_PARTITION_SIZE)" >> $(1))
+$(if $(BOARD_OEMIMAGE_JOURNAL_SIZE),$(hide) echo "oem_journal_size=$(BOARD_OEMIMAGE_JOURNAL_SIZE)" >> $(1))
+$(if $(INTERNAL_USERIMAGES_SPARSE_EXT_FLAG),$(hide) echo "extfs_sparse_flag=$(INTERNAL_USERIMAGES_SPARSE_EXT_FLAG)" >> $(1))
+$(if $(INTERNAL_USERIMAGES_SPARSE_SQUASHFS_FLAG),$(hide) echo "squashfs_sparse_flag=$(INTERNAL_USERIMAGES_SPARSE_SQUASHFS_FLAG)" >> $(1))
+$(hide) echo "selinux_fc=$(SELINUX_FC)" >> $(1)
+$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_BOOT_SIGNER),$(hide) echo "boot_signer=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_BOOT_SIGNER)" >> $(1))
+$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY),$(hide) echo "verity=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY)" >> $(1))
+$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY),$(hide) echo "verity_key=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY)" >> $(1))
+$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY),$(hide) echo "verity_signer_cmd=$(notdir $(VERITY_SIGNER))" >> $(1))
+$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY_FEC),$(hide) echo "verity_fec=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY_FEC)" >> $(1))
+$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_VERITY_PARTITION),$(hide) echo "system_verity_block_device=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_VERITY_PARTITION)" >> $(1))
+$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_VERITY_PARTITION),$(hide) echo "vendor_verity_block_device=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_VERITY_PARTITION)" >> $(1))
+$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT),$(hide) echo "vboot=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)" >> $(1))
+$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT),$(hide) echo "vboot_key=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY)" >> $(1))
+$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT),$(hide) echo "vboot_subkey=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY)" >> $(1))
+$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT),$(hide) echo "futility=$(FUTILITY)" >> $(1))
+$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT),$(hide) echo "vboot_signer_cmd=$(VBOOT_SIGNER)" >> $(1))
+$(if $(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)),\
+ $(hide) echo "recovery_as_boot=true" >> $(1))
+$(if $(filter true,$(BOARD_BUILD_SYSTEM_ROOT_IMAGE)),\
+ $(hide) echo "system_root_image=true" >> $(1);\
+ echo "ramdisk_dir=$(TARGET_ROOT_OUT)" >> $(1))
+$(if $(2),$(hide) $(foreach kv,$(2),echo "$(kv)" >> $(1);))
+endef
+
+# -----------------------------------------------------------------
+# Recovery image
+
+# Recovery image exists if we are building recovery, or building recovery as boot.
+ifneq (,$(INSTALLED_RECOVERYIMAGE_TARGET)$(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)))
+
+# Only modules installed under the recovery output dir go into the image.
+INTERNAL_RECOVERYIMAGE_FILES := $(filter $(TARGET_RECOVERY_OUT)/%, \
+ $(ALL_DEFAULT_INSTALLED_MODULES))
+
+recovery_initrc := $(call include-path-for, recovery)/etc/init.rc
+recovery_sepolicy := $(call intermediates-dir-for,ETC,sepolicy.recovery)/sepolicy.recovery
+# NOTE(review): the trailing comment on the next line leaves a trailing
+# space in the value; harmless here since it is only used as a word in
+# command lines — confirm before reusing the variable elsewhere.
+recovery_kernel := $(INSTALLED_KERNEL_TARGET) # same as a non-recovery system
+recovery_ramdisk := $(PRODUCT_OUT)/ramdisk-recovery.img
+recovery_build_prop := $(intermediate_system_build_prop)
+recovery_resources_common := $(call include-path-for, recovery)/res
+# Set recovery_density to the density bucket of the device.
+recovery_density := unknown
+ifneq (,$(PRODUCT_AAPT_PREF_CONFIG))
+# If PRODUCT_AAPT_PREF_CONFIG includes a dpi bucket, then use that value.
+recovery_density := $(filter %dpi,$(PRODUCT_AAPT_PREF_CONFIG))
+else
+# Otherwise, use the default medium density.
+# (Bug fix: this previously assigned the misspelled variable
+# "recovery_densities", which is never read — recovery_density stayed
+# "unknown" and the wildcard check below always fell back to the xhdpi
+# resources, even on mdpi devices.)
+recovery_density := mdpi
+endif
+
+# Use the density-specific recovery resource directory when it exists;
+# otherwise fall back to the xhdpi assets.
+ifneq (,$(wildcard $(recovery_resources_common)-$(recovery_density)))
+recovery_resources_common := $(recovery_resources_common)-$(recovery_density)
+else
+recovery_resources_common := $(recovery_resources_common)-xhdpi
+endif
+
+# Select the 18x32 font on high-density devices (xhdpi and up); and
+# the 12x22 font on other devices. Note that the font selected here
+# can be overridden for a particular device by putting a font.png in
+# its private recovery resources.
+
+ifneq (,$(filter xxxhdpi 560dpi xxhdpi 400dpi xhdpi,$(recovery_density)))
+recovery_font := $(call include-path-for, recovery)/fonts/18x32.png
+else
+recovery_font := $(call include-path-for, recovery)/fonts/12x22.png
+endif
+
+# Devices may ship extra recovery resources in
+# $(TARGET_DEVICE_DIR)/recovery/res unless the board sets
+# TARGET_PRIVATE_RES_DIRS itself.
+ifndef TARGET_PRIVATE_RES_DIRS
+TARGET_PRIVATE_RES_DIRS := $(wildcard $(TARGET_DEVICE_DIR)/recovery/res)
+endif
+# Every file under the resource dirs becomes a dependency; note the
+# $(shell find ...) runs once, at makefile parse time.
+recovery_resource_deps := $(shell find $(recovery_resources_common) \
+ $(TARGET_PRIVATE_RES_DIRS) -type f)
+# fstab: an explicit board override wins; otherwise pick up the
+# device dir's recovery.fstab if present.
+ifdef TARGET_RECOVERY_FSTAB
+recovery_fstab := $(TARGET_RECOVERY_FSTAB)
+else
+recovery_fstab := $(strip $(wildcard $(TARGET_DEVICE_DIR)/recovery.fstab))
+endif
+ifdef TARGET_RECOVERY_WIPE
+recovery_wipe := $(TARGET_RECOVERY_WIPE)
+else
+recovery_wipe :=
+endif
+
+# Prior to A/B update, we used to have:
+# boot.img + recovery-from-boot.p + recovery-resource.dat = recovery.img.
+# recovery-resource.dat is needed only if we carry a patch of the boot and
+# recovery images and invoke install-recovery.sh on the first boot post an
+# OTA update.
+#
+# We no longer need that if one of the following conditions holds:
+# a) We carry a full copy of the recovery image
+# (BOARD_USES_FULL_RECOVERY_IMAGE = true);
+# b) We build a single image that contains boot and recovery both
+# (BOARD_USES_RECOVERY_AS_BOOT = true).
+
+ifeq (,$(filter true, $(BOARD_USES_FULL_RECOVERY_IMAGE) $(BOARD_USES_RECOVERY_AS_BOOT)))
+# Named '.dat' so we don't attempt to use imgdiff for patching it.
+RECOVERY_RESOURCE_ZIP := $(TARGET_OUT)/etc/recovery-resource.dat
+else
+RECOVERY_RESOURCE_ZIP :=
+endif
+
+# Informational only — a missing private res dir or fstab is legal for
+# some targets, so these are $(info), not warnings or errors.
+ifeq ($(TARGET_PRIVATE_RES_DIRS),)
+ $(info No private recovery resources for TARGET_DEVICE $(TARGET_DEVICE))
+endif
+
+ifeq ($(recovery_fstab),)
+ $(info No recovery.fstab for TARGET_DEVICE $(TARGET_DEVICE))
+endif
+
+INTERNAL_RECOVERYIMAGE_ARGS := \
+ $(addprefix --second ,$(INSTALLED_2NDBOOTLOADER_TARGET)) \
+ --kernel $(recovery_kernel) \
+ --ramdisk $(recovery_ramdisk)
+
+# Assumes this has already been stripped
+ifdef BOARD_KERNEL_CMDLINE
+ INTERNAL_RECOVERYIMAGE_ARGS += --cmdline "$(BOARD_KERNEL_CMDLINE)"
+endif
+ifdef BOARD_KERNEL_BASE
+ INTERNAL_RECOVERYIMAGE_ARGS += --base $(BOARD_KERNEL_BASE)
+endif
+BOARD_KERNEL_PAGESIZE := $(strip $(BOARD_KERNEL_PAGESIZE))
+ifdef BOARD_KERNEL_PAGESIZE
+ INTERNAL_RECOVERYIMAGE_ARGS += --pagesize $(BOARD_KERNEL_PAGESIZE)
+endif
+
+# Keys authorized to sign OTA packages this build will accept. The
+# build always uses dev-keys for this; release packaging tools will
+# substitute other keys for this one.
+OTA_PUBLIC_KEYS := $(DEFAULT_SYSTEM_DEV_CERTIFICATE).x509.pem
+
+# Generate a file containing the keys that will be read by the
+# recovery binary.
+RECOVERY_INSTALL_OTA_KEYS := \
+ $(call intermediates-dir-for,PACKAGING,ota_keys)/keys
+DUMPKEY_JAR := $(HOST_OUT_JAVA_LIBRARIES)/dumpkey.jar
+$(RECOVERY_INSTALL_OTA_KEYS): PRIVATE_OTA_PUBLIC_KEYS := $(OTA_PUBLIC_KEYS)
+# NOTE(review): extra_keys is a target-specific variable, so it expands
+# correctly inside the recipe; but in the prerequisite list below it is
+# expanded at read time with the global (empty) value, so the extra key
+# files are likely not tracked as real prerequisites — confirm before
+# relying on incremental rebuilds when PRODUCT_EXTRA_RECOVERY_KEYS changes.
+$(RECOVERY_INSTALL_OTA_KEYS): extra_keys := $(patsubst %,%.x509.pem,$(PRODUCT_EXTRA_RECOVERY_KEYS))
+$(RECOVERY_INSTALL_OTA_KEYS): $(OTA_PUBLIC_KEYS) $(DUMPKEY_JAR) $(extra_keys)
+	@echo "DumpPublicKey: $@ <= $(PRIVATE_OTA_PUBLIC_KEYS) $(extra_keys)"
+	@rm -rf $@
+	@mkdir -p $(dir $@)
+	java -jar $(DUMPKEY_JAR) $(PRIVATE_OTA_PUBLIC_KEYS) $(extra_keys) > $@
+
+# recovery.id records the image id written by mkbootimg --id in the
+# build-recoveryimage-target macro; build.prop embeds it as
+# ro.expect.recovery_id.
+RECOVERYIMAGE_ID_FILE := $(PRODUCT_OUT)/recovery.id
+# $(1): output file
+define build-recoveryimage-target
+ @echo ----- Making recovery image ------
+ $(hide) mkdir -p $(TARGET_RECOVERY_OUT)
+ $(hide) mkdir -p $(TARGET_RECOVERY_ROOT_OUT)/etc $(TARGET_RECOVERY_ROOT_OUT)/sdcard $(TARGET_RECOVERY_ROOT_OUT)/tmp
+ @echo Copying baseline ramdisk...
+ $(hide) rsync -a --exclude=etc --exclude=sdcard $(IGNORE_CACHE_LINK) $(TARGET_ROOT_OUT) $(TARGET_RECOVERY_OUT) # "cp -Rf" fails to overwrite broken symlinks on Mac.
+ @echo Modifying ramdisk contents...
+ $(hide) rm -f $(TARGET_RECOVERY_ROOT_OUT)/init*.rc
+ $(hide) cp -f $(recovery_initrc) $(TARGET_RECOVERY_ROOT_OUT)/
+ $(hide) rm -f $(TARGET_RECOVERY_ROOT_OUT)/sepolicy
+ $(hide) cp -f $(recovery_sepolicy) $(TARGET_RECOVERY_ROOT_OUT)/sepolicy
+ $(hide) cp $(TARGET_ROOT_OUT)/init.recovery.*.rc $(TARGET_RECOVERY_ROOT_OUT)/ || true # Ignore error when the src file doesn't exist.
+ $(hide) mkdir -p $(TARGET_RECOVERY_ROOT_OUT)/res
+ $(hide) rm -rf $(TARGET_RECOVERY_ROOT_OUT)/res/*
+ $(hide) cp -rf $(recovery_resources_common)/* $(TARGET_RECOVERY_ROOT_OUT)/res
+ $(hide) cp -f $(recovery_font) $(TARGET_RECOVERY_ROOT_OUT)/res/images/font.png
+ $(hide) $(foreach item,$(TARGET_PRIVATE_RES_DIRS), \
+ cp -rf $(item) $(TARGET_RECOVERY_ROOT_OUT)/$(newline))
+ $(hide) $(foreach item,$(recovery_fstab), \
+ cp -f $(item) $(TARGET_RECOVERY_ROOT_OUT)/etc/recovery.fstab)
+ $(if $(strip $(recovery_wipe)), \
+ $(hide) cp -f $(recovery_wipe) $(TARGET_RECOVERY_ROOT_OUT)/etc/recovery.wipe)
+ $(hide) cp $(RECOVERY_INSTALL_OTA_KEYS) $(TARGET_RECOVERY_ROOT_OUT)/res/keys
+ $(hide) cat $(INSTALLED_DEFAULT_PROP_TARGET) $(recovery_build_prop) \
+ > $(TARGET_RECOVERY_ROOT_OUT)/default.prop
+ $(BOARD_RECOVERY_IMAGE_PREPARE)
+ $(if $(filter true,$(BOARD_BUILD_SYSTEM_ROOT_IMAGE)), \
+ $(hide) mkdir -p $(TARGET_RECOVERY_ROOT_OUT)/system_root; \
+ rm -rf $(TARGET_RECOVERY_ROOT_OUT)/system; \
+ ln -sf /system_root/system $(TARGET_RECOVERY_ROOT_OUT)/system) # Mount the system_root_image to /system_root and symlink /system.
+ $(hide) $(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_RECOVERY_ROOT_OUT) | $(MINIGZIP) > $(recovery_ramdisk)
+ $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \
+ $(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1).unsigned, \
+ $(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1) --id > $(RECOVERYIMAGE_ID_FILE))
+ $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_BOOT_SIGNER)),\
+ $(if $(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)),\
+ $(BOOT_SIGNER) /boot $(1) $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).pk8 $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).x509.pem $(1),\
+ $(BOOT_SIGNER) /recovery $(1) $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).pk8 $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).x509.pem $(1)\
+ )\
+ )
+ $(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \
+ $(VBOOT_SIGNER) $(FUTILITY) $(1).unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $(1).keyblock $(1))
+ $(if $(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)), \
+ $(hide) $(call assert-max-image-size,$(1),$(BOARD_BOOTIMAGE_PARTITION_SIZE)), \
+ $(hide) $(call assert-max-image-size,$(1),$(BOARD_RECOVERYIMAGE_PARTITION_SIZE)))
+ @echo ----- Made recovery image: $(1) --------
+endef
+
+# When the device boots directly into recovery (no separate recovery
+# partition), the boot image IS the recovery image: build it with
+# build-recoveryimage-target and add the signer tools as prerequisites
+# when the product signs its boot images.
+ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
+ifeq (true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_BOOT_SIGNER))
+$(INSTALLED_BOOTIMAGE_TARGET) : $(BOOT_SIGNER)
+endif
+ifeq (true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT))
+$(INSTALLED_BOOTIMAGE_TARGET) : $(VBOOT_SIGNER)
+endif
+$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTFS) $(MKBOOTIMG) $(MINIGZIP) \
+ $(INSTALLED_RAMDISK_TARGET) \
+ $(INTERNAL_RECOVERYIMAGE_FILES) \
+ $(recovery_initrc) $(recovery_sepolicy) $(recovery_kernel) \
+ $(INSTALLED_2NDBOOTLOADER_TARGET) \
+ $(recovery_build_prop) $(recovery_resource_deps) \
+ $(recovery_fstab) \
+ $(RECOVERY_INSTALL_OTA_KEYS)
+ $(call pretty,"Target boot image from recovery: $@")
+ $(call build-recoveryimage-target, $@)
+endif
+
+# Standalone recovery image. Depends on the boot image too so both
+# staging trees are current before the recovery ramdisk is assembled.
+$(INSTALLED_RECOVERYIMAGE_TARGET): $(MKBOOTFS) $(MKBOOTIMG) $(MINIGZIP) \
+ $(INSTALLED_RAMDISK_TARGET) \
+ $(INSTALLED_BOOTIMAGE_TARGET) \
+ $(INTERNAL_RECOVERYIMAGE_FILES) \
+ $(recovery_initrc) $(recovery_sepolicy) $(recovery_kernel) \
+ $(INSTALLED_2NDBOOTLOADER_TARGET) \
+ $(recovery_build_prop) $(recovery_resource_deps) \
+ $(recovery_fstab) \
+ $(RECOVERY_INSTALL_OTA_KEYS)
+ $(call build-recoveryimage-target, $@)
+
+# Optional zip of the recovery /res tree (stored uncompressed, junked
+# paths, deterministic order via sort + ziptime timestamp stripping).
+ifdef RECOVERY_RESOURCE_ZIP
+$(RECOVERY_RESOURCE_ZIP): $(INSTALLED_RECOVERYIMAGE_TARGET) | $(ZIPTIME)
+ $(hide) mkdir -p $(dir $@)
+ $(hide) find $(TARGET_RECOVERY_ROOT_OUT)/res -type f | sort | zip -0qrjX $@ -@
+ $(remove-timestamps-from-package)
+endif
+
+# Rebuild the recovery image without checking any prerequisites.
+.PHONY: recoveryimage-nodeps
+recoveryimage-nodeps:
+ @echo "make $@: ignoring dependencies"
+ $(call build-recoveryimage-target, $(INSTALLED_RECOVERYIMAGE_TARGET))
+
+# NOTE(review): this "else" closes a conditional opened earlier in the
+# file (not visible in this hunk), guarding on whether
+# INSTALLED_RECOVERYIMAGE_TARGET is defined.
+else # INSTALLED_RECOVERYIMAGE_TARGET not defined
+RECOVERY_RESOURCE_ZIP :=
+endif
+
+.PHONY: recoveryimage
+recoveryimage: $(INSTALLED_RECOVERYIMAGE_TARGET) $(RECOVERY_RESOURCE_ZIP)
+
+# Default NAND geometry (page/spare bytes) when the board config does
+# not provide one.
+ifeq ($(BOARD_NAND_PAGE_SIZE),)
+BOARD_NAND_PAGE_SIZE := 2048
+endif
+
+ifeq ($(BOARD_NAND_SPARE_SIZE),)
+BOARD_NAND_SPARE_SIZE := 64
+endif
+
+# -----------------------------------------------------------------
+# system image
+#
+# Remove overridden packages from $(ALL_PDK_FUSION_FILES)
+PDK_FUSION_SYSIMG_FILES := \
+ $(filter-out $(foreach p,$(overridden_packages),$(p) %/$(p).apk), \
+ $(ALL_PDK_FUSION_FILES))
+
+# Everything destined for $(TARGET_OUT): prebuilts, generated sources,
+# installed modules, PDK fusion files, and the recovery resource zip.
+INTERNAL_SYSTEMIMAGE_FILES := $(filter $(TARGET_OUT)/%, \
+ $(ALL_PREBUILT) \
+ $(ALL_GENERATED_SOURCES) \
+ $(ALL_DEFAULT_INSTALLED_MODULES) \
+ $(PDK_FUSION_SYSIMG_FILES) \
+ $(RECOVERY_RESOURCE_ZIP))
+
+
+FULL_SYSTEMIMAGE_DEPS := $(INTERNAL_SYSTEMIMAGE_FILES) $(INTERNAL_USERIMAGES_DEPS)
+# -----------------------------------------------------------------
+# installed file list
+# Depending on anything that $(BUILT_SYSTEMIMAGE) depends on.
+# We put installed-files.txt ahead of image itself in the dependency graph
+# so that we can get the size stat even if the build fails due to too large
+# system image.
+INSTALLED_FILES_FILE := $(PRODUCT_OUT)/installed-files.txt
+$(INSTALLED_FILES_FILE): $(FULL_SYSTEMIMAGE_DEPS)
+ @echo Installed file list: $@
+ @mkdir -p $(dir $@)
+ @rm -f $@
+ $(hide) build/tools/fileslist.py $(TARGET_OUT) > $@
+
+.PHONY: installed-file-list
+installed-file-list: $(INSTALLED_FILES_FILE)
+
+# Ship the file list with SDK builds.
+$(call dist-for-goals, sdk win_sdk sdk_addon, $(INSTALLED_FILES_FILE))
+
+systemimage_intermediates := \
+ $(call intermediates-dir-for,PACKAGING,systemimage)
+BUILT_SYSTEMIMAGE := $(systemimage_intermediates)/system.img
+
+# Create symlink /system/vendor to /vendor if necessary.
+# When a separate vendor.img is built, nothing may be installed under
+# $(TARGET_OUT)/vendor as a real directory -- fail the build if so,
+# otherwise (re)create the symlink.
+ifdef BOARD_USES_VENDORIMAGE
+define create-system-vendor-symlink
+$(hide) if [ -d $(TARGET_OUT)/vendor ] && [ ! -h $(TARGET_OUT)/vendor ]; then \
+ echo 'Non-symlink $(TARGET_OUT)/vendor detected!' 1>&2; \
+ echo 'You cannot install files to $(TARGET_OUT)/vendor while building a separate vendor.img!' 1>&2; \
+ exit 1; \
+fi
+$(hide) ln -sf /vendor $(TARGET_OUT)/vendor
+endef
+else
+# No separate vendor image: the symlink step is a no-op.
+define create-system-vendor-symlink
+endef
+endif
+
+# Build the system image into $(1) from $(TARGET_OUT) via
+# build_image.py, driven by a generated property dictionary
+# (fsck skipped). On failure, print the tree size and the partition
+# limit to stderr to help diagnose out-of-space errors, rescue
+# installed-files.txt into DIST_DIR, and fail the build.
+# NOTE: the shell tests use POSIX "=" (not the bash-only "=="), so the
+# recipe also works when /bin/sh is dash.
+# $(1): output file
+define build-systemimage-target
+ @echo "Target system fs image: $(1)"
+ $(call create-system-vendor-symlink)
+ @mkdir -p $(dir $(1)) $(systemimage_intermediates) && rm -rf $(systemimage_intermediates)/system_image_info.txt
+ $(call generate-userimage-prop-dictionary, $(systemimage_intermediates)/system_image_info.txt, \
+ skip_fsck=true)
+ $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH \
+ ./build/tools/releasetools/build_image.py \
+ $(TARGET_OUT) $(systemimage_intermediates)/system_image_info.txt $(1) $(TARGET_OUT) \
+ || ( echo "Out of space? the tree size of $(TARGET_OUT) is (MB): " 1>&2 ;\
+ du -sm $(TARGET_OUT) 1>&2;\
+ if [ "$(INTERNAL_USERIMAGES_EXT_VARIANT)" = "ext4" ]; then \
+ maxsize=$(BOARD_SYSTEMIMAGE_PARTITION_SIZE); \
+ if [ "$(BOARD_HAS_EXT4_RESERVED_BLOCKS)" = "true" ]; then \
+ maxsize=$$((maxsize - 4096 * 4096)); \
+ fi; \
+ echo "The max is $$(( maxsize / 1048576 )) MB." 1>&2 ;\
+ else \
+ echo "The max is $$(( $(BOARD_SYSTEMIMAGE_PARTITION_SIZE) / 1048576 )) MB." 1>&2 ;\
+ fi; \
+ mkdir -p $(DIST_DIR); cp $(INSTALLED_FILES_FILE) $(DIST_DIR)/installed-files-rescued.txt; \
+ exit 1 )
+endef
+
+# The file list is a prerequisite so size stats exist even when the
+# image itself fails to build (see installed-files.txt rule above).
+$(BUILT_SYSTEMIMAGE): $(FULL_SYSTEMIMAGE_DEPS) $(INSTALLED_FILES_FILE)
+ $(call build-systemimage-target,$@)
+
+INSTALLED_SYSTEMIMAGE := $(PRODUCT_OUT)/system.img
+SYSTEMIMAGE_SOURCE_DIR := $(TARGET_OUT)
+
+# The system partition needs room for the recovery image as well. We
+# now store the recovery image as a binary patch using the boot image
+# as the source (since they are very similar). Generate the patch so
+# we can see how big it's going to be, and include that in the system
+# image size check calculation.
+ifneq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
+ifneq ($(BOARD_USES_FULL_RECOVERY_IMAGE),true)
+intermediates := $(call intermediates-dir-for,PACKAGING,recovery_patch)
+RECOVERY_FROM_BOOT_PATCH := $(intermediates)/recovery_from_boot.p
+$(RECOVERY_FROM_BOOT_PATCH): $(INSTALLED_RECOVERYIMAGE_TARGET) \
+ $(INSTALLED_BOOTIMAGE_TARGET) \
+ $(HOST_OUT_EXECUTABLES)/imgdiff \
+ $(HOST_OUT_EXECUTABLES)/bsdiff
+ @echo "Construct recovery from boot"
+ mkdir -p $(dir $@)
+ PATH=$(HOST_OUT_EXECUTABLES):$$PATH $(HOST_OUT_EXECUTABLES)/imgdiff $(INSTALLED_BOOTIMAGE_TARGET) $(INSTALLED_RECOVERYIMAGE_TARGET) $@
+else # $(BOARD_USES_FULL_RECOVERY_IMAGE) == true
+# Boards that flash the full recovery image count the whole image in
+# the size check instead of a boot->recovery patch.
+RECOVERY_FROM_BOOT_PATCH := $(INSTALLED_RECOVERYIMAGE_TARGET)
+endif
+endif
+
+
+# Copy the built image into place and verify that image + recovery
+# patch together fit the system partition.
+$(INSTALLED_SYSTEMIMAGE): $(BUILT_SYSTEMIMAGE) $(RECOVERY_FROM_BOOT_PATCH) | $(ACP)
+ @echo "Install system fs image: $@"
+ $(copy-file-to-target)
+ $(hide) $(call assert-max-image-size,$@ $(RECOVERY_FROM_BOOT_PATCH),$(BOARD_SYSTEMIMAGE_PARTITION_SIZE))
+
+# "systemimage" is a command, not a file: declare it phony so a stray
+# file named "systemimage" in the build root cannot satisfy it.
+.PHONY: systemimage
+systemimage: $(INSTALLED_SYSTEMIMAGE)
+
+# Rebuild system.img without dependency checking; depends on any other
+# goals named on the command line so they complete first.
+.PHONY: systemimage-nodeps snod
+systemimage-nodeps snod: $(filter-out systemimage-nodeps snod,$(MAKECMDGOALS)) \
+ | $(INTERNAL_USERIMAGES_DEPS)
+ @echo "make $@: ignoring dependencies"
+ $(call build-systemimage-target,$(INSTALLED_SYSTEMIMAGE))
+ $(hide) $(call assert-max-image-size,$(INSTALLED_SYSTEMIMAGE),$(BOARD_SYSTEMIMAGE_PARTITION_SIZE))
+
+# A nodeps rebuild can leave dexpreopt output stale.
+ifneq (,$(filter systemimage-nodeps snod, $(MAKECMDGOALS)))
+ifeq (true,$(WITH_DEXPREOPT))
+$(warning Warning: with dexpreopt enabled, you may need a full rebuild.)
+endif
+endif
+
+#######
+## system tarball
+# Tar up $(PRODUCT_OUT)/system with mktarball; the vendor symlink is
+# (re)created first so the tarball matches the image layout.
+define build-systemtarball-target
+ $(call pretty,"Target system fs tarball: $(INSTALLED_SYSTEMTARBALL_TARGET)")
+ $(call create-system-vendor-symlink)
+ $(MKTARBALL) $(FS_GET_STATS) \
+ $(PRODUCT_OUT) system $(PRIVATE_SYSTEM_TAR) \
+ $(INSTALLED_SYSTEMTARBALL_TARGET) $(TARGET_OUT)
+endef
+
+# Compression suffix for the tarball; bz2 unless overridden.
+ifndef SYSTEM_TARBALL_FORMAT
+ SYSTEM_TARBALL_FORMAT := bz2
+endif
+
+system_tar := $(PRODUCT_OUT)/system.tar
+INSTALLED_SYSTEMTARBALL_TARGET := $(system_tar).$(SYSTEM_TARBALL_FORMAT)
+$(INSTALLED_SYSTEMTARBALL_TARGET): PRIVATE_SYSTEM_TAR := $(system_tar)
+$(INSTALLED_SYSTEMTARBALL_TARGET): $(FS_GET_STATS) $(INTERNAL_SYSTEMIMAGE_FILES)
+ $(build-systemtarball-target)
+
+# Rebuild the tarball without checking system-file prerequisites
+# (other command-line goals still run first).
+.PHONY: systemtarball-nodeps
+systemtarball-nodeps: $(FS_GET_STATS) \
+ $(filter-out systemtarball-nodeps stnod,$(MAKECMDGOALS))
+ $(build-systemtarball-target)
+
+.PHONY: stnod
+stnod: systemtarball-nodeps
+
+# -----------------------------------------------------------------
+## platform.zip: system, plus other files to be used in PDK fusion build,
+## in a zip file
+##
+## PDK_PLATFORM_ZIP_PRODUCT_BINARIES is used to store specified files to platform.zip.
+## The variable will be typically set from BoardConfig.mk.
+## Files under out dir will be rejected to prevent possible conflicts with other rules.
+# Dexpreopted java libraries / apps whose installed artifact actually
+# made it into this build's installed-module list.
+pdk_odex_javalibs := $(strip $(foreach m,$(DEXPREOPT.MODULES.JAVA_LIBRARIES),\
+ $(if $(filter $(DEXPREOPT.$(m).INSTALLED),$(ALL_DEFAULT_INSTALLED_MODULES)),$(m))))
+pdk_odex_apps := $(strip $(foreach m,$(DEXPREOPT.MODULES.APPS),\
+ $(if $(filter $(DEXPREOPT.$(m).INSTALLED),$(ALL_DEFAULT_INSTALLED_MODULES)),$(m))))
+# Intermediate jars/apks that still contain classes.dex, for the zip.
+pdk_classes_dex := $(strip \
+ $(foreach m,$(pdk_odex_javalibs),$(call intermediates-dir-for,JAVA_LIBRARIES,$(m),,COMMON)/javalib.jar) \
+ $(foreach m,$(pdk_odex_apps),$(call intermediates-dir-for,APPS,$(m))/package.dex.apk))
+
+# Generated makefile recording the dexpreopt configuration of every
+# module above, so a PDK fusion build can reproduce it. The foreach
+# blocks below emit one group of PDK.DEXPREOPT.* lines per module
+# ($(newline) separates the generated recipe lines).
+pdk_odex_config_mk := $(PRODUCT_OUT)/pdk_dexpreopt_config.mk
+$(pdk_odex_config_mk): PRIVATE_JAVA_LIBRARIES := $(pdk_odex_javalibs)
+$(pdk_odex_config_mk): PRIVATE_APPS := $(pdk_odex_apps)
+$(pdk_odex_config_mk) :
+ @echo "PDK odex config makefile: $@"
+ $(hide) mkdir -p $(dir $@)
+ $(hide) echo "# Auto-generated. Do not modify." > $@
+ $(hide) echo "PDK.DEXPREOPT.JAVA_LIBRARIES:=$(PRIVATE_JAVA_LIBRARIES)" >> $@
+ $(hide) echo "PDK.DEXPREOPT.APPS:=$(PRIVATE_APPS)" >> $@
+ $(foreach m,$(PRIVATE_JAVA_LIBRARIES),\
+ $(hide) echo "PDK.DEXPREOPT.$(m).SRC:=$(patsubst $(OUT_DIR)/%,%,$(call intermediates-dir-for,JAVA_LIBRARIES,$(m),,COMMON)/javalib.jar)" >> $@$(newline)\
+ $(hide) echo "PDK.DEXPREOPT.$(m).DEX_PREOPT:=$(DEXPREOPT.$(m).DEX_PREOPT)" >> $@$(newline)\
+ $(hide) echo "PDK.DEXPREOPT.$(m).MULTILIB:=$(DEXPREOPT.$(m).MULTILIB)" >> $@$(newline)\
+ $(hide) echo "PDK.DEXPREOPT.$(m).DEX_PREOPT_FLAGS:=$(DEXPREOPT.$(m).DEX_PREOPT_FLAGS)" >> $@$(newline)\
+ )
+ $(foreach m,$(PRIVATE_APPS),\
+ $(hide) echo "PDK.DEXPREOPT.$(m).SRC:=$(patsubst $(OUT_DIR)/%,%,$(call intermediates-dir-for,APPS,$(m))/package.dex.apk)" >> $@$(newline)\
+ $(hide) echo "PDK.DEXPREOPT.$(m).DEX_PREOPT:=$(DEXPREOPT.$(m).DEX_PREOPT)" >> $@$(newline)\
+ $(hide) echo "PDK.DEXPREOPT.$(m).MULTILIB:=$(DEXPREOPT.$(m).MULTILIB)" >> $@$(newline)\
+ $(hide) echo "PDK.DEXPREOPT.$(m).DEX_PREOPT_FLAGS:=$(DEXPREOPT.$(m).DEX_PREOPT_FLAGS)" >> $@$(newline)\
+ $(hide) echo "PDK.DEXPREOPT.$(m).PRIVILEGED_MODULE:=$(DEXPREOPT.$(m).PRIVILEGED_MODULE)" >> $@$(newline)\
+ $(hide) echo "PDK.DEXPREOPT.$(m).PROPRIETARY_MODULE:=$(DEXPREOPT.$(m).PROPRIETARY_MODULE)" >> $@$(newline)\
+ $(hide) echo "PDK.DEXPREOPT.$(m).TARGET_ARCH:=$(DEXPREOPT.$(m).TARGET_ARCH)" >> $@$(newline)\
+ $(hide) echo "PDK.DEXPREOPT.$(m).STRIPPED_SRC:=$(patsubst $(PRODUCT_OUT)/%,%,$(DEXPREOPT.$(m).INSTALLED_STRIPPED))" >> $@$(newline)\
+ )
+
+# Reject anything under the out dir (see header comment above).
+PDK_PLATFORM_ZIP_PRODUCT_BINARIES := $(filter-out $(OUT_DIR)/%,$(PDK_PLATFORM_ZIP_PRODUCT_BINARIES))
+INSTALLED_PLATFORM_ZIP := $(PRODUCT_OUT)/platform.zip
+
+# Assemble platform.zip: system tree + notices + symbols, then vendor
+# tree, java contents, product binaries, and dexpreopt files/config as
+# configured. Paths are rewritten relative to the zip's working dir.
+$(INSTALLED_PLATFORM_ZIP): PRIVATE_DEX_FILES := $(pdk_classes_dex)
+$(INSTALLED_PLATFORM_ZIP): PRIVATE_ODEX_CONFIG := $(pdk_odex_config_mk)
+$(INSTALLED_PLATFORM_ZIP) : $(INTERNAL_SYSTEMIMAGE_FILES) $(pdk_odex_config_mk)
+ $(call pretty,"Platform zip package: $(INSTALLED_PLATFORM_ZIP)")
+ $(hide) rm -f $@
+ $(hide) cd $(dir $@) && zip -qryX $(notdir $@) \
+ $(TARGET_COPY_OUT_SYSTEM) \
+ $(patsubst $(PRODUCT_OUT)/%, %, $(TARGET_OUT_NOTICE_FILES)) \
+ $(addprefix symbols/,$(PDK_SYMBOL_FILES_LIST))
+ifdef BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE
+ $(hide) cd $(dir $@) && zip -qryX $(notdir $@) \
+ $(TARGET_COPY_OUT_VENDOR)
+endif
+ifneq ($(PDK_PLATFORM_JAVA_ZIP_CONTENTS),)
+ $(hide) cd $(OUT_DIR) && zip -qryX $(patsubst $(OUT_DIR)/%,%,$@) $(PDK_PLATFORM_JAVA_ZIP_CONTENTS)
+endif
+ifneq ($(PDK_PLATFORM_ZIP_PRODUCT_BINARIES),)
+ $(hide) zip -qryX $@ $(PDK_PLATFORM_ZIP_PRODUCT_BINARIES)
+endif
+ @# Add dex-preopt files and config.
+ $(if $(PRIVATE_DEX_FILES),$(hide) cd $(OUT_DIR) && zip -qryX $(patsubst $(OUT_DIR)/%,%,$@ $(PRIVATE_DEX_FILES)))
+ $(hide) zip -qryXj $@ $(PRIVATE_ODEX_CONFIG)
+
+.PHONY: platform
+platform: $(INSTALLED_PLATFORM_ZIP)
+
+.PHONY: platform-java
+platform-java: platform
+
+# Dist the platform.zip
+ifneq (,$(filter platform platform-java, $(MAKECMDGOALS)))
+$(call dist-for-goals, platform platform-java, $(INSTALLED_PLATFORM_ZIP))
+endif
+
+# -----------------------------------------------------------------
+## boot tarball
+# Stage the boot image files plus the kernel cmdline into
+# $(PRODUCT_OUT)/boot, then tar that directory with mktarball.
+define build-boottarball-target
+ $(hide) echo "Target boot fs tarball: $(INSTALLED_BOOTTARBALL_TARGET)"
+ $(hide) mkdir -p $(PRODUCT_OUT)/boot
+ $(hide) cp -f $(INTERNAL_BOOTIMAGE_FILES) $(PRODUCT_OUT)/boot/.
+ $(hide) echo $(BOARD_KERNEL_CMDLINE) > $(PRODUCT_OUT)/boot/cmdline
+ $(hide) $(MKTARBALL) $(FS_GET_STATS) \
+ $(PRODUCT_OUT) boot $(PRIVATE_BOOT_TAR) \
+ $(INSTALLED_BOOTTARBALL_TARGET) $(TARGET_OUT)
+endef
+
+# Compression suffix for the tarball; bz2 unless overridden.
+ifndef BOOT_TARBALL_FORMAT
+ BOOT_TARBALL_FORMAT := bz2
+endif
+
+boot_tar := $(PRODUCT_OUT)/boot.tar
+INSTALLED_BOOTTARBALL_TARGET := $(boot_tar).$(BOOT_TARBALL_FORMAT)
+$(INSTALLED_BOOTTARBALL_TARGET): PRIVATE_BOOT_TAR := $(boot_tar)
+$(INSTALLED_BOOTTARBALL_TARGET): $(FS_GET_STATS) $(INTERNAL_BOOTIMAGE_FILES)
+ $(build-boottarball-target)
+
+# Rebuild the tarball without checking boot-image prerequisites
+# (other command-line goals still run first).
+.PHONY: boottarball-nodeps btnod
+boottarball-nodeps btnod: $(FS_GET_STATS) \
+ $(filter-out boottarball-nodeps btnod,$(MAKECMDGOALS))
+ $(build-boottarball-target)
+
+
+# -----------------------------------------------------------------
+# data partition image
+INTERNAL_USERDATAIMAGE_FILES := \
+ $(filter $(TARGET_OUT_DATA)/%,$(ALL_DEFAULT_INSTALLED_MODULES))
+
+# Don't build userdata.img if it's extfs but no partition size
+skip_userdata.img :=
+ifdef INTERNAL_USERIMAGES_EXT_VARIANT
+ifndef BOARD_USERDATAIMAGE_PARTITION_SIZE
+skip_userdata.img := true
+endif
+endif
+
+ifneq ($(skip_userdata.img),true)
+userdataimage_intermediates := \
+ $(call intermediates-dir-for,PACKAGING,userdata)
+BUILT_USERDATAIMAGE_TARGET := $(PRODUCT_OUT)/userdata.img
+
+# Build userdata.img from $(TARGET_OUT_DATA) via build_image.py
+# (fsck skipped), then assert it fits the userdata partition.
+define build-userdataimage-target
+ $(call pretty,"Target userdata fs image: $(INSTALLED_USERDATAIMAGE_TARGET)")
+ @mkdir -p $(TARGET_OUT_DATA)
+ @mkdir -p $(userdataimage_intermediates) && rm -rf $(userdataimage_intermediates)/userdata_image_info.txt
+ $(call generate-userimage-prop-dictionary, $(userdataimage_intermediates)/userdata_image_info.txt, skip_fsck=true)
+ $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH \
+ ./build/tools/releasetools/build_image.py \
+ $(TARGET_OUT_DATA) $(userdataimage_intermediates)/userdata_image_info.txt $(INSTALLED_USERDATAIMAGE_TARGET) $(TARGET_OUT)
+ $(hide) $(call assert-max-image-size,$(INSTALLED_USERDATAIMAGE_TARGET),$(BOARD_USERDATAIMAGE_PARTITION_SIZE))
+endef
+
+# We just build this directly to the install location.
+INSTALLED_USERDATAIMAGE_TARGET := $(BUILT_USERDATAIMAGE_TARGET)
+$(INSTALLED_USERDATAIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) \
+ $(INTERNAL_USERDATAIMAGE_FILES)
+ $(build-userdataimage-target)
+
+# Rebuild userdata.img without dependency checking (tools are
+# order-only so they exist but don't force a rebuild).
+.PHONY: userdataimage-nodeps
+userdataimage-nodeps: | $(INTERNAL_USERIMAGES_DEPS)
+ $(build-userdataimage-target)
+
+endif # not skip_userdata.img
+skip_userdata.img :=
+
+#######
+## data partition tarball
+define build-userdatatarball-target
+ $(call pretty,"Target userdata fs tarball: " \
+ "$(INSTALLED_USERDATATARBALL_TARGET)")
+ $(MKTARBALL) $(FS_GET_STATS) \
+ $(PRODUCT_OUT) data $(PRIVATE_USERDATA_TAR) \
+ $(INSTALLED_USERDATATARBALL_TARGET) $(TARGET_OUT)
+endef
+
+userdata_tar := $(PRODUCT_OUT)/userdata.tar
+INSTALLED_USERDATATARBALL_TARGET := $(userdata_tar).bz2
+$(INSTALLED_USERDATATARBALL_TARGET): PRIVATE_USERDATA_TAR := $(userdata_tar)
+$(INSTALLED_USERDATATARBALL_TARGET): $(FS_GET_STATS) $(INTERNAL_USERDATAIMAGE_FILES)
+ $(build-userdatatarball-target)
+
+$(call dist-for-goals,userdatatarball,$(INSTALLED_USERDATATARBALL_TARGET))
+
+.PHONY: userdatatarball-nodeps
+userdatatarball-nodeps: $(FS_GET_STATS)
+ $(build-userdatatarball-target)
+
+
+# -----------------------------------------------------------------
+# cache partition image
+# Only built when the board declares a cache filesystem type;
+# otherwise the recovery ramdisk copy must skip the broken cache link.
+ifdef BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE
+INTERNAL_CACHEIMAGE_FILES := \
+ $(filter $(TARGET_OUT_CACHE)/%,$(ALL_DEFAULT_INSTALLED_MODULES))
+
+cacheimage_intermediates := \
+ $(call intermediates-dir-for,PACKAGING,cache)
+BUILT_CACHEIMAGE_TARGET := $(PRODUCT_OUT)/cache.img
+
+# Build cache.img from $(TARGET_OUT_CACHE) via build_image.py
+# (fsck skipped), then assert it fits the cache partition.
+define build-cacheimage-target
+ $(call pretty,"Target cache fs image: $(INSTALLED_CACHEIMAGE_TARGET)")
+ @mkdir -p $(TARGET_OUT_CACHE)
+ @mkdir -p $(cacheimage_intermediates) && rm -rf $(cacheimage_intermediates)/cache_image_info.txt
+ $(call generate-userimage-prop-dictionary, $(cacheimage_intermediates)/cache_image_info.txt, skip_fsck=true)
+ $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH \
+ ./build/tools/releasetools/build_image.py \
+ $(TARGET_OUT_CACHE) $(cacheimage_intermediates)/cache_image_info.txt $(INSTALLED_CACHEIMAGE_TARGET) $(TARGET_OUT)
+ $(hide) $(call assert-max-image-size,$(INSTALLED_CACHEIMAGE_TARGET),$(BOARD_CACHEIMAGE_PARTITION_SIZE))
+endef
+
+# We just build this directly to the install location.
+INSTALLED_CACHEIMAGE_TARGET := $(BUILT_CACHEIMAGE_TARGET)
+$(INSTALLED_CACHEIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_CACHEIMAGE_FILES)
+ $(build-cacheimage-target)
+
+.PHONY: cacheimage-nodeps
+cacheimage-nodeps: | $(INTERNAL_USERIMAGES_DEPS)
+ $(build-cacheimage-target)
+
+else # BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE
+# we need to ignore the broken cache link when doing the rsync
+IGNORE_CACHE_LINK := --exclude=cache
+endif # BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE
+
+
+# -----------------------------------------------------------------
+# system_other partition image
+# When odex files go to the "other" system slot (A/B), force
+# BOARD_USES_SYSTEM_OTHER on and install a marker file there.
+ifeq ($(BOARD_USES_SYSTEM_OTHER_ODEX),true)
+BOARD_USES_SYSTEM_OTHER := true
+
+# Marker file to identify that odex files are installed
+INSTALLED_SYSTEM_OTHER_ODEX_MARKER := $(TARGET_OUT_SYSTEM_OTHER)/system-other-odex-marker
+ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_SYSTEM_OTHER_ODEX_MARKER)
+$(INSTALLED_SYSTEM_OTHER_ODEX_MARKER):
+ $(hide) touch $@
+endif
+
+ifdef BOARD_USES_SYSTEM_OTHER
+INTERNAL_SYSTEMOTHERIMAGE_FILES := \
+ $(filter $(TARGET_OUT_SYSTEM_OTHER)/%,\
+ $(ALL_DEFAULT_INSTALLED_MODULES)\
+ $(ALL_PDK_FUSION_FILES))
+
+# File list for the system_other tree (mirrors installed-files.txt).
+INSTALLED_FILES_FILE_SYSTEMOTHER := $(PRODUCT_OUT)/installed-files-system-other.txt
+$(INSTALLED_FILES_FILE_SYSTEMOTHER) : $(INTERNAL_SYSTEMOTHERIMAGE_FILES)
+ @echo Installed file list: $@
+ @mkdir -p $(dir $@)
+ @rm -f $@
+ $(hide) build/tools/fileslist.py $(TARGET_OUT_SYSTEM_OTHER) > $@
+
+systemotherimage_intermediates := \
+ $(call intermediates-dir-for,PACKAGING,system_other)
+BUILT_SYSTEMOTHERIMAGE_TARGET := $(PRODUCT_OUT)/system_other.img
+
+# Note that we assert the size is SYSTEMIMAGE_PARTITION_SIZE since this is the 'b' system image.
+define build-systemotherimage-target
+ $(call pretty,"Target system_other fs image: $(INSTALLED_SYSTEMOTHERIMAGE_TARGET)")
+ @mkdir -p $(TARGET_OUT_SYSTEM_OTHER)
+ @mkdir -p $(systemotherimage_intermediates) && rm -rf $(systemotherimage_intermediates)/system_other_image_info.txt
+ $(call generate-userimage-prop-dictionary, $(systemotherimage_intermediates)/system_other_image_info.txt, skip_fsck=true)
+ $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH \
+ ./build/tools/releasetools/build_image.py \
+ $(TARGET_OUT_SYSTEM_OTHER) $(systemotherimage_intermediates)/system_other_image_info.txt $(INSTALLED_SYSTEMOTHERIMAGE_TARGET) $(TARGET_OUT)
+ $(hide) $(call assert-max-image-size,$(INSTALLED_SYSTEMOTHERIMAGE_TARGET),$(BOARD_SYSTEMIMAGE_PARTITION_SIZE))
+endef
+
+# We just build this directly to the install location.
+INSTALLED_SYSTEMOTHERIMAGE_TARGET := $(BUILT_SYSTEMOTHERIMAGE_TARGET)
+$(INSTALLED_SYSTEMOTHERIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_SYSTEMOTHERIMAGE_FILES) $(INSTALLED_FILES_FILE_SYSTEMOTHER)
+ $(build-systemotherimage-target)
+
+.PHONY: systemotherimage-nodeps
+systemotherimage-nodeps: | $(INTERNAL_USERIMAGES_DEPS)
+ $(build-systemotherimage-target)
+
+endif # BOARD_USES_SYSTEM_OTHER
+
+
+# -----------------------------------------------------------------
+# vendor partition image
+# Built from source when a vendor filesystem type is declared;
+# alternatively copied verbatim from a board-provided prebuilt.
+ifdef BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE
+INTERNAL_VENDORIMAGE_FILES := \
+ $(filter $(TARGET_OUT_VENDOR)/%,\
+ $(ALL_DEFAULT_INSTALLED_MODULES)\
+ $(ALL_PDK_FUSION_FILES))
+
+# platform.zip depends on $(INTERNAL_VENDORIMAGE_FILES).
+$(INSTALLED_PLATFORM_ZIP) : $(INTERNAL_VENDORIMAGE_FILES)
+
+# File list for the vendor tree (mirrors installed-files.txt).
+INSTALLED_FILES_FILE_VENDOR := $(PRODUCT_OUT)/installed-files-vendor.txt
+$(INSTALLED_FILES_FILE_VENDOR) : $(INTERNAL_VENDORIMAGE_FILES)
+ @echo Installed file list: $@
+ @mkdir -p $(dir $@)
+ @rm -f $@
+ $(hide) build/tools/fileslist.py $(TARGET_OUT_VENDOR) > $@
+
+vendorimage_intermediates := \
+ $(call intermediates-dir-for,PACKAGING,vendor)
+BUILT_VENDORIMAGE_TARGET := $(PRODUCT_OUT)/vendor.img
+
+# Build vendor.img from $(TARGET_OUT_VENDOR) via build_image.py
+# (fsck skipped), then assert it fits the vendor partition.
+define build-vendorimage-target
+ $(call pretty,"Target vendor fs image: $(INSTALLED_VENDORIMAGE_TARGET)")
+ @mkdir -p $(TARGET_OUT_VENDOR)
+ @mkdir -p $(vendorimage_intermediates) && rm -rf $(vendorimage_intermediates)/vendor_image_info.txt
+ $(call generate-userimage-prop-dictionary, $(vendorimage_intermediates)/vendor_image_info.txt, skip_fsck=true)
+ $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH \
+ ./build/tools/releasetools/build_image.py \
+ $(TARGET_OUT_VENDOR) $(vendorimage_intermediates)/vendor_image_info.txt $(INSTALLED_VENDORIMAGE_TARGET) $(TARGET_OUT)
+ $(hide) $(call assert-max-image-size,$(INSTALLED_VENDORIMAGE_TARGET),$(BOARD_VENDORIMAGE_PARTITION_SIZE))
+endef
+
+# We just build this directly to the install location.
+INSTALLED_VENDORIMAGE_TARGET := $(BUILT_VENDORIMAGE_TARGET)
+$(INSTALLED_VENDORIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_VENDORIMAGE_FILES) $(INSTALLED_FILES_FILE_VENDOR)
+ $(build-vendorimage-target)
+
+.PHONY: vendorimage-nodeps
+vendorimage-nodeps: | $(INTERNAL_USERIMAGES_DEPS)
+ $(build-vendorimage-target)
+
+else ifdef BOARD_PREBUILT_VENDORIMAGE
+INSTALLED_VENDORIMAGE_TARGET := $(PRODUCT_OUT)/vendor.img
+$(eval $(call copy-one-file,$(BOARD_PREBUILT_VENDORIMAGE),$(INSTALLED_VENDORIMAGE_TARGET)))
+endif
+
+# -----------------------------------------------------------------
+# bring in the installer image generation defines if necessary
+ifeq ($(TARGET_USE_DISKINSTALLER),true)
+include bootable/diskinstaller/config.mk
+endif
+
+# -----------------------------------------------------------------
+# host tools needed to build dist and OTA packages
+
+# Decide whether this build can produce an OTA package at all.
+# Start optimistic; any one of the conditions below switches it off.
+build_ota_package := true
+# The product explicitly opted out.
+ifeq (true,$(TARGET_SKIP_OTA_PACKAGE))
+build_ota_package := false
+endif
+# Darwin hosts don't build OTA packages.
+ifeq (darwin,$(BUILD_OS))
+build_ota_package := false
+endif
+# Sanitized (e.g. ASAN) target builds are not OTA material.
+ifneq (,$(strip $(SANITIZE_TARGET)))
+build_ota_package := false
+endif
+# Neither is the SDK product...
+ifeq (sdk,$(TARGET_PRODUCT))
+build_ota_package := false
+endif
+# ...nor generic/emulator devices.
+ifneq (,$(filter generic%,$(TARGET_DEVICE)))
+build_ota_package := false
+endif
+# No kernel means nothing to flash.
+ifeq (true,$(TARGET_NO_KERNEL))
+build_ota_package := false
+endif
+# Without a recovery fstab the updater cannot mount partitions.
+ifeq (,$(recovery_fstab))
+build_ota_package := false
+endif
+# PDK builds don't produce OTA packages either.
+ifeq (true,$(TARGET_BUILD_PDK))
+build_ota_package := false
+endif
+
+ifeq ($(build_ota_package),true)
+# Host executables and jars that OTA/packaging scripts invoke.
+OTATOOLS := $(HOST_OUT_EXECUTABLES)/minigzip \
+ $(HOST_OUT_EXECUTABLES)/aapt \
+ $(HOST_OUT_EXECUTABLES)/mkbootfs \
+ $(HOST_OUT_EXECUTABLES)/mkbootimg \
+ $(HOST_OUT_EXECUTABLES)/fs_config \
+ $(HOST_OUT_EXECUTABLES)/zipalign \
+ $(HOST_OUT_EXECUTABLES)/bsdiff \
+ $(HOST_OUT_EXECUTABLES)/imgdiff \
+ $(HOST_OUT_JAVA_LIBRARIES)/dumpkey.jar \
+ $(HOST_OUT_JAVA_LIBRARIES)/signapk.jar \
+ $(HOST_OUT_JAVA_LIBRARIES)/BootSignature.jar \
+ $(HOST_OUT_EXECUTABLES)/mkuserimg.sh \
+ $(HOST_OUT_EXECUTABLES)/make_ext4fs \
+ $(HOST_OUT_EXECUTABLES)/mksquashfsimage.sh \
+ $(HOST_OUT_EXECUTABLES)/mksquashfs \
+ $(HOST_OUT_EXECUTABLES)/mkf2fsuserimg.sh \
+ $(HOST_OUT_EXECUTABLES)/make_f2fs \
+ $(HOST_OUT_EXECUTABLES)/simg2img \
+ $(HOST_OUT_EXECUTABLES)/e2fsck \
+ $(HOST_OUT_EXECUTABLES)/build_verity_tree \
+ $(HOST_OUT_EXECUTABLES)/verity_signer \
+ $(HOST_OUT_EXECUTABLES)/append2simg \
+ $(HOST_OUT_EXECUTABLES)/img2simg \
+ $(HOST_OUT_EXECUTABLES)/boot_signer \
+ $(HOST_OUT_EXECUTABLES)/fec \
+ $(HOST_OUT_EXECUTABLES)/brillo_update_payload \
+ $(HOST_OUT_EXECUTABLES)/lib/shflags/shflags \
+ $(HOST_OUT_EXECUTABLES)/delta_generator \
+ $(BLK_ALLOC_TO_BASE_FS)
+
+# Shared libraries.
+OTATOOLS += \
+ $(HOST_LIBRARY_PATH)/libc++$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/liblog$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/libcutils$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/libselinux$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/libcrypto-host$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/libdivsufsort$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/libdivsufsort64$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/libext2fs-host$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/libext2_blkid-host$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/libext2_com_err-host$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/libext2_e2p-host$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/libext2_profile-host$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/libext2_quota-host$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/libext2_uuid-host$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/libconscrypt_openjdk_jni$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/libbrillo$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/libbrillo-stream$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/libbrillo-http$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/libchrome$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/libcurl-host$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/libevent-host$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/libprotobuf-cpp-lite$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/libssl-host$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/libz-host$(HOST_SHLIB_SUFFIX) \
+ $(HOST_LIBRARY_PATH)/libbase$(HOST_SHLIB_SUFFIX)
+
+.PHONY: otatools
+otatools: $(OTATOOLS)
+
+# Self-contained zip of all the tools above plus the releasetools
+# scripts, signing keys and verity helpers, laid out relative to a
+# staging dir so it can be unpacked and used outside the tree.
+BUILT_OTATOOLS_PACKAGE := $(PRODUCT_OUT)/otatools.zip
+$(BUILT_OTATOOLS_PACKAGE): zip_root := $(call intermediates-dir-for,PACKAGING,otatools)/otatools
+
+$(BUILT_OTATOOLS_PACKAGE): $(OTATOOLS) | $(ACP)
+ @echo "Package OTA tools: $@"
+ $(hide) rm -rf $@ $(zip_root)
+ $(hide) mkdir -p $(dir $@) $(zip_root)/bin $(zip_root)/framework $(zip_root)/releasetools $(zip_root)/system/extras/verity
+ $(call copy-files-with-structure,$(OTATOOLS),$(HOST_OUT)/,$(zip_root))
+ $(hide) $(ACP) $(HOST_OUT_JAVA_LIBRARIES)/VeritySigner.jar $(zip_root)/framework/
+ $(hide) $(ACP) -p system/extras/verity/build_verity_metadata.py $(zip_root)/system/extras/verity/
+ $(hide) $(ACP) -r -d -p build/tools/releasetools/* $(zip_root)/releasetools
+ $(hide) rm -rf $@ $(zip_root)/releasetools/*.pyc
+ $(hide) (cd $(zip_root) && zip -qryX $(abspath $@) *)
+ $(hide) zip -qryX $(abspath $@) build/target/product/security/
+ $(hide) find device vendor -name \*.pk8 -o -name verifiedboot\* -o -name \*.x509.pem -o -name oem\*.prop | xargs zip -qryX $(abspath $@)>/dev/null || true
+
+.PHONY: otatools-package
+otatools-package: $(BUILT_OTATOOLS_PACKAGE)
+
+endif # build_ota_package
+
+# -----------------------------------------------------------------
+# A zip of the directories that map to the target filesystem.
+# This zip can be used to create an OTA package or filesystem image
+# as a post-build step.
+#
+name := $(TARGET_PRODUCT)
+ifeq ($(TARGET_BUILD_TYPE),debug)
+ name := $(name)_debug
+endif
+name := $(name)-target_files-$(FILE_NAME_TAG)
+
+# NOTE: "intermediates" is a scratch variable reused by earlier
+# sections of this file; it is snapshotted into a target-specific
+# variable immediately so the recipe sees the right value.
+intermediates := $(call intermediates-dir-for,PACKAGING,target_files)
+BUILT_TARGET_FILES_PACKAGE := $(intermediates)/$(name).zip
+$(BUILT_TARGET_FILES_PACKAGE): intermediates := $(intermediates)
+$(BUILT_TARGET_FILES_PACKAGE): \
+ zip_root := $(intermediates)/$(name)
+
+# $(1): Directory to copy
+# $(2): Location to copy it to
+# The "ls -A" is to prevent "acp s/* d" from failing if s is empty.
+define package_files-copy-root
+ if [ -d "$(strip $(1))" -a "$$(ls -A $(1))" ]; then \
+ mkdir -p $(2) && \
+ $(ACP) -rd $(strip $(1))/* $(2); \
+ fi
+endef
+
+built_ota_tools :=
+
+# We can't build static executables when SANITIZE_TARGET=address
+ifeq ($(strip $(SANITIZE_TARGET)),)
+built_ota_tools += \
+ $(call intermediates-dir-for,EXECUTABLES,updater,,,$(TARGET_PREFER_32_BIT))/updater
+endif
+
+$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_OTA_TOOLS := $(built_ota_tools)
+
+$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_RECOVERY_API_VERSION := $(RECOVERY_API_VERSION)
+$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_RECOVERY_FSTAB_VERSION := $(RECOVERY_FSTAB_VERSION)
+
+# Directory holding the device's releasetools extension scripts.
+ifeq ($(TARGET_RELEASETOOLS_EXTENSIONS),)
+# default to common dir for device vendor
+$(BUILT_TARGET_FILES_PACKAGE): tool_extensions := $(TARGET_DEVICE_DIR)/../common
+else
+$(BUILT_TARGET_FILES_PACKAGE): tool_extensions := $(TARGET_RELEASETOOLS_EXTENSIONS)
+endif
+
+# Build OTA tools if not using the AB Updater.
+ifneq ($(AB_OTA_UPDATER),true)
+$(BUILT_TARGET_FILES_PACKAGE): $(built_ota_tools)
+endif
+
+# If we are using recovery as boot, output recovery files to BOOT/.
+ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
+$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_RECOVERY_OUT := BOOT
+else
+$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_RECOVERY_OUT := RECOVERY
+endif
+
+# Depending on the various images guarantees that the underlying
+# directories are up-to-date.
+$(BUILT_TARGET_FILES_PACKAGE): \
+    $(INSTALLED_BOOTIMAGE_TARGET) \
+    $(INSTALLED_RADIOIMAGE_TARGET) \
+    $(INSTALLED_RECOVERYIMAGE_TARGET) \
+    $(INSTALLED_SYSTEMIMAGE) \
+    $(INSTALLED_USERDATAIMAGE_TARGET) \
+    $(INSTALLED_CACHEIMAGE_TARGET) \
+    $(INSTALLED_VENDORIMAGE_TARGET) \
+    $(INSTALLED_SYSTEMOTHERIMAGE_TARGET) \
+    $(INSTALLED_ANDROID_INFO_TXT_TARGET) \
+    $(SELINUX_FC) \
+    $(APKCERTS_FILE) \
+    $(HOST_OUT_EXECUTABLES)/fs_config \
+    | $(ACP)
+	@echo "Package target files: $@"
+	$(hide) rm -rf $@ $(zip_root)
+	$(hide) mkdir -p $(dir $@) $(zip_root)
+# Recovery components are staged under $(PRIVATE_RECOVERY_OUT), which is
+# BOOT when the board uses the recovery image as boot, RECOVERY otherwise.
+ifneq (,$(INSTALLED_RECOVERYIMAGE_TARGET)$(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)))
+	@# Components of the recovery image
+	$(hide) mkdir -p $(zip_root)/$(PRIVATE_RECOVERY_OUT)
+	$(hide) $(call package_files-copy-root, \
+	    $(TARGET_RECOVERY_ROOT_OUT),$(zip_root)/$(PRIVATE_RECOVERY_OUT)/RAMDISK)
+ifdef INSTALLED_KERNEL_TARGET
+	$(hide) $(ACP) $(INSTALLED_KERNEL_TARGET) $(zip_root)/$(PRIVATE_RECOVERY_OUT)/kernel
+endif
+ifdef INSTALLED_2NDBOOTLOADER_TARGET
+	$(hide) $(ACP) \
+	    $(INSTALLED_2NDBOOTLOADER_TARGET) $(zip_root)/$(PRIVATE_RECOVERY_OUT)/second
+endif
+ifdef BOARD_KERNEL_CMDLINE
+	$(hide) echo "$(BOARD_KERNEL_CMDLINE)" > $(zip_root)/$(PRIVATE_RECOVERY_OUT)/cmdline
+endif
+ifdef BOARD_KERNEL_BASE
+	$(hide) echo "$(BOARD_KERNEL_BASE)" > $(zip_root)/$(PRIVATE_RECOVERY_OUT)/base
+endif
+ifdef BOARD_KERNEL_PAGESIZE
+	$(hide) echo "$(BOARD_KERNEL_PAGESIZE)" > $(zip_root)/$(PRIVATE_RECOVERY_OUT)/pagesize
+endif
+endif # INSTALLED_RECOVERYIMAGE_TARGET defined or BOARD_USES_RECOVERY_AS_BOOT is true
+	@# Components of the boot image
+	$(hide) mkdir -p $(zip_root)/BOOT
+# When the system image doubles as the root filesystem, the root dir goes to
+# ROOT/ instead of BOOT/RAMDISK.
+ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
+	$(hide) mkdir -p $(zip_root)/ROOT
+	$(hide) $(call package_files-copy-root, \
+	    $(TARGET_ROOT_OUT),$(zip_root)/ROOT)
+else
+	$(hide) $(call package_files-copy-root, \
+	    $(TARGET_ROOT_OUT),$(zip_root)/BOOT/RAMDISK)
+endif
+	@# If we are using recovery as boot, this is already done when processing recovery.
+ifneq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
+ifdef INSTALLED_KERNEL_TARGET
+	$(hide) $(ACP) $(INSTALLED_KERNEL_TARGET) $(zip_root)/BOOT/kernel
+endif
+ifdef INSTALLED_2NDBOOTLOADER_TARGET
+	$(hide) $(ACP) \
+	    $(INSTALLED_2NDBOOTLOADER_TARGET) $(zip_root)/BOOT/second
+endif
+ifdef BOARD_KERNEL_CMDLINE
+	$(hide) echo "$(BOARD_KERNEL_CMDLINE)" > $(zip_root)/BOOT/cmdline
+endif
+ifdef BOARD_KERNEL_BASE
+	$(hide) echo "$(BOARD_KERNEL_BASE)" > $(zip_root)/BOOT/base
+endif
+ifdef BOARD_KERNEL_PAGESIZE
+	$(hide) echo "$(BOARD_KERNEL_PAGESIZE)" > $(zip_root)/BOOT/pagesize
+endif
+endif # BOARD_USES_RECOVERY_AS_BOOT
+# Copy every radio image into RADIO/ (one mkdir per image is redundant but
+# harmless).
+	$(hide) $(foreach t,$(INSTALLED_RADIOIMAGE_TARGET),\
+	    mkdir -p $(zip_root)/RADIO; \
+	    $(ACP) $(t) $(zip_root)/RADIO/$(notdir $(t));)
+	@# Contents of the system image
+	$(hide) $(call package_files-copy-root, \
+	    $(SYSTEMIMAGE_SOURCE_DIR),$(zip_root)/SYSTEM)
+	@# Contents of the data image
+	$(hide) $(call package_files-copy-root, \
+	    $(TARGET_OUT_DATA),$(zip_root)/DATA)
+ifdef BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE
+	@# Contents of the vendor image
+	$(hide) $(call package_files-copy-root, \
+	    $(TARGET_OUT_VENDOR),$(zip_root)/VENDOR)
+endif
+ifdef INSTALLED_SYSTEMOTHERIMAGE_TARGET
+	@# Contents of the system_other image
+	$(hide) $(call package_files-copy-root, \
+	    $(TARGET_OUT_SYSTEM_OTHER),$(zip_root)/SYSTEM_OTHER)
+endif
+	@# Extra contents of the OTA package
+	$(hide) mkdir -p $(zip_root)/OTA
+	$(hide) $(ACP) $(INSTALLED_ANDROID_INFO_TXT_TARGET) $(zip_root)/OTA/
+ifneq ($(AB_OTA_UPDATER),true)
+ifneq ($(built_ota_tools),)
+	$(hide) mkdir -p $(zip_root)/OTA/bin
+	$(hide) $(ACP) $(PRIVATE_OTA_TOOLS) $(zip_root)/OTA/bin/
+endif
+endif
+	@# Files that do not end up in any images, but are necessary to
+	@# build them.
+	$(hide) mkdir -p $(zip_root)/META
+	$(hide) $(ACP) $(APKCERTS_FILE) $(zip_root)/META/apkcerts.txt
+	$(hide) if test -e $(tool_extensions)/releasetools.py; then $(ACP) $(tool_extensions)/releasetools.py $(zip_root)/META/; fi
+	$(hide) echo "$(PRODUCT_OTA_PUBLIC_KEYS)" > $(zip_root)/META/otakeys.txt
+	$(hide) $(ACP) $(SELINUX_FC) $(zip_root)/META/file_contexts.bin
+# misc_info.txt is (re)created here with ">" and appended to below with ">>".
+	$(hide) echo "recovery_api_version=$(PRIVATE_RECOVERY_API_VERSION)" > $(zip_root)/META/misc_info.txt
+	$(hide) echo "fstab_version=$(PRIVATE_RECOVERY_FSTAB_VERSION)" >> $(zip_root)/META/misc_info.txt
+ifdef BOARD_FLASH_BLOCK_SIZE
+	$(hide) echo "blocksize=$(BOARD_FLASH_BLOCK_SIZE)" >> $(zip_root)/META/misc_info.txt
+endif
+ifdef BOARD_BOOTIMAGE_PARTITION_SIZE
+	$(hide) echo "boot_size=$(BOARD_BOOTIMAGE_PARTITION_SIZE)" >> $(zip_root)/META/misc_info.txt
+endif
+# Record recovery_as_boot only when the board actually uses the recovery
+# image as boot. The previous condition compared against the empty string,
+# which inverted the test: it emitted an empty "recovery_as_boot=" line for
+# ordinary boards and emitted nothing when the feature was enabled.
+ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
+	$(hide) echo "recovery_as_boot=$(BOARD_USES_RECOVERY_AS_BOOT)" >> $(zip_root)/META/misc_info.txt
+endif
+# Tell the release tools when no recovery image exists at all.
+ifeq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
+	$(hide) echo "no_recovery=true" >> $(zip_root)/META/misc_info.txt
+endif
+ifdef BOARD_RECOVERYIMAGE_PARTITION_SIZE
+	$(hide) echo "recovery_size=$(BOARD_RECOVERYIMAGE_PARTITION_SIZE)" >> $(zip_root)/META/misc_info.txt
+endif
+ifdef BOARD_HAS_EXT4_RESERVED_BLOCKS
+	$(hide) echo "has_ext4_reserved_blocks=$(BOARD_HAS_EXT4_RESERVED_BLOCKS)" >> $(zip_root)/META/misc_info.txt
+endif
+ifdef TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS
+	@# TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS can be empty to indicate that nothing but defaults should be used.
+	$(hide) echo "recovery_mount_options=$(TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS)" >> $(zip_root)/META/misc_info.txt
+else
+	$(hide) echo "recovery_mount_options=$(DEFAULT_TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS)" >> $(zip_root)/META/misc_info.txt
+endif
+	$(hide) echo "tool_extensions=$(tool_extensions)" >> $(zip_root)/META/misc_info.txt
+	$(hide) echo "default_system_dev_certificate=$(DEFAULT_SYSTEM_DEV_CERTIFICATE)" >> $(zip_root)/META/misc_info.txt
+ifdef PRODUCT_EXTRA_RECOVERY_KEYS
+	$(hide) echo "extra_recovery_keys=$(PRODUCT_EXTRA_RECOVERY_KEYS)" >> $(zip_root)/META/misc_info.txt
+endif
+# Single quotes: mkbootimg args may contain double quotes and shell metachars.
+	$(hide) echo 'mkbootimg_args=$(BOARD_MKBOOTIMG_ARGS)' >> $(zip_root)/META/misc_info.txt
+	$(hide) echo 'mkbootimg_version_args=$(INTERNAL_MKBOOTIMG_VERSION_ARGS)' >> $(zip_root)/META/misc_info.txt
+	$(hide) echo "use_set_metadata=1" >> $(zip_root)/META/misc_info.txt
+	$(hide) echo "multistage_support=1" >> $(zip_root)/META/misc_info.txt
+	$(hide) echo "update_rename_support=1" >> $(zip_root)/META/misc_info.txt
+	$(hide) echo "blockimgdiff_versions=1,2,3,4" >> $(zip_root)/META/misc_info.txt
+ifneq ($(OEM_THUMBPRINT_PROPERTIES),)
+	# OTA scripts are only interested in fingerprint related properties
+	$(hide) echo "oem_fingerprint_properties=$(OEM_THUMBPRINT_PROPERTIES)" >> $(zip_root)/META/misc_info.txt
+endif
+# Base filesystem layout files, if the product provides them.
+ifneq ($(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_BASE_FS_PATH),)
+	$(hide) $(ACP) $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_BASE_FS_PATH) \
+	    $(zip_root)/META/$(notdir $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_BASE_FS_PATH))
+endif
+ifneq ($(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_BASE_FS_PATH),)
+	$(hide) $(ACP) $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_BASE_FS_PATH) \
+	    $(zip_root)/META/$(notdir $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_BASE_FS_PATH))
+endif
+ifneq ($(strip $(SANITIZE_TARGET)),)
+	# We need to create userdata.img with real data because the instrumented libraries are in userdata.img.
+	$(hide) echo "userdata_img_with_data=true" >> $(zip_root)/META/misc_info.txt
+endif
+ifeq ($(BOARD_USES_FULL_RECOVERY_IMAGE),true)
+	$(hide) echo "full_recovery_image=true" >> $(zip_root)/META/misc_info.txt
+endif
+	$(call generate-userimage-prop-dictionary, $(zip_root)/META/misc_info.txt)
+ifneq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
+	$(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \
+	    ./build/tools/releasetools/make_recovery_patch $(zip_root) $(zip_root)
+endif
+ifeq ($(AB_OTA_UPDATER),true)
+	@# When using the A/B updater, include the updater config files in the zip.
+	$(hide) $(ACP) $(TOPDIR)system/update_engine/update_engine.conf $(zip_root)/META/update_engine_config.txt
+	$(hide) for part in $(AB_OTA_PARTITIONS); do \
+	    echo "$${part}" >> $(zip_root)/META/ab_partitions.txt; \
+	done
+	$(hide) for conf in $(AB_OTA_POSTINSTALL_CONFIG); do \
+	    echo "$${conf}" >> $(zip_root)/META/postinstall_config.txt; \
+	done
+	@# Include the build type in META/misc_info.txt so the server can easily differentiate production builds.
+	$(hide) echo "build_type=$(TARGET_BUILD_VARIANT)" >> $(zip_root)/META/misc_info.txt
+	$(hide) echo "ab_update=true" >> $(zip_root)/META/misc_info.txt
+ifdef OSRELEASED_DIRECTORY
+	$(hide) $(ACP) $(TARGET_OUT_ETC)/$(OSRELEASED_DIRECTORY)/product_id $(zip_root)/META/product_id.txt
+	$(hide) $(ACP) $(TARGET_OUT_ETC)/$(OSRELEASED_DIRECTORY)/product_version $(zip_root)/META/product_version.txt
+endif
+endif
+ifeq ($(BREAKPAD_GENERATE_SYMBOLS),true)
+	@# If breakpad symbols have been generated, add them to the zip.
+	$(hide) $(ACP) -r $(TARGET_OUT_BREAKPAD) $(zip_root)/BREAKPAD
+endif
+ifdef BOARD_PREBUILT_VENDORIMAGE
+	$(hide) mkdir -p $(zip_root)/IMAGES
+	$(hide) cp $(INSTALLED_VENDORIMAGE_TARGET) $(zip_root)/IMAGES/
+endif
+	@# Zip everything up, preserving symlinks and placing META/ files first to
+	@# help early validation of the .zip file while uploading it.
+	$(hide) (cd $(zip_root) && \
+	    zip -qryX ../$(notdir $@) ./META && \
+	    zip -qryXu ../$(notdir $@) .)
+	@# Run fs_config on all the system, vendor, boot ramdisk,
+	@# and recovery ramdisk files in the zip, and save the output
+	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="SYSTEM/" } /^SYSTEM\// {print "system/" $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/filesystem_config.txt
+	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="VENDOR/" } /^VENDOR\// {print "vendor/" $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/vendor_filesystem_config.txt
+ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
+	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="ROOT/" } /^ROOT\// {print $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/root_filesystem_config.txt
+endif
+	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="BOOT/RAMDISK/" } /^BOOT\/RAMDISK\// {print $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/boot_filesystem_config.txt
+ifneq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
+	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="RECOVERY/RAMDISK/" } /^RECOVERY\/RAMDISK\// {print $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/recovery_filesystem_config.txt
+endif
+ifdef INSTALLED_SYSTEMOTHERIMAGE_TARGET
+	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="SYSTEM_OTHER/" } /^SYSTEM_OTHER\// { print "system/" $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/system_other_filesystem_config.txt
+endif
+# Add the fs_config outputs into the already-built zip, then let the release
+# tools regenerate the sparse images (IMAGES/) inside it.
+	$(hide) (cd $(zip_root) && zip -qX ../$(notdir $@) META/*filesystem_config.txt)
+	$(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \
+	    ./build/tools/releasetools/add_img_to_target_files -a -v -p $(HOST_OUT) $@
+
+.PHONY: target-files-package
+target-files-package: $(BUILT_TARGET_FILES_PACKAGE)
+
+# Dist the target-files zip only when it was explicitly named on the make
+# command line.
+ifneq ($(filter $(MAKECMDGOALS),target-files-package),)
+$(call dist-for-goals, target-files-package, $(BUILT_TARGET_FILES_PACKAGE))
+endif
+
+ifeq ($(build_ota_package),true)
+# -----------------------------------------------------------------
+# OTA update package
+
+# Output: $(PRODUCT_OUT)/$(TARGET_PRODUCT)[_debug]-ota-$(FILE_NAME_TAG).zip
+name := $(TARGET_PRODUCT)
+ifeq ($(TARGET_BUILD_TYPE),debug)
+  name := $(name)_debug
+endif
+name := $(name)-ota-$(FILE_NAME_TAG)
+
+INTERNAL_OTA_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name).zip
+
+$(INTERNAL_OTA_PACKAGE_TARGET): KEY_CERT_PAIR := $(DEFAULT_KEY_CERT_PAIR)
+
+# Build a signed block-based OTA package out of the target-files zip.
+$(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE)
+	@echo "Package OTA: $@"
+	$(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \
+	    ./build/tools/releasetools/ota_from_target_files -v \
+	    --block \
+	    -p $(HOST_OUT) \
+	    -k $(KEY_CERT_PAIR) \
+	    $(if $(OEM_OTA_CONFIG), -o $(OEM_OTA_CONFIG)) \
+	    $(BUILT_TARGET_FILES_PACKAGE) $@
+
+.PHONY: otapackage
+otapackage: $(INTERNAL_OTA_PACKAGE_TARGET)
+
+endif # build_ota_package
+
+# -----------------------------------------------------------------
+# The update package
+
+# Output: $(PRODUCT_OUT)/$(TARGET_PRODUCT)[_debug]-img-$(FILE_NAME_TAG).zip
+name := $(TARGET_PRODUCT)
+ifeq ($(TARGET_BUILD_TYPE),debug)
+  name := $(name)_debug
+endif
+name := $(name)-img-$(FILE_NAME_TAG)
+
+INTERNAL_UPDATE_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name).zip
+
+# Extract a fastboot-flashable image zip from the target-files package.
+$(INTERNAL_UPDATE_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE)
+	@echo "Package: $@"
+	$(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \
+	    ./build/tools/releasetools/img_from_target_files -v \
+	    -p $(HOST_OUT) \
+	    $(BUILT_TARGET_FILES_PACKAGE) $@
+
+.PHONY: updatepackage
+updatepackage: $(INTERNAL_UPDATE_PACKAGE_TARGET)
+
+# -----------------------------------------------------------------
+# A zip of the symbols directory. Keep the full paths to make it
+# more obvious where these files came from.
+#
+name := $(TARGET_PRODUCT)
+ifeq ($(TARGET_BUILD_TYPE),debug)
+  name := $(name)_debug
+endif
+name := $(name)-symbols-$(FILE_NAME_TAG)
+
+SYMBOLS_ZIP := $(PRODUCT_OUT)/$(name).zip
+# For apps_only build we'll establish the dependency later in build/core/main.mk.
+ifndef TARGET_BUILD_APPS
+$(SYMBOLS_ZIP): $(INSTALLED_SYSTEMIMAGE) $(INSTALLED_BOOTIMAGE_TARGET)
+endif
+$(SYMBOLS_ZIP):
+	@echo "Package symbols: $@"
+	$(hide) rm -rf $@
+# Create the unstripped dir too so the zip step has a directory to archive
+# even when no unstripped binaries were produced.
+	$(hide) mkdir -p $(dir $@) $(TARGET_OUT_UNSTRIPPED)
+	$(hide) zip -qrX $@ $(TARGET_OUT_UNSTRIPPED)
+
+# -----------------------------------------------------------------
+# A zip of the Android Apps. Not keeping full path so that we don't
+# include product names when distributing
+#
+name := $(TARGET_PRODUCT)
+ifeq ($(TARGET_BUILD_TYPE),debug)
+  name := $(name)_debug
+endif
+name := $(name)-apps-$(FILE_NAME_TAG)
+
+APPS_ZIP := $(PRODUCT_OUT)/$(name).zip
+$(APPS_ZIP): $(INSTALLED_SYSTEMIMAGE)
+	@echo "Package apps: $@"
+	$(hide) rm -rf $@
+	$(hide) mkdir -p $(dir $@)
+# zip cannot create an archive from an empty file list, so when no apps were
+# built we add and immediately delete a temp entry to get a valid empty zip.
+	$(hide) apps_to_zip=`find $(TARGET_OUT_APPS) $(TARGET_OUT_APPS_PRIVILEGED) -mindepth 2 -maxdepth 3 -name "*.apk"`; \
+	if [ -z "$$apps_to_zip" ]; then \
+	    echo "No apps to zip up. Generating empty apps archive." ; \
+	    a=$$(mktemp /tmp/XXXXXXX) && touch $$a && zip $@ $$a && zip -d $@ $$a; \
+	else \
+	    zip -qjX $@ $$apps_to_zip; \
+	fi
+
+#------------------------------------------------------------------
+# A zip of emma code coverage meta files. Generated for fully emma
+# instrumented build.
+#
+ifeq (true,$(EMMA_INSTRUMENT))
+EMMA_META_ZIP := $(PRODUCT_OUT)/emma_meta.zip
+# the dependency will be set up later in build/core/main.mk.
+$(EMMA_META_ZIP) :
+	@echo "Collecting Emma coverage meta files."
+	$(hide) find $(TARGET_COMMON_OUT_ROOT) $(HOST_COMMON_OUT_ROOT) -name "coverage.em" | \
+	    zip -@ -qX $@
+
+endif # EMMA_INSTRUMENT=true
+
+#------------------------------------------------------------------
+# A zip of Proguard obfuscation dictionary files.
+# Only for apps_only build.
+#
+ifdef TARGET_BUILD_APPS
+PROGUARD_DICT_ZIP := $(PRODUCT_OUT)/$(TARGET_PRODUCT)-proguard-dict-$(FILE_NAME_TAG).zip
+# the dependency will be set up later in build/core/main.mk.
+$(PROGUARD_DICT_ZIP) :
+	@echo "Packaging Proguard obfuscation dictionary files."
+# Ship each dictionary together with its unobfuscated classes.jar; use the
+# add-then-delete zipdummy trick to produce a valid empty zip otherwise.
+	$(hide) dict_files=`find $(TARGET_OUT_COMMON_INTERMEDIATES)/APPS -name proguard_dictionary -o -name jack_dictionary`; \
+	if [ -n "$$dict_files" ]; then \
+	    unobfuscated_jars=$${dict_files//proguard_dictionary/classes.jar}; \
+	    zip -qX $@ $$dict_files $$unobfuscated_jars; \
+	else \
+	    touch $(dir $@)/zipdummy; \
+	    (cd $(dir $@) && zip -q $(notdir $@) zipdummy); \
+	    zip -qd $@ zipdummy; \
+	    rm $(dir $@)/zipdummy; \
+	fi
+
+endif # TARGET_BUILD_APPS
+
+# -----------------------------------------------------------------
+# Convenience phony target that builds all registered Dalvik modules
+# ($(INTERNAL_DALVIK_MODULES)).
+.PHONY: dalvikfiles
+dalvikfiles: $(INTERNAL_DALVIK_MODULES)
+
+# -----------------------------------------------------------------
+# The emulator package
+ifeq ($(BUILD_EMULATOR),true)
+INTERNAL_EMULATOR_PACKAGE_FILES += \
+    $(HOST_OUT_EXECUTABLES)/emulator$(HOST_EXECUTABLE_SUFFIX) \
+    prebuilts/qemu-kernel/$(TARGET_ARCH)/kernel-qemu \
+    $(INSTALLED_RAMDISK_TARGET) \
+    $(INSTALLED_SYSTEMIMAGE) \
+    $(INSTALLED_USERDATAIMAGE_TARGET)
+
+name := $(TARGET_PRODUCT)-emulator-$(FILE_NAME_TAG)
+
+INTERNAL_EMULATOR_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name).zip
+
+# Flat (-j) zip of the emulator binary plus the images it boots.
+$(INTERNAL_EMULATOR_PACKAGE_TARGET): $(INTERNAL_EMULATOR_PACKAGE_FILES)
+	@echo "Package: $@"
+	$(hide) zip -qjX $@ $(INTERNAL_EMULATOR_PACKAGE_FILES)
+
+endif
+# -----------------------------------------------------------------
+# Old PDK stuffs, retired
+# The pdk package (Platform Development Kit)
+
+#ifneq (,$(filter pdk,$(MAKECMDGOALS)))
+# include development/pdk/Pdk.mk
+#endif
+
+
+# -----------------------------------------------------------------
+# The SDK
+
+# The SDK includes host-specific components, so it belongs under HOST_OUT.
+sdk_dir := $(HOST_OUT)/sdk/$(TARGET_PRODUCT)
+
+# Build a name that looks like:
+#
+# linux-x86 --> android-sdk_12345_linux-x86
+# darwin-x86 --> android-sdk_12345_mac-x86
+# windows-x86 --> android-sdk_12345_windows
+#
+sdk_name := android-sdk_$(FILE_NAME_TAG)
+ifeq ($(HOST_OS),darwin)
+  INTERNAL_SDK_HOST_OS_NAME := mac
+else
+  INTERNAL_SDK_HOST_OS_NAME := $(HOST_OS)
+endif
+# Windows SDK names carry no host-arch suffix (see examples above).
+ifneq ($(HOST_OS),windows)
+  INTERNAL_SDK_HOST_OS_NAME := $(INTERNAL_SDK_HOST_OS_NAME)-$(SDK_HOST_ARCH)
+endif
+sdk_name := $(sdk_name)_$(INTERNAL_SDK_HOST_OS_NAME)
+
+sdk_dep_file := $(sdk_dir)/sdk_deps.mk
+
+# ATREE_FILES is normally populated by the generated dep file; reset it
+# first so a stale value never leaks through.
+ATREE_FILES :=
+-include $(sdk_dep_file)
+
+# if we don't have a real list, then use "everything"
+ifeq ($(strip $(ATREE_FILES)),)
+ATREE_FILES := \
+    $(ALL_PREBUILT) \
+    $(ALL_DEFAULT_INSTALLED_MODULES) \
+    $(INSTALLED_RAMDISK_TARGET) \
+    $(ALL_DOCS) \
+    $(ALL_SDK_FILES)
+endif
+
+atree_dir := development/build
+
+
+sdk_atree_files := \
+    $(atree_dir)/sdk.exclude.atree \
+    $(atree_dir)/sdk-$(HOST_OS)-$(SDK_HOST_ARCH).atree
+
+# development/build/sdk-android-<TARGET_CPU_ABI>.atree is used to differentiate
+# between architecture models (e.g. ARMv5TE versus ARMv7) when copying
+# files like the kernel image. We use TARGET_CPU_ABI because we don't
+# have a better way to distinguish between CPU models.
+ifneq (,$(strip $(wildcard $(atree_dir)/sdk-android-$(TARGET_CPU_ABI).atree)))
+  sdk_atree_files += $(atree_dir)/sdk-android-$(TARGET_CPU_ABI).atree
+endif
+
+# Products may supply their own atree files instead of the default sdk.atree.
+ifneq ($(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SDK_ATREE_FILES),)
+sdk_atree_files += $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SDK_ATREE_FILES)
+else
+sdk_atree_files += $(atree_dir)/sdk.atree
+endif
+
+include $(BUILD_SYSTEM)/sdk_font.mk
+
+# Everything the SDK zip depends on: notices, docs, images, atree inputs
+# and the host tools that assemble it.
+deps := \
+    $(target_notice_file_txt) \
+    $(tools_notice_file_txt) \
+    $(OUT_DOCS)/offline-sdk-timestamp \
+    $(SYMBOLS_ZIP) \
+    $(INSTALLED_SYSTEMIMAGE) \
+    $(INSTALLED_USERDATAIMAGE_TARGET) \
+    $(INSTALLED_RAMDISK_TARGET) \
+    $(INSTALLED_SDK_BUILD_PROP_TARGET) \
+    $(INSTALLED_BUILD_PROP_TARGET) \
+    $(ATREE_FILES) \
+    $(sdk_atree_files) \
+    $(HOST_OUT_EXECUTABLES)/atree \
+    $(HOST_OUT_EXECUTABLES)/line_endings \
+    $(SDK_FONT_DEPS)
+
+INTERNAL_SDK_TARGET := $(sdk_dir)/$(sdk_name).zip
+$(INTERNAL_SDK_TARGET): PRIVATE_NAME := $(sdk_name)
+$(INTERNAL_SDK_TARGET): PRIVATE_DIR := $(sdk_dir)/$(sdk_name)
+$(INTERNAL_SDK_TARGET): PRIVATE_DEP_FILE := $(sdk_dep_file)
+$(INTERNAL_SDK_TARGET): PRIVATE_INPUT_FILES := $(sdk_atree_files)
+
+# Set SDK_GNU_ERROR to non-empty to fail when a GNU target is built.
+#
+#SDK_GNU_ERROR := true
+
+$(INTERNAL_SDK_TARGET): $(deps)
+	@echo "Package SDK: $@"
+	$(hide) rm -rf $(PRIVATE_DIR) $@
+# Warn (or fail when SDK_GNU_ERROR is set) about GNU-licensed modules that
+# would end up inside the SDK.
+	$(hide) for f in $(target_gnu_MODULES); do \
+	  if [ -f $$f ]; then \
+	    echo SDK: $(if $(SDK_GNU_ERROR),ERROR:,warning:) \
+	        including GNU target $$f >&2; \
+	    FAIL=$(SDK_GNU_ERROR); \
+	  fi; \
+	done; \
+	if [ $$FAIL ]; then exit 1; fi
+	$(hide) echo $(notdir $(SDK_FONT_DEPS)) | tr " " "\n" > $(SDK_FONT_TEMP)/fontsInSdk.txt
+# Lay out the SDK tree with atree, fix notices/line-endings/permissions and
+# zip it; on any failure wipe the partial tree so a rerun starts clean.
+	$(hide) ( \
+	    ATREE_STRIP="strip -x" \
+	    $(HOST_OUT_EXECUTABLES)/atree \
+	    $(addprefix -f ,$(PRIVATE_INPUT_FILES)) \
+	    -m $(PRIVATE_DEP_FILE) \
+	    -I . \
+	    -I $(PRODUCT_OUT) \
+	    -I $(HOST_OUT) \
+	    -I $(TARGET_COMMON_OUT_ROOT) \
+	    -v "PLATFORM_NAME=android-$(PLATFORM_VERSION)" \
+	    -v "OUT_DIR=$(OUT_DIR)" \
+	    -v "HOST_OUT=$(HOST_OUT)" \
+	    -v "TARGET_ARCH=$(TARGET_ARCH)" \
+	    -v "TARGET_CPU_ABI=$(TARGET_CPU_ABI)" \
+	    -v "DLL_EXTENSION=$(HOST_SHLIB_SUFFIX)" \
+	    -v "FONT_OUT=$(SDK_FONT_TEMP)" \
+	    -v "JACK_SDKTOOL_VERSION=$(JACK_SDKTOOL_VERSION)" \
+	    -o $(PRIVATE_DIR) && \
+	    cp -f $(target_notice_file_txt) \
+	        $(PRIVATE_DIR)/system-images/android-$(PLATFORM_VERSION)/$(TARGET_CPU_ABI)/NOTICE.txt && \
+	    cp -f $(tools_notice_file_txt) $(PRIVATE_DIR)/platform-tools/NOTICE.txt && \
+	    HOST_OUT_EXECUTABLES=$(HOST_OUT_EXECUTABLES) HOST_OS=$(HOST_OS) \
+	        development/build/tools/sdk_clean.sh $(PRIVATE_DIR) && \
+	    chmod -R ug+rwX $(PRIVATE_DIR) && \
+	    cd $(dir $@) && zip -rqX $(notdir $@) $(PRIVATE_NAME) \
+	) || ( rm -rf $(PRIVATE_DIR) $@ && exit 44 )
+
+
+# Is a Windows SDK requested? If so, we need some definitions from here
+# in order to find the Linux SDK used to create the Windows one.
+MAIN_SDK_NAME := $(sdk_name)
+MAIN_SDK_DIR := $(sdk_dir)
+MAIN_SDK_ZIP := $(INTERNAL_SDK_TARGET)
+ifneq ($(filter win_sdk winsdk-tools,$(MAKECMDGOALS)),)
+include $(TOPDIR)development/build/tools/windows_sdk.mk
+endif
+
+# -----------------------------------------------------------------
+# Findbugs
+INTERNAL_FINDBUGS_XML_TARGET := $(PRODUCT_OUT)/findbugs.xml
+INTERNAL_FINDBUGS_HTML_TARGET := $(PRODUCT_OUT)/findbugs.html
+# Merge all per-module findbugs reports into a single XML file, then render
+# it to HTML.
+$(INTERNAL_FINDBUGS_XML_TARGET): $(ALL_FINDBUGS_FILES)
+	@echo UnionBugs: $@
+	$(hide) $(FINDBUGS_DIR)/unionBugs $(ALL_FINDBUGS_FILES) \
+	> $@
+$(INTERNAL_FINDBUGS_HTML_TARGET): $(INTERNAL_FINDBUGS_XML_TARGET)
+	@echo ConvertXmlToText: $@
+	$(hide) $(FINDBUGS_DIR)/convertXmlToText -html:fancy.xsl \
+	$(INTERNAL_FINDBUGS_XML_TARGET) > $@
+
+# -----------------------------------------------------------------
+# Findbugs
+
+# -----------------------------------------------------------------
+# These are some additional build tasks that need to be run.
+ifneq ($(dont_bother),true)
+include $(sort $(wildcard $(BUILD_SYSTEM)/tasks/*.mk))
+-include $(sort $(wildcard vendor/*/build/tasks/*.mk))
+-include $(sort $(wildcard device/*/build/tasks/*.mk))
+-include $(sort $(wildcard product/*/build/tasks/*.mk))
+# Also the project-specific tasks
+-include $(sort $(wildcard vendor/*/*/build/tasks/*.mk))
+-include $(sort $(wildcard device/*/*/build/tasks/*.mk))
+-include $(sort $(wildcard product/*/*/build/tasks/*.mk))
+endif
+
+include $(BUILD_SYSTEM)/product-graph.mk
+
+# -----------------------------------------------------------------
+# Create SDK repository packages. Must be done after tasks/* since
+# we need the addon rules defined.
+ifneq ($(sdk_repo_goal),)
+include $(TOPDIR)development/build/tools/sdk_repo.mk
+endif
diff --git a/core/aapt2.mk b/core/aapt2.mk
new file mode 100644
index 0000000000000000000000000000000000000000..ccc45357632a19ebdab028f40238635667c1b7a1
--- /dev/null
+++ b/core/aapt2.mk
@@ -0,0 +1,89 @@
+######################################
+# Compile resource with AAPT2
+# Input variables:
+# full_android_manifest,
+# my_res_resources, my_overlay_resources,
+# my_compiled_res_base_dir, my_res_package,
+# R_file_stamp, proguard_options_file
+# my_generated_res_dirs: Resources generated during the build process and we have to compile them in a single run of aapt2.
+# my_generated_res_dirs_deps: the dependency to use for my_generated_res_dirs.
+#
+# Output variables:
+# my_res_resources_flat, my_overlay_resources_flat,
+# my_generated_resources_flata
+#
+######################################
+
+
+# Compile all the resource files.
+# For each resource file: compute its compiled (.flat) output path, eval a
+# per-file compile rule, and collect the output path into the list.
+my_res_resources_flat := \
+    $(foreach r, $(my_res_resources),\
+      $(eval o := $(call aapt2-compiled-resource-out-file,$(r),$(my_compiled_res_base_dir)))\
+      $(eval $(call aapt2-compile-one-resource-file-rule,$(r),$(o)))\
+      $(o))
+
+# Same pattern for overlay resources; kept separate so link order can place
+# overlays after the base resources.
+my_overlay_resources_flat := \
+    $(foreach r, $(my_overlay_resources),\
+      $(eval o := $(call aapt2-compiled-resource-out-file,$(r),$(my_compiled_res_base_dir)))\
+      $(eval $(call aapt2-compile-one-resource-file-rule,$(r),$(o)))\
+      $(o))
+
+my_generated_resources_flata :=
+# Compile generated resources into a single archive, since they have to be
+# compiled in one aapt2 run.
+ifneq ($(my_generated_res_dirs),)
+my_generated_resources_flata := $(my_compiled_res_base_dir)/gen_res.flata
+$(my_generated_resources_flata): PRIVATE_SOURCE_RES_DIRS := $(my_generated_res_dirs)
+$(my_generated_resources_flata) : $(my_generated_res_dirs_deps)
+	@echo "AAPT2 compile $@ <- $(PRIVATE_SOURCE_RES_DIRS)"
+	$(call aapt2-compile-resource-dirs)
+
+# NOTE: the former "my_generated_resources_flata += $(my_generated_resources_flata)"
+# self-append (immediate expansion, ":=" flavor) duplicated gen_res.flata in
+# the overlay list passed to aapt2-link; removed.
+endif
+
+# Product-wide aapt2 flags apply to every compile rule generated above.
+$(my_res_resources_flat) $(my_overlay_resources_flat) $(my_generated_resources_flata): \
+  PRIVATE_AAPT2_CFLAGS := $(PRODUCT_AAPT2_CFLAGS)
+
+# package-res.apk of each static Android library, in reverse declaration
+# order (reverse-list), and of each shared Android library.
+my_static_library_resources := $(foreach l, $(call reverse-list,$(LOCAL_STATIC_ANDROID_LIBRARIES)),\
+    $(call intermediates-dir-for,JAVA_LIBRARIES,$(l),,COMMON)/package-res.apk)
+my_shared_library_resources := $(foreach l, $(LOCAL_SHARED_ANDROID_LIBRARIES),\
+    $(call intermediates-dir-for,JAVA_LIBRARIES,$(l),,COMMON)/package-res.apk)
+
+ifneq ($(my_static_library_resources),)
+$(my_res_package): PRIVATE_AAPT_FLAGS += --auto-add-overlay
+endif
+
+$(my_res_package): PRIVATE_RES_FLAT := $(my_res_resources_flat)
+$(my_res_package): PRIVATE_OVERLAY_FLAT := $(my_static_library_resources) $(my_generated_resources_flata) $(my_overlay_resources_flat)
+$(my_res_package): PRIVATE_SHARED_ANDROID_LIBRARIES := $(my_shared_library_resources)
+$(my_res_package): PRIVATE_PROGUARD_OPTIONS_FILE := $(proguard_options_file)
+$(my_res_package) : $(full_android_manifest) $(my_static_library_resources) $(my_shared_library_resources)
+$(my_res_package) : $(my_res_resources_flat) $(my_overlay_resources_flat) \
+    $(my_generated_resources_flata) $(my_static_library_resources) \
+    $(AAPT2)
+	@echo "AAPT2 link $@"
+	$(call aapt2-link)
+
+# Extract the generated R.java/Manifest.java out of the linked package and
+# stamp them.
+ifdef R_file_stamp
+$(R_file_stamp) : $(my_res_package) | $(ACP)
+	@echo "target R.java/Manifest.java: $(PRIVATE_MODULE) ($@)"
+	@rm -rf $@ && mkdir -p $(dir $@)
+	$(call find-generated-R.java)
+endif
+
+# aapt2-link also writes the proguard keep rules file.
+ifdef proguard_options_file
+$(proguard_options_file) : $(my_res_package)
+endif
+
+resource_export_package :=
+ifdef LOCAL_EXPORT_PACKAGE_RESOURCES
+# Put this module's resources into a PRODUCT-agnostic package that
+# other packages can use to build their own PRODUCT-agnostic R.java (etc.)
+# files.
+resource_export_package := $(intermediates.COMMON)/package-export.apk
+$(R_file_stamp) : $(resource_export_package)
+
+$(resource_export_package) : $(my_res_package) | $(ACP)
+	@echo "target Export Resources: $(PRIVATE_MODULE) $(@)"
+	$(copy-file-to-new-target)
+
+endif
diff --git a/core/android_manifest.mk b/core/android_manifest.mk
new file mode 100644
index 0000000000000000000000000000000000000000..0093e02087f88c4f400a2da0009cd72b2b6321d7
--- /dev/null
+++ b/core/android_manifest.mk
@@ -0,0 +1,43 @@
+# Handle AndroidManifest.xmls
+# Input: LOCAL_MANIFEST_FILE, LOCAL_FULL_MANIFEST_FILE, LOCAL_FULL_LIBS_MANIFEST_FILES
+# Output: full_android_manifest
+
+ifeq ($(strip $(LOCAL_MANIFEST_FILE)),)
+  LOCAL_MANIFEST_FILE := AndroidManifest.xml
+endif
+# An explicit LOCAL_FULL_MANIFEST_FILE wins over the LOCAL_PATH-relative one.
+ifdef LOCAL_FULL_MANIFEST_FILE
+  full_android_manifest := $(LOCAL_FULL_MANIFEST_FILE)
+else
+  full_android_manifest := $(LOCAL_PATH)/$(LOCAL_MANIFEST_FILE)
+endif
+
+my_full_libs_manifest_files := $(LOCAL_FULL_LIBS_MANIFEST_FILES)
+my_full_libs_manifest_deps := $(LOCAL_FULL_LIBS_MANIFEST_FILES)
+
+# Set up dependency on aar libraries
+LOCAL_STATIC_JAVA_AAR_LIBRARIES := $(strip $(LOCAL_STATIC_JAVA_AAR_LIBRARIES))
+ifdef LOCAL_STATIC_JAVA_AAR_LIBRARIES
+# Merge the AndroidManifest.xml extracted from each AAR, depending on the
+# AAR's extracted classes.jar (presumably the extraction stamp — verify).
+my_full_libs_manifest_deps += $(foreach lib, $(LOCAL_STATIC_JAVA_AAR_LIBRARIES),\
+    $(call intermediates-dir-for,JAVA_LIBRARIES,$(lib),,COMMON)/aar/classes.jar)
+my_full_libs_manifest_files += $(foreach lib, $(LOCAL_STATIC_JAVA_AAR_LIBRARIES),\
+    $(call intermediates-dir-for,JAVA_LIBRARIES,$(lib),,COMMON)/aar/AndroidManifest.xml)
+
+# With aapt2, we'll link in the built resource from the AAR.
+ifndef LOCAL_USE_AAPT2
+LOCAL_RESOURCE_DIR += $(foreach lib, $(LOCAL_STATIC_JAVA_AAR_LIBRARIES),\
+    $(call intermediates-dir-for,JAVA_LIBRARIES,$(lib),,COMMON)/aar/res)
+endif # LOCAL_USE_AAPT2
+endif # LOCAL_STATIC_JAVA_AAR_LIBRARIES
+
+# Set up rules to merge library manifest files; full_android_manifest is
+# redirected to the merged output in that case.
+ifdef my_full_libs_manifest_files
+main_android_manifest := $(full_android_manifest)
+full_android_manifest := $(intermediates.COMMON)/AndroidManifest.xml
+$(full_android_manifest): PRIVATE_LIBS_MANIFESTS := $(my_full_libs_manifest_files)
+$(full_android_manifest) : $(main_android_manifest) $(my_full_libs_manifest_deps)
+	@echo "Merge android manifest files: $@ <-- $< $(PRIVATE_LIBS_MANIFESTS)"
+	@mkdir -p $(dir $@)
+	$(hide) $(ANDROID_MANIFEST_MERGER) --main $< --libs $(PRIVATE_LIBS_MANIFESTS) \
+	    --out $@
+
+endif
diff --git a/core/apicheck_msg_current.txt b/core/apicheck_msg_current.txt
new file mode 100644
index 0000000000000000000000000000000000000000..440e7f8862e81b2d32ed3357d7b3161914544d78
--- /dev/null
+++ b/core/apicheck_msg_current.txt
@@ -0,0 +1,17 @@
+
+******************************
+You have tried to change the API from what has been previously approved.
+
+To make these errors go away, you have two choices:
+ 1) You can add "@hide" javadoc comments to the methods, etc. listed in the
+ errors above.
+
+ 2) You can update current.txt by executing the following command:
+ make update-api
+
+ To submit the revised current.txt to the main Android repository,
+ you will need approval.
+******************************
+
+
+
diff --git a/core/apicheck_msg_last.txt b/core/apicheck_msg_last.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2993157b165d79e11f1b3526bdf20e71339b3db2
--- /dev/null
+++ b/core/apicheck_msg_last.txt
@@ -0,0 +1,7 @@
+
+******************************
+You have tried to change the API from what has been previously released in
+an SDK. Please fix the errors listed above.
+******************************
+
+
diff --git a/core/base_rules.mk b/core/base_rules.mk
new file mode 100644
index 0000000000000000000000000000000000000000..6722af4f27ad1eae869694db53efdbef7d1ab475
--- /dev/null
+++ b/core/base_rules.mk
@@ -0,0 +1,523 @@
+#
+# Copyright (C) 2008 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Users can define base-rules-hook in their buildspec.mk to perform
+# arbitrary operations as each module is included.
+ifdef base-rules-hook
+$(if $(base-rules-hook),)
+endif
+
+###########################################################
+## Common instructions for a generic module.
+###########################################################
+
+LOCAL_MODULE := $(strip $(LOCAL_MODULE))
+ifeq ($(LOCAL_MODULE),)
+ $(error $(LOCAL_PATH): LOCAL_MODULE is not defined)
+endif
+
+LOCAL_IS_HOST_MODULE := $(strip $(LOCAL_IS_HOST_MODULE))
+ifdef LOCAL_IS_HOST_MODULE
+ ifneq ($(LOCAL_IS_HOST_MODULE),true)
+ $(error $(LOCAL_PATH): LOCAL_IS_HOST_MODULE must be "true" or empty, not "$(LOCAL_IS_HOST_MODULE)")
+ endif
+ ifeq ($(LOCAL_HOST_PREFIX),)
+ my_prefix := HOST_
+ else
+ my_prefix := $(LOCAL_HOST_PREFIX)
+ endif
+ my_host := host-
+else
+ my_prefix := TARGET_
+ my_host :=
+endif
+
+ifeq ($(my_prefix),HOST_CROSS_)
+ my_host_cross := true
+else
+ my_host_cross :=
+endif
+
+my_module_tags := $(LOCAL_MODULE_TAGS)
+ifeq ($(my_host_cross),true)
+ my_module_tags :=
+endif
+
+ifdef BUILDING_WITH_NINJA
+# Ninja has an implicit dependency on the command being run, and kati will
+# regenerate the ninja manifest if any read makefile changes, so there is no
+# need to have dependencies on makefiles.
+# This won't catch all the cases where LOCAL_ADDITIONAL_DEPENDENCIES contains
+# a .mk file, because a few users of LOCAL_ADDITIONAL_DEPENDENCIES don't include
+# base_rules.mk, but it will fix the most common ones.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(filter-out %.mk,$(LOCAL_ADDITIONAL_DEPENDENCIES))
+endif
+
+###########################################################
+## Validate and define fallbacks for input LOCAL_* variables.
+###########################################################
+
+## Dump a .csv file of all modules and their tags
+#ifneq ($(tag-list-first-time),false)
+#$(shell rm -f tag-list.csv)
+#tag-list-first-time := false
+#endif
+#$(shell echo $(lastword $(filter-out config/% out/%,$(MAKEFILE_LIST))),$(LOCAL_MODULE),$(strip $(LOCAL_MODULE_CLASS)),$(subst $(space),$(comma),$(sort $(my_module_tags))) >> tag-list.csv)
+
+LOCAL_UNINSTALLABLE_MODULE := $(strip $(LOCAL_UNINSTALLABLE_MODULE))
+my_module_tags := $(sort $(my_module_tags))
+ifeq (,$(my_module_tags))
+ my_module_tags := optional
+endif
+
+# User tags are not allowed anymore. Fail early because it will not be installed
+# like it used to be.
+ifneq ($(filter $(my_module_tags),user),)
+ $(warning *** Module name: $(LOCAL_MODULE))
+ $(warning *** Makefile location: $(LOCAL_MODULE_MAKEFILE))
+ $(warning * )
+ $(warning * Module is attempting to use the 'user' tag. This)
+ $(warning * used to cause the module to be installed automatically.)
+ $(warning * Now, the module must be listed in the PRODUCT_PACKAGES)
+ $(warning * section of a product makefile to have it installed.)
+ $(warning * )
+ $(error user tag detected on module.)
+endif
+
+# Only the tags mentioned in this test are expected to be set by module
+# makefiles. Anything else is either a typo or a source of unexpected
+# behaviors.
+ifneq ($(filter-out debug eng tests optional samples,$(my_module_tags)),)
+$(warning unusual tags $(my_module_tags) on $(LOCAL_MODULE) at $(LOCAL_PATH))
+endif
+
+# Add implicit tags.
+#
+# If the local directory or one of its parents contains a MODULE_LICENSE_GPL
+# file, tag the module as "gnu". Search for "*_GPL*", "*_LGPL*" and "*_MPL*"
+# so that we can also find files like MODULE_LICENSE_GPL_AND_AFL
+#
+license_files := $(call find-parent-file,$(LOCAL_PATH),MODULE_LICENSE*)
+gpl_license_file := $(call find-parent-file,$(LOCAL_PATH),MODULE_LICENSE*_GPL* MODULE_LICENSE*_MPL* MODULE_LICENSE*_LGPL*)
+ifneq ($(gpl_license_file),)
+ my_module_tags += gnu
+ ALL_GPL_MODULE_LICENSE_FILES := $(sort $(ALL_GPL_MODULE_LICENSE_FILES) $(gpl_license_file))
+endif
+
+LOCAL_MODULE_CLASS := $(strip $(LOCAL_MODULE_CLASS))
+ifneq ($(words $(LOCAL_MODULE_CLASS)),1)
+ $(error $(LOCAL_PATH): LOCAL_MODULE_CLASS must contain exactly one word, not "$(LOCAL_MODULE_CLASS)")
+endif
+
+my_32_64_bit_suffix := $(if $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)IS_64_BIT),64,32)
+
+ifneq (true,$(LOCAL_UNINSTALLABLE_MODULE))
+my_multilib_module_path := $(strip $(LOCAL_MODULE_PATH_$(my_32_64_bit_suffix)))
+ifdef my_multilib_module_path
+my_module_path := $(my_multilib_module_path)
+else
+my_module_path := $(strip $(LOCAL_MODULE_PATH))
+endif
+my_module_relative_path := $(strip $(LOCAL_MODULE_RELATIVE_PATH))
+ifeq ($(my_module_path),)
+ ifdef LOCAL_IS_HOST_MODULE
+ partition_tag :=
+ else
+ ifeq (true,$(LOCAL_PROPRIETARY_MODULE))
+ partition_tag := _VENDOR
+ else ifeq (true,$(LOCAL_OEM_MODULE))
+ partition_tag := _OEM
+ else ifeq (true,$(LOCAL_ODM_MODULE))
+ partition_tag := _ODM
+ else
+ # The definition of should-install-to-system will be different depending
+ # on which goal (e.g., sdk or just droid) is being built.
+ partition_tag := $(if $(call should-install-to-system,$(my_module_tags)),,_DATA)
+ endif
+ endif
+ install_path_var := $(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)OUT$(partition_tag)_$(LOCAL_MODULE_CLASS)
+ ifeq (true,$(LOCAL_PRIVILEGED_MODULE))
+ install_path_var := $(install_path_var)_PRIVILEGED
+ endif
+
+ my_module_path := $($(install_path_var))
+ ifeq ($(strip $(my_module_path)),)
+ $(error $(LOCAL_PATH): unhandled install path "$(install_path_var) for $(LOCAL_MODULE)")
+ endif
+endif
+ifneq ($(my_module_relative_path),)
+ my_module_path := $(my_module_path)/$(my_module_relative_path)
+endif
+endif # not LOCAL_UNINSTALLABLE_MODULE
+
+ifneq ($(strip $(LOCAL_BUILT_MODULE)$(LOCAL_INSTALLED_MODULE)),)
+ $(error $(LOCAL_PATH): LOCAL_BUILT_MODULE and LOCAL_INSTALLED_MODULE must not be defined by component makefiles)
+endif
+
+my_register_name := $(LOCAL_MODULE)
+ifeq ($(my_host_cross),true)
+ my_register_name := host_cross_$(LOCAL_MODULE)
+endif
+ifdef LOCAL_2ND_ARCH_VAR_PREFIX
+ifndef LOCAL_NO_2ND_ARCH_MODULE_SUFFIX
+my_register_name := $(my_register_name)$($(my_prefix)2ND_ARCH_MODULE_SUFFIX)
+endif
+endif
+# Make sure that this IS_HOST/CLASS/MODULE combination is unique.
+module_id := MODULE.$(if \
+ $(LOCAL_IS_HOST_MODULE),$($(my_prefix)OS),TARGET).$(LOCAL_MODULE_CLASS).$(my_register_name)
+ifdef $(module_id)
+$(error $(LOCAL_PATH): $(module_id) already defined by $($(module_id)))
+endif
+$(module_id) := $(LOCAL_PATH)
+
+intermediates := $(call local-intermediates-dir,,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))
+intermediates.COMMON := $(call local-intermediates-dir,COMMON)
+generated_sources_dir := $(call local-generated-sources-dir)
+
+###########################################################
+# Pick a name for the intermediate and final targets
+###########################################################
+include $(BUILD_SYSTEM)/configure_module_stem.mk
+
+# OVERRIDE_BUILT_MODULE_PATH is only allowed to be used by the
+# internal SHARED_LIBRARIES build files.
+OVERRIDE_BUILT_MODULE_PATH := $(strip $(OVERRIDE_BUILT_MODULE_PATH))
+ifdef OVERRIDE_BUILT_MODULE_PATH
+ ifneq ($(LOCAL_MODULE_CLASS),SHARED_LIBRARIES)
+ $(error $(LOCAL_PATH): Illegal use of OVERRIDE_BUILT_MODULE_PATH)
+ endif
+ built_module_path := $(OVERRIDE_BUILT_MODULE_PATH)
+else
+ built_module_path := $(intermediates)
+endif
+LOCAL_BUILT_MODULE := $(built_module_path)/$(my_built_module_stem)
+
+ifneq (true,$(LOCAL_UNINSTALLABLE_MODULE))
+ # Apk and its attachments reside in its own subdir.
+ ifeq ($(LOCAL_MODULE_CLASS),APPS)
+ # framework-res.apk doesn't like the additional layer.
+ ifneq ($(LOCAL_NO_STANDARD_LIBRARIES),true)
+ my_module_path := $(my_module_path)/$(LOCAL_MODULE)
+ endif
+ endif
+ LOCAL_INSTALLED_MODULE := $(my_module_path)/$(my_installed_module_stem)
+endif
+
+# Assemble the list of targets to create PRIVATE_ variables for.
+LOCAL_INTERMEDIATE_TARGETS += $(LOCAL_BUILT_MODULE)
+
+###########################################################
+## Create .toc files from shared objects to reduce unnecessary rebuild
+# .toc files have the list of external dynamic symbols without their addresses.
+# As .KATI_RESTAT is specified to .toc files and commit-change-for-toc is used,
+# dependent binaries of a .toc file will be rebuilt only when the content of
+# the .toc file is changed.
+###########################################################
+ifndef LOCAL_IS_HOST_MODULE
+# Disable .toc optimization for host modules: we may run the host binaries during the build process
+# and the libraries' implementation matters.
+ifeq ($(LOCAL_MODULE_CLASS),SHARED_LIBRARIES)
+LOCAL_INTERMEDIATE_TARGETS += $(LOCAL_BUILT_MODULE).toc
+$(LOCAL_BUILT_MODULE).toc: $(LOCAL_BUILT_MODULE)
+ $(call $(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)transform-shared-lib-to-toc,$<,$@.tmp)
+ $(call commit-change-for-toc,$@)
+
+# Kati adds restat=1 to ninja. GNU make does nothing for this.
+.KATI_RESTAT: $(LOCAL_BUILT_MODULE).toc
+# Build .toc file when using mm, mma, or make $(my_register_name)
+$(my_register_name): $(LOCAL_BUILT_MODULE).toc
+endif
+endif
+
+###########################################################
+## logtags: Add .logtags files to global list
+###########################################################
+
+logtags_sources := $(filter %.logtags,$(LOCAL_SRC_FILES))
+
+ifneq ($(strip $(logtags_sources)),)
+event_log_tags := $(addprefix $(LOCAL_PATH)/,$(logtags_sources))
+else
+event_log_tags :=
+endif
+
+###########################################################
+## make clean- targets
+###########################################################
+cleantarget := clean-$(my_register_name)
+$(cleantarget) : PRIVATE_MODULE := $(my_register_name)
+$(cleantarget) : PRIVATE_CLEAN_FILES := \
+ $(LOCAL_BUILT_MODULE) \
+ $(LOCAL_INSTALLED_MODULE) \
+ $(intermediates)
+$(cleantarget)::
+ @echo "Clean: $(PRIVATE_MODULE)"
+ $(hide) rm -rf $(PRIVATE_CLEAN_FILES)
+
+###########################################################
+## Common definitions for module.
+###########################################################
+$(LOCAL_INTERMEDIATE_TARGETS) : PRIVATE_PATH:=$(LOCAL_PATH)
+$(LOCAL_INTERMEDIATE_TARGETS) : PRIVATE_IS_HOST_MODULE := $(LOCAL_IS_HOST_MODULE)
+$(LOCAL_INTERMEDIATE_TARGETS) : PRIVATE_HOST:= $(my_host)
+$(LOCAL_INTERMEDIATE_TARGETS) : PRIVATE_PREFIX := $(my_prefix)
+
+$(LOCAL_INTERMEDIATE_TARGETS) : PRIVATE_INTERMEDIATES_DIR:= $(intermediates)
+$(LOCAL_INTERMEDIATE_TARGETS) : PRIVATE_2ND_ARCH_VAR_PREFIX := $(LOCAL_2ND_ARCH_VAR_PREFIX)
+
+# Tell the module and all of its sub-modules who it is.
+$(LOCAL_INTERMEDIATE_TARGETS) : PRIVATE_MODULE:= $(my_register_name)
+
+# Provide a short-hand for building this module.
+# We name both BUILT and INSTALLED in case
+# LOCAL_UNINSTALLABLE_MODULE is set.
+.PHONY: $(my_register_name)
+$(my_register_name): $(LOCAL_BUILT_MODULE) $(LOCAL_INSTALLED_MODULE)
+
+# Set up phony targets that cover all modules under the given paths.
+# This allows us to build everything in given paths by running mmma/mma.
+my_path_components := $(subst /,$(space),$(LOCAL_PATH))
+my_path_prefix := MODULES-IN
+$(foreach c, $(my_path_components),\
+ $(eval my_path_prefix := $(my_path_prefix)-$(c))\
+ $(eval .PHONY : $(my_path_prefix))\
+ $(eval $(my_path_prefix) : $(my_register_name)))
+
+###########################################################
+## Module installation rule
+###########################################################
+
+# Some hosts do not have ACP; override the LOCAL version if that's the case.
+ifneq ($(strip $(HOST_ACP_UNAVAILABLE)),)
+ LOCAL_ACP_UNAVAILABLE := $(strip $(HOST_ACP_UNAVAILABLE))
+endif
+
+ifneq (true,$(LOCAL_UNINSTALLABLE_MODULE))
+ # Define a copy rule to install the module.
+ # acp and libraries that it uses can't use acp for
+ # installation; hence, LOCAL_ACP_UNAVAILABLE.
+$(LOCAL_INSTALLED_MODULE): PRIVATE_POST_INSTALL_CMD := $(LOCAL_POST_INSTALL_CMD)
+ifneq ($(LOCAL_ACP_UNAVAILABLE),true)
+$(LOCAL_INSTALLED_MODULE): $(LOCAL_BUILT_MODULE) | $(ACP)
+ @echo "Install: $@"
+ $(copy-file-to-new-target)
+ $(PRIVATE_POST_INSTALL_CMD)
+else
+$(LOCAL_INSTALLED_MODULE): $(LOCAL_BUILT_MODULE)
+ @echo "Install: $@"
+ $(copy-file-to-target-with-cp)
+endif
+
+# Rule to install the module's companion init.rc.
+my_init_rc := $(LOCAL_INIT_RC_$(my_32_64_bit_suffix))
+my_init_rc_src :=
+my_init_rc_installed :=
+ifndef my_init_rc
+my_init_rc := $(LOCAL_INIT_RC)
+# Make sure we don't define the rule twice in multilib module.
+LOCAL_INIT_RC :=
+endif
+ifdef my_init_rc
+my_init_rc_src := $(LOCAL_PATH)/$(my_init_rc)
+my_init_rc_installed := $(TARGET_OUT$(partition_tag)_ETC)/init/$(notdir $(my_init_rc_src))
+$(my_init_rc_installed) : $(my_init_rc_src) | $(ACP)
+ @echo "Install: $@"
+ $(copy-file-to-new-target)
+
+$(my_register_name) : $(my_init_rc_installed)
+endif # my_init_rc
+endif # !LOCAL_UNINSTALLABLE_MODULE
+
+###########################################################
+## CHECK_BUILD goals
+###########################################################
+my_checked_module :=
+# If nobody has defined a more specific module for the
+# checked modules, use LOCAL_BUILT_MODULE.
+ifdef LOCAL_CHECKED_MODULE
+ my_checked_module := $(LOCAL_CHECKED_MODULE)
+else
+ my_checked_module := $(LOCAL_BUILT_MODULE)
+endif
+
+# If they request that this module not be checked, then don't.
+# PLEASE DON'T SET THIS. ANY PLACES THAT SET THIS WITHOUT
+# GOOD REASON WILL HAVE IT REMOVED.
+ifdef LOCAL_DONT_CHECK_MODULE
+ my_checked_module :=
+endif
+# Don't check build target module defined for the 2nd arch
+ifndef LOCAL_IS_HOST_MODULE
+ifdef LOCAL_2ND_ARCH_VAR_PREFIX
+ my_checked_module :=
+endif
+endif
+
+###########################################################
+## Compatibility suite files.
+###########################################################
+ifdef LOCAL_COMPATIBILITY_SUITE
+ifneq ($(words $(LOCAL_COMPATIBILITY_SUITE)),1)
+$(error $(LOCAL_PATH):$(LOCAL_MODULE) LOCAL_COMPATIBILITY_SUITE can be only one name)
+endif
+
+# The module itself.
+my_compat_dist := \
+ $(LOCAL_BUILT_MODULE):$(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(my_installed_module_stem)
+
+# Make sure we only add the files once for multilib modules.
+ifndef $(my_prefix)$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_compat_files
+$(my_prefix)$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_compat_files := true
+
+# LOCAL_COMPATIBILITY_SUPPORT_FILES is a list of <src>[:<dest>].
+my_compat_dist += $(foreach f, $(LOCAL_COMPATIBILITY_SUPPORT_FILES),\
+ $(eval p := $(subst :,$(space),$(f)))\
+ $(eval s := $(word 1,$(p)))\
+ $(eval d := $(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(or $(word 2,$(p)),$(notdir $(word 1,$(p)))))\
+ $(s):$(d))
+
+ifneq (,$(wildcard $(LOCAL_PATH)/AndroidTest.xml))
+my_compat_dist += \
+ $(LOCAL_PATH)/AndroidTest.xml:$(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(LOCAL_MODULE).config
+endif
+
+ifneq (,$(wildcard $(LOCAL_PATH)/DynamicConfig.xml))
+my_compat_dist += \
+ $(LOCAL_PATH)/DynamicConfig.xml:$(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(LOCAL_MODULE).dynamic
+endif
+endif # $(my_prefix)$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_compat_files
+
+my_compat_files := $(call copy-many-files, $(my_compat_dist))
+
+COMPATIBILITY.$(LOCAL_COMPATIBILITY_SUITE).FILES := \
+ $(COMPATIBILITY.$(LOCAL_COMPATIBILITY_SUITE).FILES) \
+ $(my_compat_files)
+
+# Copy over the compatibility files when user runs mm/mmm.
+$(my_register_name) : $(my_compat_files)
+endif # LOCAL_COMPATIBILITY_SUITE
+
+###########################################################
+## Register with ALL_MODULES
+###########################################################
+
+ALL_MODULES += $(my_register_name)
+
+# Don't use += on subvars, or else they'll end up being
+# recursively expanded.
+ALL_MODULES.$(my_register_name).CLASS := \
+ $(ALL_MODULES.$(my_register_name).CLASS) $(LOCAL_MODULE_CLASS)
+ALL_MODULES.$(my_register_name).PATH := \
+ $(ALL_MODULES.$(my_register_name).PATH) $(LOCAL_PATH)
+ALL_MODULES.$(my_register_name).TAGS := \
+ $(ALL_MODULES.$(my_register_name).TAGS) $(my_module_tags)
+ALL_MODULES.$(my_register_name).CHECKED := \
+ $(ALL_MODULES.$(my_register_name).CHECKED) $(my_checked_module)
+ALL_MODULES.$(my_register_name).BUILT := \
+ $(ALL_MODULES.$(my_register_name).BUILT) $(LOCAL_BUILT_MODULE)
+ifneq (true,$(LOCAL_UNINSTALLABLE_MODULE))
+ALL_MODULES.$(my_register_name).INSTALLED := \
+ $(strip $(ALL_MODULES.$(my_register_name).INSTALLED) \
+ $(LOCAL_INSTALLED_MODULE) $(my_init_rc_installed))
+ALL_MODULES.$(my_register_name).BUILT_INSTALLED := \
+ $(strip $(ALL_MODULES.$(my_register_name).BUILT_INSTALLED) \
+ $(LOCAL_BUILT_MODULE):$(LOCAL_INSTALLED_MODULE) \
+ $(addprefix $(my_init_rc_src):,$(my_init_rc_installed)))
+endif
+ifdef LOCAL_PICKUP_FILES
+# Files or directories ready to be picked up by the build system
+# when $(LOCAL_BUILT_MODULE) is done.
+ALL_MODULES.$(my_register_name).PICKUP_FILES := \
+ $(ALL_MODULES.$(my_register_name).PICKUP_FILES) $(LOCAL_PICKUP_FILES)
+endif
+my_required_modules := $(LOCAL_REQUIRED_MODULES) \
+ $(LOCAL_REQUIRED_MODULES_$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH))
+ifdef LOCAL_IS_HOST_MODULE
+my_required_modules += $(LOCAL_REQUIRED_MODULES_$($(my_prefix)OS))
+endif
+ALL_MODULES.$(my_register_name).REQUIRED := \
+ $(strip $(ALL_MODULES.$(my_register_name).REQUIRED) $(my_required_modules))
+ALL_MODULES.$(my_register_name).EVENT_LOG_TAGS := \
+ $(ALL_MODULES.$(my_register_name).EVENT_LOG_TAGS) $(event_log_tags)
+ALL_MODULES.$(my_register_name).MAKEFILE := \
+ $(ALL_MODULES.$(my_register_name).MAKEFILE) $(LOCAL_MODULE_MAKEFILE)
+ifdef LOCAL_MODULE_OWNER
+ALL_MODULES.$(my_register_name).OWNER := \
+ $(sort $(ALL_MODULES.$(my_register_name).OWNER) $(LOCAL_MODULE_OWNER))
+endif
+ifdef LOCAL_2ND_ARCH_VAR_PREFIX
+ALL_MODULES.$(my_register_name).FOR_2ND_ARCH := true
+endif
+ALL_MODULES.$(my_register_name).FOR_HOST_CROSS := $(my_host_cross)
+
+INSTALLABLE_FILES.$(LOCAL_INSTALLED_MODULE).MODULE := $(my_register_name)
+
+##########################################################
+# Track module-level dependencies.
+# Use $(LOCAL_MODULE) instead of $(my_register_name) to ignore module's bitness.
+ALL_DEPS.MODULES := $(sort $(ALL_DEPS.MODULES) $(LOCAL_MODULE))
+ALL_DEPS.$(LOCAL_MODULE).ALL_DEPS := $(sort \
+ $(ALL_MODULES.$(LOCAL_MODULE).ALL_DEPS) \
+ $(LOCAL_STATIC_LIBRARIES) \
+ $(LOCAL_WHOLE_STATIC_LIBRARIES) \
+ $(LOCAL_SHARED_LIBRARIES) \
+ $(LOCAL_STATIC_JAVA_LIBRARIES) \
+ $(LOCAL_JAVA_LIBRARIES)\
+ $(LOCAL_JNI_SHARED_LIBRARIES))
+
+ALL_DEPS.$(LOCAL_MODULE).LICENSE := $(sort $(ALL_DEPS.$(LOCAL_MODULE).LICENSE) $(license_files))
+
+###########################################################
+## Take care of my_module_tags
+###########################################################
+
+# Keep track of all the tags we've seen.
+ALL_MODULE_TAGS := $(sort $(ALL_MODULE_TAGS) $(my_module_tags))
+
+# Add this module name to the tag list of each specified tag.
+$(foreach tag,$(my_module_tags),\
+ $(eval ALL_MODULE_NAME_TAGS.$(tag) += $(my_register_name)))
+
+###########################################################
+## umbrella targets used to verify builds
+###########################################################
+j_or_n :=
+ifneq (,$(filter EXECUTABLES SHARED_LIBRARIES STATIC_LIBRARIES,$(LOCAL_MODULE_CLASS)))
+j_or_n := native
+else
+ifneq (,$(filter JAVA_LIBRARIES APPS,$(LOCAL_MODULE_CLASS)))
+j_or_n := java
+endif
+endif
+ifdef LOCAL_IS_HOST_MODULE
+h_or_t := host
+else
+h_or_t := target
+endif
+
+ifdef j_or_n
+$(j_or_n) $(h_or_t) $(j_or_n)-$(h_or_t) : $(my_checked_module)
+ifneq (,$(filter $(my_module_tags),tests))
+$(j_or_n)-$(h_or_t)-tests $(j_or_n)-tests $(h_or_t)-tests : $(my_checked_module)
+endif
+endif
+
+###########################################################
+## NOTICE files
+###########################################################
+
+include $(BUILD_NOTICE_FILE)
diff --git a/core/binary.mk b/core/binary.mk
new file mode 100644
index 0000000000000000000000000000000000000000..7b229032b00a3d43ef82d30947dc255f9297ea02
--- /dev/null
+++ b/core/binary.mk
@@ -0,0 +1,1444 @@
+###########################################################
+## Standard rules for building binary object files from
+## asm/c/cpp/yacc/lex/etc source files.
+##
+## The list of object files is exported in $(all_objects).
+###########################################################
+
+#######################################
+include $(BUILD_SYSTEM)/base_rules.mk
+#######################################
+
+##################################################
+# Compute the dependency of the shared libraries
+##################################################
+# On the target, we compile with -nostdlib, so we must add in the
+# default system shared libraries, unless they have requested not
+# to by supplying a LOCAL_SYSTEM_SHARED_LIBRARIES value. One would
+# supply that, for example, when building libc itself.
+ifdef LOCAL_IS_HOST_MODULE
+ ifeq ($(LOCAL_SYSTEM_SHARED_LIBRARIES),none)
+ my_system_shared_libraries :=
+ else
+ my_system_shared_libraries := $(LOCAL_SYSTEM_SHARED_LIBRARIES)
+ endif
+else
+ ifeq ($(LOCAL_SYSTEM_SHARED_LIBRARIES),none)
+ my_system_shared_libraries := libc libm
+ else
+ my_system_shared_libraries := $(LOCAL_SYSTEM_SHARED_LIBRARIES)
+ endif
+endif
+
+# The following LOCAL_ variables will be modified in this file.
+# Because the same LOCAL_ variables may be used to define modules for both 1st arch and 2nd arch,
+# we can't modify them in place.
+my_src_files := $(LOCAL_SRC_FILES)
+my_src_files_exclude := $(LOCAL_SRC_FILES_EXCLUDE)
+my_static_libraries := $(LOCAL_STATIC_LIBRARIES)
+my_whole_static_libraries := $(LOCAL_WHOLE_STATIC_LIBRARIES)
+my_shared_libraries := $(LOCAL_SHARED_LIBRARIES)
+my_cflags := $(LOCAL_CFLAGS)
+my_conlyflags := $(LOCAL_CONLYFLAGS)
+my_cppflags := $(LOCAL_CPPFLAGS)
+my_cflags_no_override := $(GLOBAL_CFLAGS_NO_OVERRIDE)
+my_cppflags_no_override := $(GLOBAL_CPPFLAGS_NO_OVERRIDE)
+my_ldflags := $(LOCAL_LDFLAGS)
+my_ldlibs := $(LOCAL_LDLIBS)
+my_asflags := $(LOCAL_ASFLAGS)
+my_cc := $(LOCAL_CC)
+my_cc_wrapper := $(CC_WRAPPER)
+my_cxx := $(LOCAL_CXX)
+my_cxx_wrapper := $(CXX_WRAPPER)
+my_c_includes := $(LOCAL_C_INCLUDES)
+my_generated_sources := $(LOCAL_GENERATED_SOURCES)
+my_native_coverage := $(LOCAL_NATIVE_COVERAGE)
+my_additional_dependencies := $(LOCAL_MODULE_MAKEFILE_DEP) $(LOCAL_ADDITIONAL_DEPENDENCIES)
+my_export_c_include_dirs := $(LOCAL_EXPORT_C_INCLUDE_DIRS)
+
+ifdef LOCAL_IS_HOST_MODULE
+my_allow_undefined_symbols := true
+else
+my_allow_undefined_symbols := $(strip $(LOCAL_ALLOW_UNDEFINED_SYMBOLS))
+endif
+
+my_ndk_sysroot :=
+my_ndk_sysroot_include :=
+my_ndk_sysroot_lib :=
+ifdef LOCAL_SDK_VERSION
+ ifdef LOCAL_NDK_VERSION
+ $(error $(LOCAL_PATH): LOCAL_NDK_VERSION is now retired.)
+ endif
+ ifdef LOCAL_IS_HOST_MODULE
+ $(error $(LOCAL_PATH): LOCAL_SDK_VERSION cannot be used in host module)
+ endif
+ my_ndk_source_root := $(HISTORICAL_NDK_VERSIONS_ROOT)/current/sources
+ my_ndk_sysroot := $(HISTORICAL_NDK_VERSIONS_ROOT)/current/platforms/android-$(LOCAL_SDK_VERSION)/arch-$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)
+ my_ndk_sysroot_include := $(my_ndk_sysroot)/usr/include
+
+  # x86_64 and mips64 are both multilib toolchains, so their libraries are
+ # installed in /usr/lib64. Aarch64, on the other hand, is not a multilib
+ # compiler, so its libraries are in /usr/lib.
+ #
+ # Mips32r6 is yet another variation, with libraries installed in libr6.
+ #
+ # For the rest, the libraries are installed simply to /usr/lib.
+ ifneq (,$(filter x86_64 mips64,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)))
+ my_ndk_sysroot_lib := $(my_ndk_sysroot)/usr/lib64
+ else ifeq (mips32r6,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH_VARIANT))
+ my_ndk_sysroot_lib := $(my_ndk_sysroot)/usr/libr6
+ else
+ my_ndk_sysroot_lib := $(my_ndk_sysroot)/usr/lib
+ endif
+
+ # The bionic linker now has support for packed relocations and gnu style
+ # hashes (which are much faster!), but shipping to older devices requires
+ # the old style hash. Fortunately, we can build with both and it'll work
+ # anywhere.
+ #
+ # This is not currently supported on MIPS architectures.
+ ifeq (,$(filter mips mips64,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)))
+ my_ldflags += -Wl,--hash-style=both
+ endif
+
+ # We don't want to expose the relocation packer to the NDK just yet.
+ LOCAL_PACK_MODULE_RELOCATIONS := false
+
+ # Set up the NDK stl variant. Starting from NDK-r5 the c++ stl resides in a separate location.
+ # See ndk/docs/CPLUSPLUS-SUPPORT.html
+ my_ndk_stl_include_path :=
+ my_ndk_stl_shared_lib_fullpath :=
+ my_ndk_stl_shared_lib :=
+ my_ndk_stl_static_lib :=
+ my_ndk_stl_cppflags :=
+ my_cpu_variant := $(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)CPU_ABI)
+ ifeq (mips32r6,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH_VARIANT))
+ my_cpu_variant := mips32r6
+ endif
+ LOCAL_NDK_STL_VARIANT := $(strip $(LOCAL_NDK_STL_VARIANT))
+ ifeq (,$(LOCAL_NDK_STL_VARIANT))
+ LOCAL_NDK_STL_VARIANT := system
+ endif
+ ifneq (1,$(words $(filter none system stlport_static stlport_shared c++_static c++_shared gnustl_static, $(LOCAL_NDK_STL_VARIANT))))
+ $(error $(LOCAL_PATH): Unknown LOCAL_NDK_STL_VARIANT $(LOCAL_NDK_STL_VARIANT))
+ endif
+ ifeq (system,$(LOCAL_NDK_STL_VARIANT))
+ my_ndk_stl_include_path := $(my_ndk_source_root)/cxx-stl/system/include
+ my_system_shared_libraries += libstdc++
+ else # LOCAL_NDK_STL_VARIANT is not system
+ ifneq (,$(filter stlport_%, $(LOCAL_NDK_STL_VARIANT)))
+ my_ndk_stl_include_path := $(my_ndk_source_root)/cxx-stl/stlport/stlport
+ my_system_shared_libraries += libstdc++
+ ifeq (stlport_static,$(LOCAL_NDK_STL_VARIANT))
+ my_ndk_stl_static_lib := $(my_ndk_source_root)/cxx-stl/stlport/libs/$(my_cpu_variant)/libstlport_static.a
+ else
+ my_ndk_stl_shared_lib_fullpath := $(my_ndk_source_root)/cxx-stl/stlport/libs/$(my_cpu_variant)/libstlport_shared.so
+ my_ndk_stl_shared_lib := -lstlport_shared
+ endif
+ else # LOCAL_NDK_STL_VARIANT is not stlport_* either
+ ifneq (,$(filter c++_%, $(LOCAL_NDK_STL_VARIANT)))
+ my_ndk_stl_include_path := $(my_ndk_source_root)/cxx-stl/llvm-libc++/libcxx/include \
+ $(my_ndk_source_root)/cxx-stl/llvm-libc++/gabi++/include \
+ $(my_ndk_source_root)/android/support/include
+ ifeq (c++_static,$(LOCAL_NDK_STL_VARIANT))
+ my_ndk_stl_static_lib := $(my_ndk_source_root)/cxx-stl/llvm-libc++/libs/$(my_cpu_variant)/libc++_static.a
+ else
+ my_ndk_stl_shared_lib_fullpath := $(my_ndk_source_root)/cxx-stl/llvm-libc++/libs/$(my_cpu_variant)/libc++_shared.so
+ my_ndk_stl_shared_lib := -lc++_shared
+ endif
+ my_ndk_stl_cppflags := -std=c++11
+ else # LOCAL_NDK_STL_VARIANT is not c++_* either
+ ifneq (,$(filter gnustl_%, $(LOCAL_NDK_STL_VARIANT)))
+ my_ndk_stl_include_path := $(my_ndk_source_root)/cxx-stl/gnu-libstdc++/$($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_NDK_GCC_VERSION)/libs/$(my_cpu_variant)/include \
+ $(my_ndk_source_root)/cxx-stl/gnu-libstdc++/$($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_NDK_GCC_VERSION)/include
+ my_ndk_stl_static_lib := $(my_ndk_source_root)/cxx-stl/gnu-libstdc++/$($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_NDK_GCC_VERSION)/libs/$(my_cpu_variant)/libgnustl_static.a
+ else # LOCAL_NDK_STL_VARIANT must be none
+ # Do nothing.
+ endif
+ endif
+ endif
+ endif
+endif
+
+# MinGW spits out warnings about -fPIC (even for -fpie?!) being ignored because
+# all code is position independent, and then those warnings get promoted to
+# errors.
+ifneq ($($(my_prefix)OS),windows)
+ifeq ($(LOCAL_MODULE_CLASS),EXECUTABLES)
+my_cflags += -fpie
+else
+my_cflags += -fPIC
+endif
+endif
+
+ifdef LOCAL_IS_HOST_MODULE
+my_src_files += $(LOCAL_SRC_FILES_$($(my_prefix)OS)) $(LOCAL_SRC_FILES_$($(my_prefix)OS)_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH))
+my_static_libraries += $(LOCAL_STATIC_LIBRARIES_$($(my_prefix)OS))
+my_shared_libraries += $(LOCAL_SHARED_LIBRARIES_$($(my_prefix)OS))
+my_cflags += $(LOCAL_CFLAGS_$($(my_prefix)OS))
+my_cppflags += $(LOCAL_CPPFLAGS_$($(my_prefix)OS))
+my_ldflags += $(LOCAL_LDFLAGS_$($(my_prefix)OS))
+my_ldlibs += $(LOCAL_LDLIBS_$($(my_prefix)OS))
+my_asflags += $(LOCAL_ASFLAGS_$($(my_prefix)OS))
+my_c_includes += $(LOCAL_C_INCLUDES_$($(my_prefix)OS))
+my_generated_sources += $(LOCAL_GENERATED_SOURCES_$($(my_prefix)OS))
+endif
+
+my_src_files += $(LOCAL_SRC_FILES_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)) $(LOCAL_SRC_FILES_$(my_32_64_bit_suffix))
+my_src_files_exclude += $(LOCAL_SRC_FILES_EXCLUDE_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)) $(LOCAL_SRC_FILES_EXCLUDE_$(my_32_64_bit_suffix))
+my_shared_libraries += $(LOCAL_SHARED_LIBRARIES_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)) $(LOCAL_SHARED_LIBRARIES_$(my_32_64_bit_suffix))
+my_cflags += $(LOCAL_CFLAGS_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)) $(LOCAL_CFLAGS_$(my_32_64_bit_suffix))
+my_cppflags += $(LOCAL_CPPFLAGS_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)) $(LOCAL_CPPFLAGS_$(my_32_64_bit_suffix))
+my_ldflags += $(LOCAL_LDFLAGS_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)) $(LOCAL_LDFLAGS_$(my_32_64_bit_suffix))
+my_asflags += $(LOCAL_ASFLAGS_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)) $(LOCAL_ASFLAGS_$(my_32_64_bit_suffix))
+my_c_includes += $(LOCAL_C_INCLUDES_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)) $(LOCAL_C_INCLUDES_$(my_32_64_bit_suffix))
+my_generated_sources += $(LOCAL_GENERATED_SOURCES_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)) $(LOCAL_GENERATED_SOURCES_$(my_32_64_bit_suffix))
+
+my_missing_exclude_files := $(filter-out $(my_src_files),$(my_src_files_exclude))
+ifneq ($(my_missing_exclude_files),)
+$(warning Files are listed in LOCAL_SRC_FILES_EXCLUDE but not LOCAL_SRC_FILES)
+$(error $(my_missing_exclude_files))
+endif
+my_src_files := $(filter-out $(my_src_files_exclude),$(my_src_files))
+
+my_clang := $(strip $(LOCAL_CLANG))
+ifdef LOCAL_CLANG_$(my_32_64_bit_suffix)
+my_clang := $(strip $(LOCAL_CLANG_$(my_32_64_bit_suffix)))
+endif
+ifdef LOCAL_CLANG_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)
+my_clang := $(strip $(LOCAL_CLANG_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)))
+endif
+
+# clang is enabled by default for host builds
+# enable it unless we've specifically disabled clang above
+ifdef LOCAL_IS_HOST_MODULE
+ ifneq ($($(my_prefix)OS),windows)
+ ifeq ($(my_clang),)
+ my_clang := true
+ endif
+ endif
+# Add option to make gcc the default for device build
+else ifeq ($(USE_CLANG_PLATFORM_BUILD),false)
+ ifeq ($(my_clang),)
+ my_clang := false
+ endif
+else ifeq ($(my_clang),)
+ my_clang := true
+endif
+
+my_cpp_std_version := -std=gnu++14
+
+ifneq ($(my_clang),true)
+ # GCC uses an invalid C++14 ABI (emits calls to
+ # __cxa_throw_bad_array_length, which is not a valid C++ RT ABI).
+ # http://b/25022512
+ my_cpp_std_version := -std=gnu++11
+endif
+
+ifdef LOCAL_SDK_VERSION
+ # The NDK handles this itself.
+ my_cpp_std_version :=
+endif
+
+ifdef LOCAL_IS_HOST_MODULE
+ ifneq ($(my_clang),true)
+ # The host GCC doesn't support C++14 (and is deprecated, so likely
+ # never will). Build these modules with C++11.
+ my_cpp_std_version := -std=gnu++11
+ endif
+endif
+
+my_cppflags := $(my_cpp_std_version) $(my_cppflags)
+
+
+# arch-specific static libraries go first so that generic ones can depend on them
+my_static_libraries := $(LOCAL_STATIC_LIBRARIES_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)) $(LOCAL_STATIC_LIBRARIES_$(my_32_64_bit_suffix)) $(my_static_libraries)
+my_whole_static_libraries := $(LOCAL_WHOLE_STATIC_LIBRARIES_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)) $(LOCAL_WHOLE_STATIC_LIBRARIES_$(my_32_64_bit_suffix)) $(my_whole_static_libraries)
+
+# Selects the C++ STL implementation and appends its flags/libs to my_* vars.
+include $(BUILD_SYSTEM)/cxx_stl_setup.mk
+
+# Add static HAL libraries
+# For each requested HAL lib, prefer a board-specific implementation
+# (lib.<variant> listed in BOARD_HAL_STATIC_LIBRARIES); otherwise fall
+# back to the generic lib.default implementation.
+ifdef LOCAL_HAL_STATIC_LIBRARIES
+$(foreach lib, $(LOCAL_HAL_STATIC_LIBRARIES), \
+ $(eval b_lib := $(filter $(lib).%,$(BOARD_HAL_STATIC_LIBRARIES)))\
+ $(if $(b_lib), $(eval my_static_libraries += $(b_lib)),\
+ $(eval my_static_libraries += $(lib).default)))
+b_lib :=
+endif
+
+# Allow a board/product to override the linker binary; otherwise use the
+# default target linker for this architecture.
+ifneq ($(strip $(CUSTOM_$(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)LINKER)),)
+ my_linker := $(CUSTOM_$(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)LINKER)
+else
+ my_linker := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_LINKER)
+endif
+
+# Applies ASan/TSan/etc. configuration (may modify my_cflags, my_ldflags,
+# my_shared_libraries, ...).
+include $(BUILD_SYSTEM)/config_sanitizers.mk
+
+# Add in libcompiler_rt for all regular device builds
+# (skipped for NDK builds and when WITHOUT_LIBCOMPILER_RT is set)
+ifeq (,$(LOCAL_SDK_VERSION)$(WITHOUT_LIBCOMPILER_RT))
+ my_static_libraries += $(COMPILER_RT_CONFIG_EXTRA_STATIC_LIBRARIES)
+endif
+
+####################################################
+## Add FDO flags if FDO is turned on and supported
+## Please note that we will do option filtering during FDO build.
+## i.e. Os->O2, remove -fno-early-inline and -finline-limit.
+##################################################################
+# my_fdo_build records whether this module participates in an FDO
+# (feedback-directed optimization) build; consumed later in the build.
+my_fdo_build :=
+ifneq ($(filter true always, $(LOCAL_FDO_SUPPORT)),)
+ ifeq ($(BUILD_FDO_INSTRUMENT),true)
+ # Instrumentation pass: compile/link with profile-generation flags.
+ my_cflags += $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_FDO_INSTRUMENT_CFLAGS)
+ my_ldflags += $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_FDO_INSTRUMENT_LDFLAGS)
+ my_fdo_build := true
+ else ifneq ($(filter true,$(BUILD_FDO_OPTIMIZE))$(filter always,$(LOCAL_FDO_SUPPORT)),)
+ # Optimization pass: compile with profile-use flags.
+ my_cflags += $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_FDO_OPTIMIZE_CFLAGS)
+ my_fdo_build := true
+ endif
+ # Disable ccache (or other compiler wrapper) except gomacc, which
+ # can handle -fprofile-use properly.
+ my_cc_wrapper := $(filter $(GOMA_CC),$(my_cc_wrapper))
+ my_cxx_wrapper := $(filter $(GOMA_CC),$(my_cxx_wrapper))
+endif
+
+###########################################################
+## Explicitly declare assembly-only __ASSEMBLY__ macro for
+## assembly source
+###########################################################
+my_asflags += -D__ASSEMBLY__
+
+
+###########################################################
+## Define PRIVATE_ variables from global vars
+###########################################################
+# Target (device) modules: pick includes/flags from either the NDK (when
+# LOCAL_SDK_VERSION is set) or the platform toolchain configuration, then
+# pick the clang or gcc flavor of the global flags.
+ifndef LOCAL_IS_HOST_MODULE
+ifdef LOCAL_SDK_VERSION
+my_target_project_includes :=
+my_target_c_includes := $(my_ndk_stl_include_path) $(my_ndk_sysroot_include)
+my_target_global_cppflags := $(my_ndk_stl_cppflags)
+else
+my_target_project_includes := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_PROJECT_INCLUDES)
+my_target_c_includes := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_C_INCLUDES)
+my_target_global_cppflags :=
+endif # LOCAL_SDK_VERSION
+
+# NOTE: cppflags uses += (preserving the NDK STL cppflags set above);
+# the other flag variables are assigned fresh here.
+ifeq ($(my_clang),true)
+my_target_global_cflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)CLANG_TARGET_GLOBAL_CFLAGS)
+my_target_global_conlyflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)CLANG_TARGET_GLOBAL_CONLYFLAGS)
+my_target_global_cppflags += $($(LOCAL_2ND_ARCH_VAR_PREFIX)CLANG_TARGET_GLOBAL_CPPFLAGS)
+my_target_global_ldflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)CLANG_TARGET_GLOBAL_LDFLAGS)
+else
+my_target_global_cflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_GLOBAL_CFLAGS)
+my_target_global_conlyflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_GLOBAL_CONLYFLAGS)
+my_target_global_cppflags += $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_GLOBAL_CPPFLAGS)
+my_target_global_ldflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_GLOBAL_LDFLAGS)
+endif # my_clang
+
+# Snapshot the values into target-specific PRIVATE_ variables so later
+# changes to the my_* globals don't leak into this module's recipes.
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_TARGET_PROJECT_INCLUDES := $(my_target_project_includes)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_TARGET_C_INCLUDES := $(my_target_c_includes)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_TARGET_GLOBAL_CFLAGS := $(my_target_global_cflags)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_TARGET_GLOBAL_CONLYFLAGS := $(my_target_global_conlyflags)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_TARGET_GLOBAL_CPPFLAGS := $(my_target_global_cppflags)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_TARGET_GLOBAL_LDFLAGS := $(my_target_global_ldflags)
+
+else # LOCAL_IS_HOST_MODULE
+
+# Host modules: same idea, using the HOST_/HOST_CROSS_ (my_prefix) globals.
+ifeq ($(my_clang),true)
+my_host_global_cflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)CLANG_$(my_prefix)GLOBAL_CFLAGS)
+my_host_global_conlyflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)CLANG_$(my_prefix)GLOBAL_CONLYFLAGS)
+my_host_global_cppflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)CLANG_$(my_prefix)GLOBAL_CPPFLAGS)
+my_host_global_ldflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)CLANG_$(my_prefix)GLOBAL_LDFLAGS)
+my_host_c_includes := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)C_INCLUDES)
+else
+my_host_global_cflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)GLOBAL_CFLAGS)
+my_host_global_conlyflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)GLOBAL_CONLYFLAGS)
+my_host_global_cppflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)GLOBAL_CPPFLAGS)
+my_host_global_ldflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)GLOBAL_LDFLAGS)
+my_host_c_includes := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)C_INCLUDES)
+endif # my_clang
+
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_HOST_C_INCLUDES := $(my_host_c_includes)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_HOST_GLOBAL_CFLAGS := $(my_host_global_cflags)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_HOST_GLOBAL_CONLYFLAGS := $(my_host_global_conlyflags)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_HOST_GLOBAL_CPPFLAGS := $(my_host_global_cppflags)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_HOST_GLOBAL_LDFLAGS := $(my_host_global_ldflags)
+endif # LOCAL_IS_HOST_MODULE
+
+# To enable coverage for a given module, set LOCAL_NATIVE_COVERAGE=true and
+# build with NATIVE_COVERAGE=true in your environment. Note that the build
+# system is not sensitive to changes to NATIVE_COVERAGE, so you should do a
+# clean build of your module after toggling it.
+ifeq ($(NATIVE_COVERAGE),true)
+ ifeq ($(my_native_coverage),true)
+ # Note that clang coverage doesn't play nicely with acov out of the box.
+ # Clang apparently generates .gcno files that aren't compatible with
+ # gcov-4.8. This can be solved by installing gcc-4.6 and invoking lcov
+ # with `--gcov-tool /usr/bin/gcov-4.6`.
+ #
+ # http://stackoverflow.com/questions/17758126/clang-code-coverage-invalid-output
+ # -O0 keeps line info usable for coverage reports.
+ my_cflags += --coverage -O0
+ my_ldflags += --coverage
+ endif
+else
+ my_native_coverage := false
+endif
+
+# Runtime library that implements the coverage counters: clang uses
+# libprofile_rt, gcc uses libgcov.
+ifeq ($(my_clang),true)
+ my_coverage_lib := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_LIBPROFILE_RT)
+else
+ my_coverage_lib := $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_LIBGCOV)
+endif
+
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_TARGET_COVERAGE_LIB := $(my_coverage_lib)
+
+###########################################################
+## Define PRIVATE_ variables used by multiple module types
+###########################################################
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_NO_DEFAULT_COMPILER_FLAGS := \
+ $(strip $(LOCAL_NO_DEFAULT_COMPILER_FLAGS))
+
+# Syntax checking and static analysis are opt-in via the build
+# environment (WITH_SYNTAX_CHECK / WITH_STATIC_ANALYZER); default both
+# off for this module when unset.
+ifeq ($(strip $(WITH_SYNTAX_CHECK)),)
+ LOCAL_NO_SYNTAX_CHECK := true
+endif
+
+ifeq ($(strip $(WITH_STATIC_ANALYZER)),)
+ LOCAL_NO_STATIC_ANALYZER := true
+endif
+
+# Clang does not recognize all gcc flags.
+# Use static analyzer only if clang is used.
+ifneq ($(my_clang),true)
+ LOCAL_NO_STATIC_ANALYZER := true
+endif
+
+# Architecture label used when reporting syntax-check/analyzer results.
+ifneq ($(strip $(LOCAL_IS_HOST_MODULE)),)
+ my_syntax_arch := host
+else
+ my_syntax_arch := $(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)
+endif
+
+# Select the C compiler (unless the module already forced one via my_cc),
+# then apply the compiler wrapper (ccache/goma) in front of it.
+ifeq ($(strip $(my_cc)),)
+ ifeq ($(my_clang),true)
+ my_cc := $(CLANG)
+ else
+ my_cc := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)CC)
+ endif
+ my_cc := $(my_cc_wrapper) $(my_cc)
+endif
+
+# When the static analyzer is enabled, replace the compiler with the
+# ccc-analyzer wrapper (which invokes the real compiler via CCC_CC).
+# Otherwise, if only syntax checking is requested, add -fsyntax-only.
+ifneq ($(LOCAL_NO_STATIC_ANALYZER),true)
+ my_cc := CCC_CC=$(CLANG) CLANG=$(CLANG) \
+ $(SYNTAX_TOOLS_PREFIX)/ccc-analyzer
+else
+ifneq ($(LOCAL_NO_SYNTAX_CHECK),true)
+ my_cc := $(my_cc) -fsyntax-only
+endif
+endif
+
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_CC := $(my_cc)
+
+# Same selection logic for the C++ compiler.
+ifeq ($(strip $(my_cxx)),)
+ ifeq ($(my_clang),true)
+ my_cxx := $(CLANG_CXX)
+ else
+ my_cxx := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)CXX)
+ endif
+ my_cxx := $(my_cxx_wrapper) $(my_cxx)
+endif
+
+ifneq ($(LOCAL_NO_STATIC_ANALYZER),true)
+ my_cxx := CCC_CXX=$(CLANG_CXX) CLANG_CXX=$(CLANG_CXX) \
+ $(SYNTAX_TOOLS_PREFIX)/c++-analyzer
+else
+ifneq ($(LOCAL_NO_SYNTAX_CHECK),true)
+ my_cxx := $(my_cxx) -fsyntax-only
+endif
+endif
+
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_LINKER := $(my_linker)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_CXX := $(my_cxx)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_CLANG := $(my_clang)
+
+# TODO: support a mix of standard extensions so that this isn't necessary
+# Default the module's C++ source extension to .cpp when unspecified.
+LOCAL_CPP_EXTENSION := $(strip $(LOCAL_CPP_EXTENSION))
+ifeq ($(LOCAL_CPP_EXTENSION),)
+ LOCAL_CPP_EXTENSION := .cpp
+endif
+
+# Certain modules like libdl have to have symbols resolved at runtime and blow
+# up if --no-undefined is passed to the linker.
+ifeq ($(strip $(LOCAL_NO_DEFAULT_COMPILER_FLAGS)),)
+ifeq ($(my_allow_undefined_symbols),)
+ my_ldflags += $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)NO_UNDEFINED_LDFLAGS)
+endif
+endif
+
+# Record whether static libraries should be linked inside a
+# --start-group/--end-group pair (for circular dependencies).
+ifeq (true,$(LOCAL_GROUP_STATIC_LIBRARIES))
+$(LOCAL_BUILT_MODULE): PRIVATE_GROUP_STATIC_LIBRARIES := true
+else
+$(LOCAL_BUILT_MODULE): PRIVATE_GROUP_STATIC_LIBRARIES :=
+endif
+
+###########################################################
+## Define arm-vs-thumb-mode flags.
+###########################################################
+# On arm, sources suffixed ".arm" build in arm mode and the rest in the
+# module's default mode (LOCAL_ARM_MODE, defaulting to thumb).
+LOCAL_ARM_MODE := $(strip $(LOCAL_ARM_MODE))
+ifeq ($($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH),arm)
+arm_objects_mode := $(if $(LOCAL_ARM_MODE),$(LOCAL_ARM_MODE),arm)
+normal_objects_mode := $(if $(LOCAL_ARM_MODE),$(LOCAL_ARM_MODE),thumb)
+
+# Read the values from something like TARGET_arm_CFLAGS or
+# TARGET_thumb_CFLAGS. HOST_(arm|thumb)_CFLAGS values aren't
+# actually used (although they are usually empty).
+arm_objects_cflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)$(arm_objects_mode)_CFLAGS)
+normal_objects_cflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)$(normal_objects_mode)_CFLAGS)
+ifeq ($(my_clang),true)
+arm_objects_cflags := $(call $(LOCAL_2ND_ARCH_VAR_PREFIX)convert-to-$(my_host)clang-flags,$(arm_objects_cflags))
+normal_objects_cflags := $(call $(LOCAL_2ND_ARCH_VAR_PREFIX)convert-to-$(my_host)clang-flags,$(normal_objects_cflags))
+endif
+
+else
+# Non-arm architectures have no arm/thumb split.
+arm_objects_mode :=
+normal_objects_mode :=
+arm_objects_cflags :=
+normal_objects_cflags :=
+endif
+
+###########################################################
+## Define per-module debugging flags. Users can turn on
+## debugging for a particular module by setting DEBUG_MODULE_ModuleName
+## to a non-empty value in their environment or buildspec.mk,
+## and setting HOST_/TARGET_CUSTOM_DEBUG_CFLAGS to the
+## debug flags that they want to use.
+###########################################################
+ifdef DEBUG_MODULE_$(strip $(LOCAL_MODULE))
+ debug_cflags := $($(my_prefix)CUSTOM_DEBUG_CFLAGS)
+else
+ debug_cflags :=
+endif
+
+####################################################
+## Keep track of src -> obj mapping
+####################################################
+
+# Reset the per-module accumulators used by the track-src-file-* helpers.
+my_tracked_gen_files :=
+my_tracked_src_files :=
+
+###########################################################
+## Stuff source generated from one-off tools
+###########################################################
+$(my_generated_sources): PRIVATE_MODULE := $(my_register_name)
+
+# Sources generated into the shared $(generated_sources_dir) are copied
+# into this module's $(intermediates) so each arch build is self-contained.
+my_gen_sources_copy := $(patsubst $(generated_sources_dir)/%,$(intermediates)/%,$(filter $(generated_sources_dir)/%,$(my_generated_sources)))
+
+$(my_gen_sources_copy): $(intermediates)/% : $(generated_sources_dir)/% | $(ACP)
+	@echo "Copy: $@"
+	$(copy-file-to-target)
+
+# From here on, refer to generated sources by their intermediates path.
+my_generated_sources := $(patsubst $(generated_sources_dir)/%,$(intermediates)/%,$(my_generated_sources))
+
+# Generated sources that will actually produce object files.
+# Other files (like headers) are allowed in LOCAL_GENERATED_SOURCES,
+# since other compiled sources may depend on them, and we set up
+# the dependencies.
+my_gen_src_files := $(filter %.c %$(LOCAL_CPP_EXTENSION) %.S %.s,$(my_generated_sources))
+
+ALL_GENERATED_SOURCES += $(my_generated_sources)
+
+####################################################
+## Compile RenderScript with reflected C++
+####################################################
+
+renderscript_sources := $(filter %.rs %.fs,$(my_src_files))
+
+ifneq (,$(renderscript_sources))
+
+renderscript_sources_fullpath := $(addprefix $(LOCAL_PATH)/, $(renderscript_sources))
+# A single stamp file stands in for all generated RS outputs, since one
+# llvm-rs-cc invocation produces many files.
+RenderScript_file_stamp := $(intermediates)/RenderScriptCPP.stamp
+renderscript_intermediate := $(intermediates)/renderscript
+
+# Target API passed to llvm-rs-cc: explicit LOCAL_RENDERSCRIPT_TARGET_API
+# wins; otherwise derive it from a numeric LOCAL_SDK_VERSION.
+renderscript_target_api :=
+
+ifneq (,$(LOCAL_RENDERSCRIPT_TARGET_API))
+renderscript_target_api := $(LOCAL_RENDERSCRIPT_TARGET_API)
+else
+ifneq (,$(LOCAL_SDK_VERSION))
+# Set target-api for LOCAL_SDK_VERSIONs other than current.
+ifneq (,$(filter-out current system_current test_current, $(LOCAL_SDK_VERSION)))
+renderscript_target_api := $(LOCAL_SDK_VERSION)
+endif
+endif # LOCAL_SDK_VERSION is set
+endif # LOCAL_RENDERSCRIPT_TARGET_API is set
+
+
+ifeq ($(LOCAL_RENDERSCRIPT_CC),)
+LOCAL_RENDERSCRIPT_CC := $(LLVM_RS_CC)
+endif
+
+# Turn on all warnings and warnings as errors for RS compiles.
+# This can be disabled with LOCAL_RENDERSCRIPT_FLAGS := -Wno-error
+renderscript_flags := -Wall -Werror
+renderscript_flags += $(LOCAL_RENDERSCRIPT_FLAGS)
+# -m32 or -m64
+renderscript_flags += -m$(my_32_64_bit_suffix)
+
+renderscript_includes := \
+ $(TOPDIR)external/clang/lib/Headers \
+ $(TOPDIR)frameworks/rs/scriptc \
+ $(LOCAL_RENDERSCRIPT_INCLUDES)
+
+ifneq ($(LOCAL_RENDERSCRIPT_INCLUDES_OVERRIDE),)
+renderscript_includes := $(LOCAL_RENDERSCRIPT_INCLUDES_OVERRIDE)
+endif
+
+# Dependency (.d) files llvm-rs-cc writes, one per input script.
+bc_dep_files := $(addprefix $(renderscript_intermediate)/, \
+ $(patsubst %.fs,%.d, $(patsubst %.rs,%.d, $(notdir $(renderscript_sources)))))
+
+$(RenderScript_file_stamp): PRIVATE_RS_INCLUDES := $(renderscript_includes)
+$(RenderScript_file_stamp): PRIVATE_RS_CC := $(LOCAL_RENDERSCRIPT_CC)
+$(RenderScript_file_stamp): PRIVATE_RS_FLAGS := $(renderscript_flags)
+$(RenderScript_file_stamp): PRIVATE_RS_SOURCE_FILES := $(renderscript_sources_fullpath)
+$(RenderScript_file_stamp): PRIVATE_RS_OUTPUT_DIR := $(renderscript_intermediate)
+$(RenderScript_file_stamp): PRIVATE_RS_TARGET_API := $(renderscript_target_api)
+$(RenderScript_file_stamp): PRIVATE_DEP_FILES := $(bc_dep_files)
+$(RenderScript_file_stamp): $(renderscript_sources_fullpath) $(LOCAL_RENDERSCRIPT_CC)
+	$(transform-renderscripts-to-cpp-and-bc)
+
+# include the dependency files (.d/.P) generated by llvm-rs-cc.
+$(call include-depfile,$(RenderScript_file_stamp).P,$(RenderScript_file_stamp))
+
+LOCAL_INTERMEDIATE_TARGETS += $(RenderScript_file_stamp)
+
+# Names of the reflected C++ sources (ScriptC_<name>.cpp) for each script.
+rs_generated_cpps := $(addprefix \
+ $(renderscript_intermediate)/ScriptC_,$(patsubst %.fs,%.cpp, $(patsubst %.rs,%.cpp, \
+ $(notdir $(renderscript_sources)))))
+
+$(call track-src-file-gen,$(renderscript_sources),$(rs_generated_cpps))
+
+# This is just a dummy rule to make sure gmake doesn't skip updating the dependents.
+$(rs_generated_cpps) : $(RenderScript_file_stamp)
+	@echo "Updated RS generated cpp file $@."
+	$(hide) touch $@
+
+my_c_includes += $(renderscript_intermediate)
+my_generated_sources += $(rs_generated_cpps)
+
+endif
+
+
+###########################################################
+## Compile the .proto files to .cc (or .c) and then to .o
+###########################################################
+proto_sources := $(filter %.proto,$(my_src_files))
+ifneq ($(proto_sources),)
+proto_gen_dir := $(generated_sources_dir)/proto
+
+# nanopb-c generates C; full/lite protobuf generates C++ via aprotoc.
+my_rename_cpp_ext :=
+ifneq (,$(filter nanopb-c nanopb-c-enable_malloc, $(LOCAL_PROTOC_OPTIMIZE_TYPE)))
+my_proto_source_suffix := .c
+my_proto_c_includes := external/nanopb-c
+my_protoc_flags := --nanopb_out=$(proto_gen_dir) \
+ --plugin=external/nanopb-c/generator/protoc-gen-nanopb
+else
+my_proto_source_suffix := $(LOCAL_CPP_EXTENSION)
+ifneq ($(my_proto_source_suffix),.cc)
+# aprotoc is hardcoded to write out only .cc file.
+# We need to rename the extension to $(LOCAL_CPP_EXTENSION) if it's not .cc.
+my_rename_cpp_ext := true
+endif
+my_proto_c_includes := external/protobuf/src
+my_cflags += -DGOOGLE_PROTOBUF_NO_RTTI
+my_protoc_flags := --cpp_out=$(proto_gen_dir)
+endif
+my_proto_c_includes += $(proto_gen_dir)
+
+proto_sources_fullpath := $(addprefix $(LOCAL_PATH)/, $(proto_sources))
+proto_generated_cpps := $(addprefix $(proto_gen_dir)/, \
+ $(patsubst %.proto,%.pb$(my_proto_source_suffix),$(proto_sources_fullpath)))
+
+# Ensure the transform-proto-to-cc rule is only defined once in multilib build.
+# (proto output lands in the shared generated_sources_dir, so the 32- and
+# 64-bit passes would otherwise define duplicate rules for the same files.)
+ifndef $(my_host)$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_proto_defined
+$(proto_generated_cpps): PRIVATE_PROTO_INCLUDES := $(TOP)
+$(proto_generated_cpps): PRIVATE_PROTOC_FLAGS := $(LOCAL_PROTOC_FLAGS) $(my_protoc_flags)
+$(proto_generated_cpps): PRIVATE_RENAME_CPP_EXT := $(my_rename_cpp_ext)
+$(proto_generated_cpps): $(proto_gen_dir)/%.pb$(my_proto_source_suffix): %.proto $(my_protoc_deps) $(PROTOC)
+	$(transform-proto-to-cc)
+
+$(my_host)$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_proto_defined := true
+endif
+# Ideally we can generate the source directly into $(intermediates).
+# But many Android.mks assume the .pb.hs are in $(generated_sources_dir).
+# As a workaround, we make a copy in the $(intermediates).
+proto_intermediate_dir := $(intermediates)/proto
+proto_intermediate_cpps := $(patsubst $(proto_gen_dir)/%,$(proto_intermediate_dir)/%,\
+ $(proto_generated_cpps))
+$(proto_intermediate_cpps) : $(proto_intermediate_dir)/% : $(proto_gen_dir)/% | $(ACP)
+	@echo "Copy: $@"
+	$(copy-file-to-target)
+	$(hide) cp $(basename $<).h $(basename $@).h
+$(call track-src-file-gen,$(proto_sources),$(proto_intermediate_cpps))
+
+my_generated_sources += $(proto_intermediate_cpps)
+
+my_c_includes += $(my_proto_c_includes)
+# Auto-export the generated proto source dir.
+my_export_c_include_dirs += $(my_proto_c_includes)
+
+# Link the protobuf runtime matching the chosen optimize type; NDK builds
+# must link statically since the platform shared libs aren't available.
+ifeq ($(LOCAL_PROTOC_OPTIMIZE_TYPE),nanopb-c-enable_malloc)
+ my_static_libraries += libprotobuf-c-nano-enable_malloc
+else ifeq ($(LOCAL_PROTOC_OPTIMIZE_TYPE),nanopb-c)
+ my_static_libraries += libprotobuf-c-nano
+else ifeq ($(LOCAL_PROTOC_OPTIMIZE_TYPE),full)
+ ifdef LOCAL_SDK_VERSION
+ my_static_libraries += libprotobuf-cpp-full-ndk
+ else
+ my_shared_libraries += libprotobuf-cpp-full
+ endif
+else
+ ifdef LOCAL_SDK_VERSION
+ my_static_libraries += libprotobuf-cpp-lite-ndk
+ else
+ my_shared_libraries += libprotobuf-cpp-lite
+ endif
+endif
+endif # $(proto_sources) non-empty
+
+###########################################################
+## Compile the .dbus-xml files to c++ headers
+###########################################################
+dbus_definitions := $(filter %.dbus-xml,$(my_src_files))
+dbus_generated_headers :=
+ifneq ($(dbus_definitions),)
+
+dbus_definition_paths := $(addprefix $(LOCAL_PATH)/,$(dbus_definitions))
+dbus_service_config := $(filter %dbus-service-config.json,$(my_src_files))
+dbus_service_config_path := $(addprefix $(LOCAL_PATH)/,$(dbus_service_config))
+
+# Mark these source files as not producing objects
+$(call track-src-file-obj,$(dbus_definitions) $(dbus_service_config),)
+
+dbus_gen_dir := $(generated_sources_dir)/dbus_bindings
+
+# Proxy mode: one combined dbus-proxies.h under the proxy prefix.
+# Adaptor mode: one header per .dbus-xml definition.
+ifdef LOCAL_DBUS_PROXY_PREFIX
+dbus_header_dir := $(dbus_gen_dir)/include/$(LOCAL_DBUS_PROXY_PREFIX)
+dbus_headers := dbus-proxies.h
+else
+dbus_header_dir := $(dbus_gen_dir)
+dbus_headers := $(patsubst %.dbus-xml,%.h,$(dbus_definitions))
+endif
+dbus_generated_headers := $(addprefix $(dbus_header_dir)/,$(dbus_headers))
+
+# Ensure that we only define build rules once in multilib builds.
+ifndef $(my_prefix)_$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_dbus_bindings_defined
+$(my_prefix)_$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_dbus_bindings_defined := true
+
+$(dbus_generated_headers): PRIVATE_MODULE := $(LOCAL_MODULE)
+$(dbus_generated_headers): PRIVATE_DBUS_SERVICE_CONFIG := $(dbus_service_config_path)
+$(dbus_generated_headers) : $(dbus_service_config_path) $(DBUS_GENERATOR)
+ifdef LOCAL_DBUS_PROXY_PREFIX
+$(dbus_generated_headers) : $(dbus_definition_paths)
+	$(generate-dbus-proxies)
+else
+$(dbus_generated_headers) : $(dbus_header_dir)/%.h : $(LOCAL_PATH)/%.dbus-xml
+	$(generate-dbus-adaptors)
+endif # $(LOCAL_DBUS_PROXY_PREFIX)
+endif # $(my_prefix)_$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_dbus_bindings_defined
+
+ifdef LOCAL_DBUS_PROXY_PREFIX
+# Auto-export the generated dbus proxy directory.
+my_export_c_include_dirs += $(dbus_gen_dir)/include
+my_c_includes += $(dbus_gen_dir)/include
+else
+my_export_c_include_dirs += $(dbus_header_dir)
+my_c_includes += $(dbus_header_dir)
+endif # $(LOCAL_DBUS_PROXY_PREFIX)
+
+# Headers only; they produce no objects but other sources depend on them.
+my_generated_sources += $(dbus_generated_headers)
+
+endif # $(dbus_definitions) non-empty
+
+
+###########################################################
+## AIDL: Compile .aidl files to .cpp and .h files
+###########################################################
+aidl_src := $(strip $(filter %.aidl,$(my_src_files)))
+aidl_gen_cpp :=
+ifneq ($(aidl_src),)
+
+# Use the intermediates directory to avoid writing our own .cpp -> .o rules.
+aidl_gen_cpp_root := $(intermediates)/aidl-generated/src
+aidl_gen_include_root := $(intermediates)/aidl-generated/include
+
+# Multi-architecture builds have distinct intermediates directories.
+# Thus we'll actually generate source for each architecture.
+# define-aidl-cpp-rule appends each generated .cpp path to aidl_gen_cpp.
+$(foreach s,$(aidl_src),\
+ $(eval $(call define-aidl-cpp-rule,$(s),$(aidl_gen_cpp_root),aidl_gen_cpp)))
+$(foreach cpp,$(aidl_gen_cpp), \
+ $(call include-depfile,$(addsuffix .aidl.P,$(basename $(cpp))),$(cpp)))
+$(call track-src-file-gen,$(aidl_src),$(aidl_gen_cpp))
+
+$(aidl_gen_cpp) : PRIVATE_MODULE := $(LOCAL_MODULE)
+$(aidl_gen_cpp) : PRIVATE_HEADER_OUTPUT_DIR := $(aidl_gen_include_root)
+$(aidl_gen_cpp) : PRIVATE_AIDL_FLAGS := $(addprefix -I,$(LOCAL_AIDL_INCLUDES))
+
+# Add generated headers to include paths.
+my_c_includes += $(aidl_gen_include_root)
+my_export_c_include_dirs += $(aidl_gen_include_root)
+# Pick up the generated C++ files later for transformation to .o files.
+my_generated_sources += $(aidl_gen_cpp)
+
+endif # $(aidl_src) non-empty
+
+###########################################################
+## Compile the .vts files to .cc (or .c) and then to .o
+###########################################################
+# Mirrors the AIDL flow above, for VTS interface definitions.
+
+vts_src := $(strip $(filter %.vts,$(my_src_files)))
+vts_gen_cpp :=
+ifneq ($(vts_src),)
+
+# Use the intermediates directory to avoid writing our own .cpp -> .o rules.
+vts_gen_cpp_root := $(intermediates)/vts-generated/src
+vts_gen_include_root := $(intermediates)/vts-generated/include
+
+# Multi-architecture builds have distinct intermediates directories.
+# Thus we'll actually generate source for each architecture.
+$(foreach s,$(vts_src),\
+ $(eval $(call define-vts-cpp-rule,$(s),$(vts_gen_cpp_root),vts_gen_cpp)))
+$(foreach cpp,$(vts_gen_cpp), \
+ $(call include-depfile,$(addsuffix .vts.P,$(basename $(cpp))),$(cpp)))
+$(call track-src-file-gen,$(vts_src),$(vts_gen_cpp))
+
+$(vts_gen_cpp) : PRIVATE_MODULE := $(LOCAL_MODULE)
+$(vts_gen_cpp) : PRIVATE_HEADER_OUTPUT_DIR := $(vts_gen_include_root)
+$(vts_gen_cpp) : PRIVATE_VTS_FLAGS := $(addprefix -I,$(LOCAL_VTS_INCLUDES))
+
+# Add generated headers to include paths.
+my_c_includes += $(vts_gen_include_root)
+my_export_c_include_dirs += $(vts_gen_include_root)
+# Pick up the generated C++ files later for transformation to .o files.
+my_generated_sources += $(vts_gen_cpp)
+
+endif # $(vts_src) non-empty
+
+###########################################################
+## YACC: Compile .y/.yy files to .c/.cpp and then to .o.
+###########################################################
+
+# .y inputs generate C; .yy inputs generate C++ (see below).
+y_yacc_sources := $(filter %.y,$(my_src_files))
+y_yacc_cs := $(addprefix \
+ $(intermediates)/,$(y_yacc_sources:.y=.c))
+ifneq ($(y_yacc_cs),)
+$(y_yacc_cs): $(intermediates)/%.c: \
+ $(TOPDIR)$(LOCAL_PATH)/%.y \
+ $(my_additional_dependencies)
+	$(call transform-y-to-c-or-cpp)
+$(call track-src-file-gen,$(y_yacc_sources),$(y_yacc_cs))
+
+my_generated_sources += $(y_yacc_cs)
+endif
+
+yy_yacc_sources := $(filter %.yy,$(my_src_files))
+yy_yacc_cpps := $(addprefix \
+ $(intermediates)/,$(yy_yacc_sources:.yy=$(LOCAL_CPP_EXTENSION)))
+ifneq ($(yy_yacc_cpps),)
+$(yy_yacc_cpps): $(intermediates)/%$(LOCAL_CPP_EXTENSION): \
+ $(TOPDIR)$(LOCAL_PATH)/%.yy \
+ $(my_additional_dependencies)
+	$(call transform-y-to-c-or-cpp)
+$(call track-src-file-gen,$(yy_yacc_sources),$(yy_yacc_cpps))
+
+my_generated_sources += $(yy_yacc_cpps)
+endif
+
+###########################################################
+## LEX: Compile .l/.ll files to .c/.cpp and then to .o.
+###########################################################
+
+# Same split as yacc: .l -> C, .ll -> C++.
+l_lex_sources := $(filter %.l,$(my_src_files))
+l_lex_cs := $(addprefix \
+ $(intermediates)/,$(l_lex_sources:.l=.c))
+ifneq ($(l_lex_cs),)
+$(l_lex_cs): $(intermediates)/%.c: \
+ $(TOPDIR)$(LOCAL_PATH)/%.l
+	$(transform-l-to-c-or-cpp)
+$(call track-src-file-gen,$(l_lex_sources),$(l_lex_cs))
+
+my_generated_sources += $(l_lex_cs)
+endif
+
+ll_lex_sources := $(filter %.ll,$(my_src_files))
+ll_lex_cpps := $(addprefix \
+ $(intermediates)/,$(ll_lex_sources:.ll=$(LOCAL_CPP_EXTENSION)))
+ifneq ($(ll_lex_cpps),)
+$(ll_lex_cpps): $(intermediates)/%$(LOCAL_CPP_EXTENSION): \
+ $(TOPDIR)$(LOCAL_PATH)/%.ll
+	$(transform-l-to-c-or-cpp)
+$(call track-src-file-gen,$(ll_lex_sources),$(ll_lex_cpps))
+
+my_generated_sources += $(ll_lex_cpps)
+endif
+
+###########################################################
+## C++: Compile .cpp files to .o.
+###########################################################
+
+# we also do this on host modules, even though
+# it's not really arm, because there are files that are shared.
+# Sources suffixed ".arm" are compiled in arm (not thumb) mode.
+cpp_arm_sources := $(patsubst %$(LOCAL_CPP_EXTENSION).arm,%$(LOCAL_CPP_EXTENSION),$(filter %$(LOCAL_CPP_EXTENSION).arm,$(my_src_files)))
+dotdot_arm_sources := $(filter ../%,$(cpp_arm_sources))
+cpp_arm_sources := $(filter-out ../%,$(cpp_arm_sources))
+cpp_arm_objects := $(addprefix $(intermediates)/,$(cpp_arm_sources:$(LOCAL_CPP_EXTENSION)=.o))
+$(call track-src-file-obj,$(patsubst %,%.arm,$(cpp_arm_sources)),$(cpp_arm_objects))
+
+# For source files starting with ../, we remove all the ../ in the object file path,
+# to avoid object file escaping the intermediate directory.
+dotdot_arm_objects :=
+$(foreach s,$(dotdot_arm_sources),\
+ $(eval $(call compile-dotdot-cpp-file,$(s),\
+ $(my_additional_dependencies),\
+ dotdot_arm_objects)))
+$(call track-src-file-obj,$(patsubst %,%.arm,$(dotdot_arm_sources)),$(dotdot_arm_objects))
+
+dotdot_sources := $(filter ../%$(LOCAL_CPP_EXTENSION),$(my_src_files))
+dotdot_objects :=
+$(foreach s,$(dotdot_sources),\
+ $(eval $(call compile-dotdot-cpp-file,$(s),\
+ $(my_additional_dependencies),\
+ dotdot_objects)))
+$(call track-src-file-obj,$(dotdot_sources),$(dotdot_objects))
+
+cpp_normal_sources := $(filter-out ../%,$(filter %$(LOCAL_CPP_EXTENSION),$(my_src_files)))
+cpp_normal_objects := $(addprefix $(intermediates)/,$(cpp_normal_sources:$(LOCAL_CPP_EXTENSION)=.o))
+$(call track-src-file-obj,$(cpp_normal_sources),$(cpp_normal_objects))
+
+# Attach the appropriate arm/thumb mode and flags per object.
+$(dotdot_arm_objects) $(cpp_arm_objects): PRIVATE_ARM_MODE := $(arm_objects_mode)
+$(dotdot_arm_objects) $(cpp_arm_objects): PRIVATE_ARM_CFLAGS := $(arm_objects_cflags)
+$(dotdot_objects) $(cpp_normal_objects): PRIVATE_ARM_MODE := $(normal_objects_mode)
+$(dotdot_objects) $(cpp_normal_objects): PRIVATE_ARM_CFLAGS := $(normal_objects_cflags)
+
+cpp_objects := $(cpp_arm_objects) $(cpp_normal_objects)
+
+ifneq ($(strip $(cpp_objects)),)
+$(cpp_objects): $(intermediates)/%.o: \
+ $(TOPDIR)$(LOCAL_PATH)/%$(LOCAL_CPP_EXTENSION) \
+ $(my_additional_dependencies)
+	$(transform-$(PRIVATE_HOST)cpp-to-o)
+$(call include-depfiles-for-objs, $(cpp_objects))
+endif
+
+# The dotdot objects got their own rules via compile-dotdot-cpp-file above.
+cpp_objects += $(dotdot_arm_objects) $(dotdot_objects)
+
+###########################################################
+## C++: Compile generated .cpp files to .o.
+###########################################################
+
+gen_cpp_sources := $(filter %$(LOCAL_CPP_EXTENSION),$(my_generated_sources))
+gen_cpp_objects := $(gen_cpp_sources:%$(LOCAL_CPP_EXTENSION)=%.o)
+$(call track-gen-file-obj,$(gen_cpp_sources),$(gen_cpp_objects))
+
+ifneq ($(strip $(gen_cpp_objects)),)
+# Compile all generated files as thumb.
+# TODO: support compiling certain generated files as arm.
+$(gen_cpp_objects): PRIVATE_ARM_MODE := $(normal_objects_mode)
+$(gen_cpp_objects): PRIVATE_ARM_CFLAGS := $(normal_objects_cflags)
+$(gen_cpp_objects): $(intermediates)/%.o: \
+ $(intermediates)/%$(LOCAL_CPP_EXTENSION) \
+ $(my_additional_dependencies)
+	$(transform-$(PRIVATE_HOST)cpp-to-o)
+$(call include-depfiles-for-objs, $(gen_cpp_objects))
+endif
+
+###########################################################
+## S: Compile generated .S and .s files to .o.
+###########################################################
+
+# .S files are preprocessed (get dep files); .s files are not.
+gen_S_sources := $(filter %.S,$(my_generated_sources))
+gen_S_objects := $(gen_S_sources:%.S=%.o)
+$(call track-gen-file-obj,$(gen_S_sources),$(gen_S_objects))
+
+ifneq ($(strip $(gen_S_sources)),)
+$(gen_S_objects): $(intermediates)/%.o: $(intermediates)/%.S \
+ $(my_additional_dependencies)
+	$(transform-$(PRIVATE_HOST)s-to-o)
+$(call include-depfiles-for-objs, $(gen_S_objects))
+endif
+
+gen_s_sources := $(filter %.s,$(my_generated_sources))
+gen_s_objects := $(gen_s_sources:%.s=%.o)
+$(call track-gen-file-obj,$(gen_s_sources),$(gen_s_objects))
+
+ifneq ($(strip $(gen_s_objects)),)
+$(gen_s_objects): $(intermediates)/%.o: $(intermediates)/%.s \
+ $(my_additional_dependencies)
+	$(transform-$(PRIVATE_HOST)s-to-o-no-deps)
+endif
+
+gen_asm_objects := $(gen_S_objects) $(gen_s_objects)
+$(gen_asm_objects): PRIVATE_ARM_CFLAGS := $(normal_objects_cflags)
+
+###########################################################
+## o: Include generated .o files in output.
+###########################################################
+
+# Prebuilt objects listed directly in LOCAL_GENERATED_SOURCES.
+gen_o_objects := $(filter %.o,$(my_generated_sources))
+
+###########################################################
+## C: Compile .c files to .o.
+###########################################################
+
+# Same arm/thumb and ../-escaping handling as the C++ section above.
+c_arm_sources := $(patsubst %.c.arm,%.c,$(filter %.c.arm,$(my_src_files)))
+dotdot_arm_sources := $(filter ../%,$(c_arm_sources))
+c_arm_sources := $(filter-out ../%,$(c_arm_sources))
+c_arm_objects := $(addprefix $(intermediates)/,$(c_arm_sources:.c=.o))
+$(call track-src-file-obj,$(patsubst %,%.arm,$(c_arm_sources)),$(c_arm_objects))
+
+# For source files starting with ../, we remove all the ../ in the object file path,
+# to avoid object file escaping the intermediate directory.
+dotdot_arm_objects :=
+$(foreach s,$(dotdot_arm_sources),\
+ $(eval $(call compile-dotdot-c-file,$(s),\
+ $(my_additional_dependencies),\
+ dotdot_arm_objects)))
+$(call track-src-file-obj,$(patsubst %,%.arm,$(dotdot_arm_sources)),$(dotdot_arm_objects))
+
+dotdot_sources := $(filter ../%.c, $(my_src_files))
+dotdot_objects :=
+$(foreach s, $(dotdot_sources),\
+ $(eval $(call compile-dotdot-c-file,$(s),\
+ $(my_additional_dependencies),\
+ dotdot_objects)))
+$(call track-src-file-obj,$(dotdot_sources),$(dotdot_objects))
+
+c_normal_sources := $(filter-out ../%,$(filter %.c,$(my_src_files)))
+c_normal_objects := $(addprefix $(intermediates)/,$(c_normal_sources:.c=.o))
+$(call track-src-file-obj,$(c_normal_sources),$(c_normal_objects))
+
+$(dotdot_arm_objects) $(c_arm_objects): PRIVATE_ARM_MODE := $(arm_objects_mode)
+$(dotdot_arm_objects) $(c_arm_objects): PRIVATE_ARM_CFLAGS := $(arm_objects_cflags)
+$(dotdot_objects) $(c_normal_objects): PRIVATE_ARM_MODE := $(normal_objects_mode)
+$(dotdot_objects) $(c_normal_objects): PRIVATE_ARM_CFLAGS := $(normal_objects_cflags)
+
+c_objects := $(c_arm_objects) $(c_normal_objects)
+
+ifneq ($(strip $(c_objects)),)
+$(c_objects): $(intermediates)/%.o: $(TOPDIR)$(LOCAL_PATH)/%.c \
+ $(my_additional_dependencies)
+	$(transform-$(PRIVATE_HOST)c-to-o)
+$(call include-depfiles-for-objs, $(c_objects))
+endif
+
+c_objects += $(dotdot_arm_objects) $(dotdot_objects)
+
+###########################################################
+## C: Compile generated .c files to .o.
+###########################################################
+
+gen_c_sources := $(filter %.c,$(my_generated_sources))
+gen_c_objects := $(gen_c_sources:%.c=%.o)
+$(call track-gen-file-obj,$(gen_c_sources),$(gen_c_objects))
+
+ifneq ($(strip $(gen_c_objects)),)
+# Compile all generated files as thumb.
+# TODO: support compiling certain generated files as arm.
+$(gen_c_objects): PRIVATE_ARM_MODE := $(normal_objects_mode)
+$(gen_c_objects): PRIVATE_ARM_CFLAGS := $(normal_objects_cflags)
+$(gen_c_objects): $(intermediates)/%.o: $(intermediates)/%.c \
+ $(my_additional_dependencies)
+	$(transform-$(PRIVATE_HOST)c-to-o)
+$(call include-depfiles-for-objs, $(gen_c_objects))
+endif
+
+###########################################################
+## ObjC: Compile .m files to .o
+###########################################################
+
+# Note: unlike the C/C++ cases above, no "../" handling is done for
+# Objective-C sources.
+objc_sources := $(filter %.m,$(my_src_files))
+objc_objects := $(addprefix $(intermediates)/,$(objc_sources:.m=.o))
+$(call track-src-file-obj,$(objc_sources),$(objc_objects))
+
+ifneq ($(strip $(objc_objects)),)
+$(objc_objects): $(intermediates)/%.o: $(TOPDIR)$(LOCAL_PATH)/%.m \
+ $(my_additional_dependencies)
+ $(transform-$(PRIVATE_HOST)m-to-o)
+$(call include-depfiles-for-objs, $(objc_objects))
+endif
+
+###########################################################
+## ObjC++: Compile .mm files to .o
+###########################################################
+
+# Mirrors the .m rule above, using the mm-to-o transform.
+objcpp_sources := $(filter %.mm,$(my_src_files))
+objcpp_objects := $(addprefix $(intermediates)/,$(objcpp_sources:.mm=.o))
+$(call track-src-file-obj,$(objcpp_sources),$(objcpp_objects))
+
+ifneq ($(strip $(objcpp_objects)),)
+$(objcpp_objects): $(intermediates)/%.o: $(TOPDIR)$(LOCAL_PATH)/%.mm \
+ $(my_additional_dependencies)
+ $(transform-$(PRIVATE_HOST)mm-to-o)
+$(call include-depfiles-for-objs, $(objcpp_objects))
+endif
+
+###########################################################
+## AS: Compile .S files to .o.
+###########################################################
+
+# Capital-.S assembly sources; "../" sources again get rewritten object paths
+# (via compile-dotdot-s-file) so they stay inside the intermediates directory.
+asm_sources_S := $(filter %.S,$(my_src_files))
+dotdot_sources := $(filter ../%,$(asm_sources_S))
+asm_sources_S := $(filter-out ../%,$(asm_sources_S))
+asm_objects_S := $(addprefix $(intermediates)/,$(asm_sources_S:.S=.o))
+$(call track-src-file-obj,$(asm_sources_S),$(asm_objects_S))
+
+dotdot_objects_S :=
+$(foreach s,$(dotdot_sources),\
+ $(eval $(call compile-dotdot-s-file,$(s),\
+ $(my_additional_dependencies),\
+ dotdot_objects_S)))
+$(call track-src-file-obj,$(dotdot_sources),$(dotdot_objects_S))
+
+ifneq ($(strip $(asm_objects_S)),)
+$(asm_objects_S): $(intermediates)/%.o: $(TOPDIR)$(LOCAL_PATH)/%.S \
+ $(my_additional_dependencies)
+ $(transform-$(PRIVATE_HOST)s-to-o)
+$(call include-depfiles-for-objs, $(asm_objects_S))
+endif
+
+# Lowercase-.s assembly sources use the *-no-deps transforms: no depfiles are
+# generated or included for them (note there is no include-depfiles-for-objs
+# call in this branch).
+asm_sources_s := $(filter %.s,$(my_src_files))
+dotdot_sources := $(filter ../%,$(asm_sources_s))
+asm_sources_s := $(filter-out ../%,$(asm_sources_s))
+asm_objects_s := $(addprefix $(intermediates)/,$(asm_sources_s:.s=.o))
+$(call track-src-file-obj,$(asm_sources_s),$(asm_objects_s))
+
+dotdot_objects_s :=
+$(foreach s,$(dotdot_sources),\
+ $(eval $(call compile-dotdot-s-file-no-deps,$(s),\
+ $(my_additional_dependencies),\
+ dotdot_objects_s)))
+$(call track-src-file-obj,$(dotdot_sources),$(dotdot_objects_s))
+
+ifneq ($(strip $(asm_objects_s)),)
+$(asm_objects_s): $(intermediates)/%.o: $(TOPDIR)$(LOCAL_PATH)/%.s \
+ $(my_additional_dependencies)
+ $(transform-$(PRIVATE_HOST)s-to-o-no-deps)
+endif
+
+asm_objects := $(dotdot_objects_S) $(dotdot_objects_s) $(asm_objects_S) $(asm_objects_s)
+$(asm_objects): PRIVATE_ARM_CFLAGS := $(normal_objects_cflags)
+
+
+# .asm for x86/x86_64 needs to be compiled with yasm.
+asm_sources_asm := $(filter %.asm,$(my_src_files))
+ifneq ($(strip $(asm_sources_asm)),)
+asm_objects_asm := $(addprefix $(intermediates)/,$(asm_sources_asm:.asm=.o))
+$(asm_objects_asm): $(intermediates)/%.o: $(TOPDIR)$(LOCAL_PATH)/%.asm \
+ $(my_additional_dependencies)
+ $(transform-asm-to-o)
+$(call track-src-file-obj,$(asm_sources_asm),$(asm_objects_asm))
+
+asm_objects += $(asm_objects_asm)
+endif
+
+
+##########################################################
+## Set up installed module dependency
+## We cannot compute the full path of the LOCAL_SHARED_LIBRARIES for
+## they may customize their install path with LOCAL_MODULE_PATH
+##########################################################
+# Get the list of INSTALLED libraries as module names.
+# For LOCAL_SDK_VERSION builds the system shared libraries are not listed
+# here; they are resolved through the NDK paths further below.
+ifdef LOCAL_SDK_VERSION
+ installed_shared_library_module_names := \
+ $(my_shared_libraries)
+else
+ installed_shared_library_module_names := \
+ $(my_shared_libraries) $(my_system_shared_libraries)
+endif
+
+# The real dependency will be added after all Android.mks are loaded and the install paths
+# of the shared libraries are determined.
+# Record a "module:installed-path:lib1,lib2,..." triple for that later pass.
+ifdef LOCAL_INSTALLED_MODULE
+ifdef installed_shared_library_module_names
+$(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)DEPENDENCIES_ON_SHARED_LIBRARIES += \
+ $(my_register_name):$(LOCAL_INSTALLED_MODULE):$(subst $(space),$(comma),$(sort $(installed_shared_library_module_names)))
+endif
+endif
+
+
+####################################################
+## Import includes
+####################################################
+# Concatenate the export_includes files of every library this module links
+# against into a single import_includes file for this module.
+import_includes := $(intermediates)/import_includes
+import_includes_deps := $(strip \
+ $(foreach l, $(installed_shared_library_module_names), \
+ $(call intermediates-dir-for,SHARED_LIBRARIES,$(l),$(LOCAL_IS_HOST_MODULE),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes) \
+ $(foreach l, $(my_static_libraries) $(my_whole_static_libraries), \
+ $(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(LOCAL_IS_HOST_MODULE),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/export_includes))
+$(import_includes): PRIVATE_IMPORT_EXPORT_INCLUDES := $(import_includes_deps)
+# Regenerate when this module's makefile or any exported-includes file changes.
+$(import_includes) : $(LOCAL_MODULE_MAKEFILE_DEP) $(import_includes_deps)
+ @echo Import includes file: $@
+ $(hide) mkdir -p $(dir $@) && rm -f $@
+ifdef import_includes_deps
+ $(hide) for f in $(PRIVATE_IMPORT_EXPORT_INCLUDES); do \
+ cat $$f >> $@; \
+ done
+else
+ $(hide) touch $@
+endif
+
+###########################################################
+## Common object handling.
+###########################################################
+
+# Any listed source file that no compile rule above claimed (and that is not
+# a logtags file) indicates a LOCAL_SRC_FILES mistake -- warn about it.
+my_unused_src_files := $(filter-out $(logtags_sources) $(my_tracked_src_files),$(my_src_files) $(my_gen_src_files))
+ifneq ($(my_unused_src_files),)
+ $(warning $(LOCAL_MODULE_MAKEFILE): $(LOCAL_MODULE): Unused source files: $(my_unused_src_files))
+endif
+
+# some rules depend on asm_objects being first. If your code depends on
+# being first, it's reasonable to require it to be assembly
+normal_objects := \
+ $(asm_objects) \
+ $(cpp_objects) \
+ $(gen_cpp_objects) \
+ $(gen_asm_objects) \
+ $(c_objects) \
+ $(gen_c_objects) \
+ $(objc_objects) \
+ $(objcpp_objects)
+
+# Rebuild the object list in source-list order from the per-source tracking
+# variables (my_src_file_obj_*), and warn if the set of objects differs from
+# the legacy per-language-ordered list above.
+new_order_normal_objects := $(foreach f,$(my_src_files),$(my_src_file_obj_$(f)))
+new_order_normal_objects += $(foreach f,$(my_gen_src_files),$(my_src_file_obj_$(f)))
+
+ifneq ($(sort $(normal_objects)),$(sort $(new_order_normal_objects)))
+$(warning $(LOCAL_MODULE_MAKEFILE) Internal build system warning: New object list does not match old)
+$(info Only in old: $(filter-out $(new_order_normal_objects),$(sort $(normal_objects))))
+$(info Only in new: $(filter-out $(normal_objects),$(sort $(new_order_normal_objects))))
+endif
+
+# Opt-in switch to use the soong-style (source-order) object list.
+ifeq ($(BINARY_OBJECTS_ORDER),soong)
+normal_objects := $(new_order_normal_objects)
+endif
+
+normal_objects += $(addprefix $(TOPDIR)$(LOCAL_PATH)/,$(LOCAL_PREBUILT_OBJ_FILES))
+
+all_objects := $(normal_objects) $(gen_o_objects)
+
+# Cleanup file tracking: clear the per-source my_src_file_* variables that the
+# track-*-file-obj calls above defined, so they do not leak into the next
+# module's object-order check.
+# Bug fix: the loop variable is $(f); the original code expanded $(s) inside
+# the eval (a leftover binding from the compile loops above), so none of the
+# tracking variables were actually cleared.
+$(foreach f,$(my_tracked_gen_files),$(eval my_src_file_gen_$(f):=))
+my_tracked_gen_files :=
+$(foreach f,$(my_tracked_src_files),$(eval my_src_file_obj_$(f):=))
+my_tracked_src_files :=
+
+# Every module can see its own source directory, its intermediates directory,
+# and the shared generated-sources directory.
+my_c_includes += $(TOPDIR)$(LOCAL_PATH) $(intermediates) $(generated_sources_dir)
+
+ifndef LOCAL_SDK_VERSION
+ my_c_includes += $(JNI_H_INCLUDE)
+endif
+
+# all_objects includes gen_o_objects which were part of LOCAL_GENERATED_SOURCES;
+# use normal_objects here to avoid creating circular dependencies. This assumes
+# that custom build rules which generate .o files don't consume other generated
+# sources as input (or if they do they take care of that dependency themselves).
+$(normal_objects) : | $(my_generated_sources)
+# Ninja-based builds take a real dependency on import_includes; plain GNU make
+# uses an order-only dependency instead.
+ifeq ($(BUILDING_WITH_NINJA),true)
+$(all_objects) : $(import_includes)
+else
+$(all_objects) : | $(import_includes)
+endif
+ALL_C_CPP_ETC_OBJECTS += $(all_objects)
+
+
+###########################################################
+# Standard library handling.
+###########################################################
+
+###########################################################
+# The list of libraries that this module will link against are in
+# these variables. Each is a list of bare module names like "libc libm".
+#
+# LOCAL_SHARED_LIBRARIES
+# LOCAL_STATIC_LIBRARIES
+# LOCAL_WHOLE_STATIC_LIBRARIES
+#
+# We need to convert the bare names into the dependencies that
+# we'll use for LOCAL_BUILT_MODULE and LOCAL_INSTALLED_MODULE.
+# LOCAL_BUILT_MODULE should depend on the BUILT versions of the
+# libraries, so that simply building this module doesn't force
+# an install of a library. Similarly, LOCAL_INSTALLED_MODULE
+# should depend on the INSTALLED versions of the libraries so
+# that they get installed when this module does.
+###########################################################
+# NOTE:
+# WHOLE_STATIC_LIBRARIES are libraries that are pulled into the
+# module without leaving anything out, which is useful for turning
+# a collection of .a files into a .so file. Linking against a
+# normal STATIC_LIBRARY will only pull in code/symbols that are
+# referenced by the module. (see gcc/ld's --whole-archive option)
+###########################################################
+
+# Get the list of BUILT libraries, which are under
+# various intermediates directories.
+so_suffix := $($(my_prefix)SHLIB_SUFFIX)
+a_suffix := $($(my_prefix)STATIC_LIB_SUFFIX)
+
+ifdef LOCAL_SDK_VERSION
+built_shared_libraries := \
+ $(addprefix $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)OUT_INTERMEDIATE_LIBRARIES)/, \
+ $(addsuffix $(so_suffix), \
+ $(my_shared_libraries)))
+# Depend on .toc files (library interface summaries) so the module only
+# relinks when a library's exported interface changes.
+built_shared_library_deps := $(addsuffix .toc, $(built_shared_libraries))
+
+# Add the NDK libraries to the built module dependency
+my_system_shared_libraries_fullpath := \
+ $(my_ndk_stl_shared_lib_fullpath) \
+ $(addprefix $(my_ndk_sysroot_lib)/, \
+ $(addsuffix $(so_suffix), $(my_system_shared_libraries)))
+
+built_shared_libraries += $(my_system_shared_libraries_fullpath)
+else
+built_shared_libraries := \
+ $(addprefix $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)OUT_INTERMEDIATE_LIBRARIES)/, \
+ $(addsuffix $(so_suffix), \
+ $(installed_shared_library_module_names)))
+ifdef LOCAL_IS_HOST_MODULE
+# Disable .toc optimization for host modules: we may run the host binaries during the build process
+# and the libraries' implementation matters.
+built_shared_library_deps := $(built_shared_libraries)
+else
+built_shared_library_deps := $(addsuffix .toc, $(built_shared_libraries))
+endif
+my_system_shared_libraries_fullpath :=
+endif
+
+# Static libraries are referenced by their full intermediates path.
+built_static_libraries := \
+ $(foreach lib,$(my_static_libraries), \
+ $(call intermediates-dir-for, \
+ STATIC_LIBRARIES,$(lib),$(LOCAL_IS_HOST_MODULE),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/$(lib)$(a_suffix))
+
+ifdef LOCAL_SDK_VERSION
+built_static_libraries += $(my_ndk_stl_static_lib)
+endif
+
+built_whole_libraries := \
+ $(foreach lib,$(my_whole_static_libraries), \
+ $(call intermediates-dir-for, \
+ STATIC_LIBRARIES,$(lib),$(LOCAL_IS_HOST_MODULE),,$(LOCAL_2ND_ARCH_VAR_PREFIX),$(my_host_cross))/$(lib)$(a_suffix))
+
+# We don't care about installed static libraries, since the
+# libraries have already been linked into the module at that point.
+# We do, however, care about the NOTICE files for any static
+# libraries that we use. (see notice_files.mk)
+
+installed_static_library_notice_file_targets := \
+ $(foreach lib,$(my_static_libraries) $(my_whole_static_libraries), \
+ NOTICE-$(if $(LOCAL_IS_HOST_MODULE),HOST,TARGET)-STATIC_LIBRARIES-$(lib))
+
+# Default is -fno-rtti.
+ifeq ($(strip $(LOCAL_RTTI_FLAG)),)
+LOCAL_RTTI_FLAG := -fno-rtti
+endif
+
+###########################################################
+# Rule-specific variable definitions
+###########################################################
+
+# When building with clang, fold in the clang-specific flag variables
+# (global, per-arch, and per-bitness) and translate any GCC-only flags.
+ifeq ($(my_clang),true)
+my_cflags += $(LOCAL_CLANG_CFLAGS)
+my_conlyflags += $(LOCAL_CLANG_CONLYFLAGS)
+my_cppflags += $(LOCAL_CLANG_CPPFLAGS)
+my_cflags_no_override += $(GLOBAL_CLANG_CFLAGS_NO_OVERRIDE)
+my_cppflags_no_override += $(GLOBAL_CLANG_CPPFLAGS_NO_OVERRIDE)
+my_asflags += $(LOCAL_CLANG_ASFLAGS)
+my_ldflags += $(LOCAL_CLANG_LDFLAGS)
+my_cflags += $(LOCAL_CLANG_CFLAGS_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)) $(LOCAL_CLANG_CFLAGS_$(my_32_64_bit_suffix))
+my_conlyflags += $(LOCAL_CLANG_CONLYFLAGS_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)) $(LOCAL_CLANG_CONLYFLAGS_$(my_32_64_bit_suffix))
+my_cppflags += $(LOCAL_CLANG_CPPFLAGS_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)) $(LOCAL_CLANG_CPPFLAGS_$(my_32_64_bit_suffix))
+my_ldflags += $(LOCAL_CLANG_LDFLAGS_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)) $(LOCAL_CLANG_LDFLAGS_$(my_32_64_bit_suffix))
+my_asflags += $(LOCAL_CLANG_ASFLAGS_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)) $(LOCAL_CLANG_ASFLAGS_$(my_32_64_bit_suffix))
+my_cflags := $(call $(LOCAL_2ND_ARCH_VAR_PREFIX)convert-to-$(my_host)clang-flags,$(my_cflags))
+my_cppflags := $(call $(LOCAL_2ND_ARCH_VAR_PREFIX)convert-to-$(my_host)clang-flags,$(my_cppflags))
+my_asflags := $(call $(LOCAL_2ND_ARCH_VAR_PREFIX)convert-to-$(my_host)clang-flags,$(my_asflags))
+my_ldflags := $(call $(LOCAL_2ND_ARCH_VAR_PREFIX)convert-to-$(my_host)clang-flags,$(my_ldflags))
+endif
+
+# FDO (feedback-directed optimization) builds force -O2 and drop flags that
+# are incompatible with it.
+ifeq ($(my_fdo_build), true)
+ my_cflags := $(patsubst -Os,-O2,$(my_cflags))
+ fdo_incompatible_flags := -fno-early-inlining -finline-limit=%
+ my_cflags := $(filter-out $(fdo_incompatible_flags),$(my_cflags))
+endif
+
+# No one should ever use this flag. On GCC its mere presence will disable all
+# warnings, even those that are specified after it (contrary to typical warning
+# flag behavior). This circumvents CFLAGS_NO_OVERRIDE from forcibly enabling the
+# warnings that are *always* bugs.
+my_illegal_flags := -w
+my_cflags := $(filter-out $(my_illegal_flags),$(my_cflags))
+my_cppflags := $(filter-out $(my_illegal_flags),$(my_cppflags))
+my_conlyflags := $(filter-out $(my_illegal_flags),$(my_conlyflags))
+
+# Snapshot the accumulated flags into target-specific PRIVATE_* variables so
+# the recipes see this module's values even though the my_* variables are
+# reused by the next module.
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_YACCFLAGS := $(LOCAL_YACCFLAGS)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_ASFLAGS := $(my_asflags)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_CONLYFLAGS := $(my_conlyflags)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_CFLAGS := $(my_cflags)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_CPPFLAGS := $(my_cppflags)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_CFLAGS_NO_OVERRIDE := $(my_cflags_no_override)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_CPPFLAGS_NO_OVERRIDE := $(my_cppflags_no_override)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_RTTI_FLAG := $(LOCAL_RTTI_FLAG)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_DEBUG_CFLAGS := $(debug_cflags)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_C_INCLUDES := $(my_c_includes)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_IMPORT_INCLUDES := $(import_includes)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_LDFLAGS := $(my_ldflags)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_LDLIBS := $(my_ldlibs)
+
+# this is really the way to get the files onto the command line instead
+# of using $^, because then LOCAL_ADDITIONAL_DEPENDENCIES doesn't work
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_ALL_SHARED_LIBRARIES := $(built_shared_libraries)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_ALL_STATIC_LIBRARIES := $(built_static_libraries)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_ALL_WHOLE_STATIC_LIBRARIES := $(built_whole_libraries)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_ALL_OBJECTS := $(all_objects)
+
+###########################################################
+# Define library dependencies.
+###########################################################
+# all_libraries is used for the dependencies on LOCAL_BUILT_MODULE.
+# Note it uses built_shared_library_deps (the .toc files where enabled),
+# not built_shared_libraries.
+all_libraries := \
+ $(built_shared_library_deps) \
+ $(my_system_shared_libraries_fullpath) \
+ $(built_static_libraries) \
+ $(built_whole_libraries)
+
+# Also depend on the notice files for any static libraries that
+# are linked into this module. This will force them to be installed
+# when this module is. (Order-only: the notice files don't affect the
+# module's own contents.)
+$(LOCAL_INSTALLED_MODULE): | $(installed_static_library_notice_file_targets)
+
+###########################################################
+# Export includes
+###########################################################
+# Write this module's exported include dirs (one "-I dir" per line) to an
+# export_includes file that dependent modules concatenate into their
+# import_includes (see the Import includes section above).
+export_includes := $(intermediates)/export_includes
+$(export_includes): PRIVATE_EXPORT_C_INCLUDE_DIRS := $(my_export_c_include_dirs)
+# By adding $(my_generated_sources) it makes sure the headers get generated
+# before any dependent source files get compiled.
+$(export_includes) : $(my_generated_sources) $(export_include_deps)
+ @echo Export includes file: $< -- $@
+ $(hide) mkdir -p $(dir $@) && rm -f $@.tmp
+ifdef my_export_c_include_dirs
+ $(hide) for d in $(PRIVATE_EXPORT_C_INCLUDE_DIRS); do \
+ echo "-I $$d" >> $@.tmp; \
+ done
+else
+ $(hide) touch $@.tmp
+endif
+# Under ninja, only replace $@ when the contents actually changed, so the
+# unchanged timestamp (with restat below) prevents needless relinks.
+ifeq ($(BUILDING_WITH_NINJA),true)
+ $(hide) if cmp -s $@.tmp $@ ; then \
+ rm $@.tmp ; \
+ else \
+ mv $@.tmp $@ ; \
+ fi
+else
+ mv $@.tmp $@ ;
+endif
+
+# Kati adds restat=1 to ninja. GNU make does nothing for this.
+.KATI_RESTAT: $(export_includes)
+
+# Make sure export_includes gets generated when you are running mm/mmm
+$(LOCAL_BUILT_MODULE) : | $(export_includes)
diff --git a/core/build-system.html b/core/build-system.html
new file mode 100644
index 0000000000000000000000000000000000000000..bddde6a4de45aa140da7196defe9f33734f0c651
--- /dev/null
+++ b/core/build-system.html
@@ -0,0 +1,953 @@
+
+
+
+
+
+
+
+ Android Build System
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Android Build System
+
+
+
+ Status:Draft
+ (as of May 18, 2006)
+
+
+
Contents
+
+
+
+
Objective
+
The primary goals of reworking the build system are (1) to make dependencies
+work more reliably, so that when files need to be rebuilt, they are, and (2) to
+improve performance of the build system so that unnecessary modules are not
+rebuilt, and so doing a top-level build when little or nothing needs to be done
+for a build takes as little time as possible.
+
+
Principles and Use Cases and Policy
+
Given the above objective, these are the overall principles and use cases
+that we will support. This is not an exhaustive list.
+
Multiple Targets
+
It needs to be possible to build the Android platform for multiple targets.
+This means:
+
+
The build system will support building tools for the host platform,
+ both ones that are used in the build process itself, and developer tools
+ like the simulator.
+
The build system will need to be able to build tools on Linux
+ (definitely Goobuntu and maybe Grhat), MacOS, and to some degree on
+ Windows.
+
The build system will need to be able to build the OS on Linux, and in
+ the short-term, MacOS. Note that this is a conscious decision to stop
+ building the OS on Windows. We are going to rely on the emulator there
+ and not attempt to use the simulator. This is a requirement change now
+ that the emulator story is looking brighter.
+
+
Non-Recursive Make
+
To achieve the objectives, the build system will be rewritten to use make
+non-recursively. For more background on this, read Recursive Make Considered Harmful. For those that don't
+want PDF, here is the
+Google translated version.
+
Rapid Compile-Test Cycles
+
When developing a component, for example a C++ shared library, it must be
+possible to easily rebuild just that component, and not have to wait more than a
+couple seconds for dependency checks, and not have to wait for unneeded
+components to be built.
+
Both Environment and Config File Based Settings
+
To set the target, and other options, some people on the team like to have a
+configuration file in a directory so they do not have an environment setup
+script to run, and others want an environment setup script to run so they can
+run builds in different terminals on the same tree, or switch back and forth
+in one terminal. We will support both.
+
Object File Directory / make clean
+
Object files and other intermediate files will be generated into a directory
+that is separate from the source tree. The goal is to have make clean be
+"rm -rf <obj_dir>" of the object directory in the tree root. The primary goals of
+this are to simplify searching the source tree, and to make "make clean" more
+reliable.
+
+
SDK
+
The SDK will be a tarball that will allow non-OS-developers to write apps.
+The apps will actually be built by first building the SDK, and then building
+the apps against that SDK. This will hopefully (1) make writing apps easier
+for us, because we won't have to rebuild the OS as much, and we can use the
+standard java-app development tools, and (2) allow us to dog-food the SDK, to
+help ensure its quality. Cedric has suggested (and I agree) that apps built
+from the SDK should be built with ant. Stay tuned for more details as we
+figure out exactly how this will work.
+
+
+Dependencies
+
Dependencies should all be automatic. Unless there is a custom tool involved
+(e.g. the webkit has several), the dependencies for shared and static libraries,
+.c, .cpp, .h, .java, java libraries, etc., should all work without intervention
+in the Android.mk file.
+
+
Hiding command lines
+
The default of the build system will be to hide the command lines being
+executed for make steps. It will be possible to override this by specifying
+the showcommands pseudo-target, and possibly by setting an environment
+variable.
+
+
Wildcard source files
+
Wildcarding source file will be discouraged. It may be useful in some
+scenarios. The default $(wildcard *) will not work due to the
+current directory being set to the root of the build tree.
+
+
Multiple targets in one directory
+
It will be possible to generate more than one target from a given
+subdirectory. For example, libutils generates a shared library for the target
+and a static library for the host.
+
+
Makefile fragments for modules
+
Android.mk is the standard name for the makefile fragments that
+control the building of a given module. Only the top directory should
+have a file named "Makefile".
+
+
Use shared libraries
+
Currently, the simulator is not built to use shared libraries. This should
+be fixed, and now is a good time to do it. This implies getting shared
+libraries to work on Mac OS.
+
+
+
Nice to Have
+
+
These things would be nice to have, and this is a good place to record them,
+however these are not promises.
+
+
Simultaneous Builds
+
The hope is to be able to do two builds for different combos in the same
+tree at the same time, but this is a stretch goal, not a requirement.
+Doing two builds in the same tree, not at the same time must work. (update:
+it's looking like we'll get the two builds at the same time working)
+
+
+Deleting headers (or other dependencies)
+
Problems can arise if you delete a header file that is referenced in
+".d" files. The easy way to deal with this is "make clean". There
+should be a better way to handle it. (from fadden)
+
One way of solving this is introducing a dependency on the directory. The
+problem is that this can create extra dependencies and slow down the build.
+It's a tradeoff.
+
+
Multiple builds
+
General way to perform builds across the set of known platforms. This
+would make it easy to perform multiple platform builds when testing a
+change, and allow a wide-scale "make clean". Right now the buildspec.mk
+or environment variables need to be updated before each build. (from fadden)
+
+
Aftermarket Locales and Carrier
+
We will eventually need to add support for creating locales and carrier
+customizations to the SDK, but that will not be addressed right now.
+
+
+
Usage
+
You've read (or scrolled past) all of the motivations for this build system,
+and you want to know how to use it. This is the place.
+
+
Your first build
+
+The Building document describes how to do
+builds.
+
+
build/envsetup.sh functions
+If you source the file build/envsetup.sh into your bash environment,
+. build/envsetup.sh, you'll get a few helpful shell functions:
+
+
+
printconfig - Prints the current configuration as set by the
+lunch and choosecombo commands.
+
m - Runs make from the top of the tree. This is
+useful because you can run make from within subdirectories. If you have the
+TOP environment variable set, it uses that. If you don't, it looks
+up the tree from the current directory, trying to find the top of the tree.
+
croot - cd to the top of the tree.
+
sgrep - grep for the regex you provide in all .c, .cpp, .h, .java,
+and .xml files below the current directory.
+
+
+
Build flavors/types
+
+When building for a particular product, it's often useful to have minor
+variations on what is ultimately the final release build. These are the
+currently-defined "flavors" or "types" (we need to settle on a real name
+for these).
+
+
+
+
+
+ eng
+
+
+ This is the default flavor. A plain "make" is the
+ same as "make eng". droid is an alias
+ for eng.
+
Installs non-APK modules that have no tags specified.
+
Installs APKs according to the product definition files, in
+ addition to tagged APKs.
+
ro.secure=0
+
ro.debuggable=1
+
ro.kernel.android.checkjni=1
+
adb is enabled by default.
+
+
+
+
+ user
+
+
+ "make user"
+
+ This is the flavor intended to be the final release bits.
+
+
Installs modules tagged with user.
+
Installs non-APK modules that have no tags specified.
+
Installs APKs according to the product definition files; tags
+ are ignored for APK modules.
+
ro.adb.secure=1
+
ro.secure=1
+
ro.debuggable=0
+
adb is disabled by default.
+
+
+
+
+ userdebug
+
+
+ "make userdebug"
+
+ The same as user, except:
+
+
Also installs modules tagged with debug.
+
ro.debuggable=1
+
adb is enabled by default.
+
+
+
+
+
+If you build one flavor and then want to build another, you should run
+"make installclean" between the two makes to guarantee that
+you don't pick up files installed by the previous flavor. "make
+clean" will also suffice, but it takes a lot longer.
+
+
+
+
More pseudotargets
+
Sometimes you want to just build one thing. The following pseudotargets are
+there for your convenience:
+
+
+
droid - make droid is the normal build. This target
+is here because the default target has to have a name.
+
all - make all builds everything make
+droid does, plus everything whose LOCAL_MODULE_TAGS do not
+include the "droid" tag. The build server runs this to make sure
+that everything that is in the tree and has an Android.mk builds.
+
clean-$(LOCAL_MODULE) and clean-$(LOCAL_PACKAGE_NAME) -
+Let you selectively clean one target. For example, you can type
+make clean-libutils and it will delete libutils.so and all of the
+intermediate files, or you can type make clean-Home and it will
+clean just the Home app.
+
clean - make clean deletes all of the output and
+intermediate files for this configuration. This is the same as rm -rf
+out/<configuration>/
+
clobber - make clobber deletes all of the output
+and intermediate files for all configurations. This is the same as
+rm -rf out/.
+
dataclean - make dataclean deletes contents of the data
+directory inside the current combo directory. This is especially useful on the
+simulator and emulator, where the persistent data remains present between
+builds.
+
showcommands - showcommands is a modifier target
+which causes the build system to show the actual command lines for the build
+steps, instead of the brief descriptions. Most people don't like seeing the
+actual commands, because they're quite long and hard to read, but if you need
+to for debugging purposes, you can add showcommands to the list
+of targets you build. For example make showcommands will build
+the default android configuration, and make runtime showcommands
+will build just the runtime, and targets that it depends on, while displaying
+the full command lines. Please note that there are a couple places where the
+commands aren't shown here. These are considered bugs, and should be fixed,
+but they're often hard to track down. Please let
+android-build-team know if you find
+any.
+
LOCAL_MODULE - Anything you specify as a LOCAL_MODULE
+in an Android.mk is made into a pseudotarget. For example, make
+runtime might be shorthand for make
+out/linux-x86-debug/system/bin/runtime (which would work), and
+make libkjs might be shorthand for make
+out/linux-x86-debug/system/lib/libkjs.so (which would also work).
+
targets - make targets will print a list of all of
+the LOCAL_MODULE names you can make.
+
+
+
How to add another component to the build - Android.mk templates
+
You have a new library, a new app, or a new executable. For each of the
+common types of modules, there is a corresponding file in the templates
+directory. It will usually be enough to copy one of these, and fill in your
+own values. Some of the more esoteric values are not included in the
+templates, but are instead just documented here, as is the documentation
+on using custom tools to generate files.
+
Mostly, you can just look for the TODO comments in the templates and do
+what it says. Please remember to delete the TODO comments when you're done
+to keep the files clean. The templates have minimal documentation in them,
+because they're going to be copied, and when that gets stale, the copies just
+won't get updated. So read on...
+
+
Apps
+
Use the templates/apps file.
+
+This template is pretty self-explanatory. See the variables below for more
+details.
+
+
Java Libraries
+
Use the templates/java_library file.
+
The interesting thing here is the value of LOCAL_MODULE, which becomes
+the name of the jar file. (Actually right now, we're not making jar files yet,
+just directories of .class files, but the directory is named according to
+what you put in LOCAL_MODULE). This name will be what goes in the
+LOCAL_JAVA_LIBRARIES variable in modules that depend on your java library.
+
+
C/C++ Executables
+
Use the templates/executable file, or the
+templates/executable_host file.
+
This template has a couple extra options that you usually don't need.
+Please delete the ones you don't need, and remove the TODO comments. It makes
+the rest of them easier to read, and you can always refer back to the templates
+if you need them again later.
+
By default, on the target these are built into /system/bin, and on the
+host, they're built into /host/bin. These can be overridden by setting
+LOCAL_MODULE_PATH or LOCAL_MODULE_RELATIVE_PATH. See
+Putting targets elsewhere
+for more.
+
+
Shared Libraries
+
Use the templates/shared_library file, or the
+templates/shared_library_host file.
+
Remember that on the target, we use shared libraries, and on the host,
+we use static libraries, since executable size isn't as big an issue, and it
+simplifies distribution in the SDK.
+
+
Static Libraries
+
Use the templates/static_library file, or the
+templates/static_library_host file.
+
Remember that on the target, we use shared libraries, and on the host,
+we use static libraries, since executable size isn't as big an issue, and it
+simplifies distribution in the SDK.
+
+
Using Custom Tools
+
If you have a tool that generates source files for you, it's possible
+to have the build system get the dependencies correct for it. Here are
+a couple of examples. $@ is the make built-in variable for
+"the current target." The red parts are the parts you'll
+need to change.
+
+
You need to put this after you have declared LOCAL_PATH and
+LOCAL_MODULE, because the $(local-generated-sources-dir)
+and $(local-host-generated-sources-dir) macros use these variables
+to determine where to put the files.
+
+
Example 1
+
Here, there is one generated file, called
+chartables.c, which doesn't depend on anything. And is built by the tool
+built to $(HOST_OUT_EXECUTABLES)/dftables. Note on the second to last line
+that a dependency is created on the tool.
Here, as a hypothetical example, we use cat as if it were to transform
+a file. Pretend that it does something useful. Note how we use a
+target-specific variable called PRIVATE_INPUT_FILE to store the name of the
+input file.
If you have several files that are all similar in
+name, and use the same tool, you can combine them. (here the *.lut.h files are
+the generated ones, and the *.cpp files are the input files)
Sometimes you need to set flags specifically for different platforms. Here
+is a list of which values the different build-system defined variables will be
+set to and some examples.
+
+
+
+ HOST_OS
+ linux
+ darwin
+
+
+ HOST_ARCH
+ x86
+ x86_64
+
+
+ HOST_BUILD_TYPE
+ release
+ debug
+
+
+
+
+ TARGET_ARCH
+ arm
+ arm64
+ mips
+ mips64
+ x86
+ x86_64
+
+
+ TARGET_BUILD_TYPE
+ release
+ debug
+
+
+
+
+
There are also special variables to use instead of conditionals. Many of the
+normal variables (LOCAL_SRC_FILES, LOCAL_CFLAGS, etc) can be conditionally added
+to with _{arch}, _{32|64}, and for the host, _{os}.
+
+
Some Examples
+
ifeq ($(TARGET_BUILD_TYPE),release)
+LOCAL_CFLAGS += -DNDEBUG=1
+endif
+
+LOCAL_CFLAGS_arm += -DTARGET_IS_ARM
+
+LOCAL_CFLAGS_64 += -DBIG_POINTER
+
+# from libutils
+# Use the futex based mutex and condition variable
+# implementation from android-arm because it's shared mem safe
+LOCAL_SRC_FILES_linux += futex_synchro.c
+LOCAL_LDLIBS_linux += -lrt -ldl
+
+
+
+
+
Putting modules elsewhere
+
If you have modules that normally go somewhere, and you need to have them
+build somewhere else, read this.
+
If you have modules that need to go in a subdirectory of their normal
+location, for example HAL modules that need to go in /system/lib/hw or
+/vendor/lib/hw, set LOCAL_MODULE_RELATIVE_PATH in your Android.mk, for
+example:
+
+LOCAL_MODULE_RELATIVE_PATH := hw
+
+
If you have modules that need to go in an entirely different location, for
+example the root filesystem instead of in /system, add these lines to your
+Android.mk:
For executables and libraries, you need to specify a
+LOCAL_UNSTRIPPED_PATH location if you specified a
+LOCAL_MODULE_PATH, because on target builds, we keep
+the unstripped executables so GDB can find the symbols.
+LOCAL_UNSTRIPPED_PATH is not necessary if you only specified
+LOCAL_MODULE_RELATIVE_PATH.
+
Look in core/envsetup.mk for all of the variables defining
+places to build things.
+
FYI: If you're installing an executable to /sbin, you probably also want to
+set LOCAL_FORCE_STATIC_EXECUTABLE := true in your Android.mk, which
+will force the linker to only accept static libraries.
+
+
+
Android.mk variables
+
These are the variables that you'll commonly see in Android.mk files, listed
+alphabetically.
+
But first, a note on variable naming:
+
+
LOCAL_ - These variables are set per-module. They are cleared
+ by the include $(CLEAR_VARS) line, so you can rely on them
+ being empty after including that file. Most of the variables you'll use
+ in most modules are LOCAL_ variables.
+
PRIVATE_ - These variables are make-target-specific variables. That
+ means they're only usable within the commands for that module. It also
+ means that they're unlikely to change behind your back from modules that
+ are included after yours. This
+ link to the make documentation
+ describes more about target-specific variables. Please note that there
+ are a couple of these lying around the tree that aren't prefixed with
+ PRIVATE_. It is safe, and they will be fixed as they are discovered.
+ Sorry for the confusion.
+
INTERNAL_ - These variables are critical to functioning of
+ the build system, so you shouldn't create variables named like this, and
+ you probably shouldn't be messing with these variables in your makefiles.
+
+
HOST_ and TARGET_ - These contain the directories
+ and definitions that are specific to either the host or the target builds.
+ Do not set variables that start with HOST_ or TARGET_ in your makefiles.
+
+
HOST_CROSS_ - These contain the directories and definitions that
+ are specific to cross-building host binaries. The common case is building
+ windows host tools on linux. Do not set variables that start with
+ HOST_CROSS_ in your makefiles.
+
+
BUILD_ and CLEAR_VARS - These contain the names of
+ well-defined template makefiles to include. Some examples are CLEAR_VARS
+ and BUILD_HOST_PACKAGE.
+
Any other name is fair-game for you to use in your Android.mk. However,
+ remember that this is a non-recursive build system, so it is possible that
+ your variable will be changed by another Android.mk included later, and be
+ different when the commands for your rule / module are executed.
+
+
+
+
LOCAL_ASSET_FILES
+
In Android.mk files that include $(BUILD_PACKAGE) set this
+to the set of files you want built into your app. Usually:
+
LOCAL_ASSET_FILES += $(call find-subdir-assets)
+
This will probably change when we switch to ant for the apps' build
+system.
+
+
LOCAL_CC
+
If you want to use a different C compiler for this module, set LOCAL_CC
+to the path to the compiler. If LOCAL_CC is blank, the appropriate default
+compiler is used.
+
+
LOCAL_CXX
+
If you want to use a different C++ compiler for this module, set LOCAL_CXX
+to the path to the compiler. If LOCAL_CXX is blank, the appropriate default
+compiler is used.
+
+
LOCAL_CFLAGS
+
If you have additional flags to pass into the C or C++ compiler, add
+them here. For example:
+
LOCAL_CFLAGS += -DLIBUTILS_NATIVE=1
+
+
LOCAL_CPPFLAGS
+
If you have additional flags to pass into only the C++ compiler, add
+them here. For example:
+
LOCAL_CPPFLAGS += -ffriend-injection
+LOCAL_CPPFLAGS is guaranteed to be after LOCAL_CFLAGS
+on the compile line, so you can use it to override flags listed in
+LOCAL_CFLAGS.
+
+
LOCAL_CPP_EXTENSION
+
If your C++ files end in something other than ".cpp",
+you can specify the custom extension here. For example:
+
LOCAL_CPP_EXTENSION := .cc
+Note that all C++ files for a given module must have the same
+extension; it is not currently possible to mix different extensions.
+
+
LOCAL_NO_DEFAULT_COMPILER_FLAGS
+
Normally, the compile line for C and C++ files includes global include
+paths and global cflags. If LOCAL_NO_DEFAULT_COMPILER_FLAGS
+is non-empty, none of the default includes or flags will be used when compiling
+C and C++ files in this module.
+LOCAL_C_INCLUDES, LOCAL_CFLAGS, and
+LOCAL_CPPFLAGS will still be used in this case, as will
+any DEBUG_CFLAGS that are defined for the module.
+
+
LOCAL_COPY_HEADERS
+
This will be going away.
+
The set of files to copy to the install include tree. You must also
+supply LOCAL_COPY_HEADERS_TO.
+
This is going away because copying headers messes up the error messages, and
+may lead to people editing those headers instead of the correct ones. It also
+makes it easier to do bad layering in the system, which we want to avoid. We
+also aren't doing a C/C++ SDK, so there is no ultimate requirement to copy any
+headers.
+
+
LOCAL_COPY_HEADERS_TO
+
This will be going away.
+
The directory within "include" to copy the headers listed in
+LOCAL_COPY_HEADERS to.
+
This is going away because copying headers messes up the error messages, and
+may lead to people editing those headers instead of the correct ones. It also
+makes it easier to do bad layering in the system, which we want to avoid. We
+also aren't doing a C/C++ SDK, so there is no ultimate requirement to copy any
+headers.
+
+
LOCAL_C_INCLUDES
+
Additional directories to instruct the C/C++ compilers to look for header
+files in. These paths are rooted at the top of the tree. Use
+LOCAL_PATH if you have subdirectories of your own that you
+want in the include paths. For example:
You should not add subdirectories of include to
+LOCAL_C_INCLUDES, instead you should reference those files
+in the #include statement with their subdirectories. For
+example:
There are some components that are doing this wrong, and should be cleaned
+up.
+
+
LOCAL_MODULE_TAGS
+
Set LOCAL_MODULE_TAGS to any number of whitespace-separated
+tags. If the tag list is empty or contains droid, the module
+will get installed as part of a make droid. Otherwise, it will
+only get installed by running make <your-module>
+or with the make all pseudotarget.
+
+
LOCAL_REQUIRED_MODULES
+
Set LOCAL_REQUIRED_MODULES to any number of whitespace-separated
+module names, like "libblah" or "Email". If this module is installed, all
+of the modules that it requires will be installed as well. This can be
+used to, e.g., ensure that necessary shared libraries or providers are
+installed when a given app is installed.
+
+
LOCAL_FORCE_STATIC_EXECUTABLE
+
If your executable should be linked statically, set
+LOCAL_FORCE_STATIC_EXECUTABLE:=true. There is a very short
+list of libraries that we have in static form (currently only libc). This is
+really only used for executables in /sbin on the root filesystem.
+
+
LOCAL_GENERATED_SOURCES
+
Files that you add to LOCAL_GENERATED_SOURCES will be
+automatically generated and then linked in when your module is built.
+See the Custom Tools template makefile for an
+example.
+
+
LOCAL_JAVACFLAGS
+
If you have additional flags to pass into the javac compiler, add
+them here. For example:
+
LOCAL_JAVACFLAGS += -Xlint:deprecation
+
+
LOCAL_JAVA_LIBRARIES
+
When linking Java apps and libraries, LOCAL_JAVA_LIBRARIES
+specifies which sets of java classes to include. Currently there are
+two of these: core and framework.
+In most cases, it will look like this:
+
LOCAL_JAVA_LIBRARIES := core framework
+
Note that setting LOCAL_JAVA_LIBRARIES is not necessary
+(and is not allowed) when building an APK with
+"include $(BUILD_PACKAGE)". The appropriate libraries
+will be included automatically.
+
+
LOCAL_LDFLAGS
+
You can pass additional flags to the linker by setting
+LOCAL_LDFLAGS. Keep in mind that the order of parameters is
+very important to ld, so test whatever you do on all platforms.
+
+
LOCAL_LDLIBS
+
LOCAL_LDLIBS allows you to specify additional libraries
+that are not part of the build for your executable or library. Specify
+the libraries you want in -lxxx format; they're passed directly to the
+link line. However, keep in mind that there will be no dependency generated
+for these libraries. It's most useful in simulator builds where you want
+to use a library preinstalled on the host. The linker (ld) is a particularly
+fussy beast, so it's sometimes necessary to pass other flags here if you're
+doing something sneaky. Some examples:
If your package doesn't have a manifest (AndroidManifest.xml), then
+set LOCAL_NO_MANIFEST:=true. The common resources package
+does this.
+
+
LOCAL_PACKAGE_NAME
+
LOCAL_PACKAGE_NAME is the name of an app. For example,
+Dialer, Contacts, etc. This will probably change or go away when we switch
+to an ant-based build system for the apps.
+
+
LOCAL_PATH
+
The directory your Android.mk file is in. You can set it by putting the
+following as the first line in your Android.mk:
+
LOCAL_PATH := $(my-dir)
+
The my-dir macro uses the
+MAKEFILE_LIST
+variable, so you must call it before you include any other makefiles. Also,
+consider that any subdirectories you include might reset LOCAL_PATH, so do your
+own stuff before you include them. This also means that if you try to write
+several include lines that reference LOCAL_PATH,
+it won't work, because those included makefiles might reset LOCAL_PATH.
+
+
LOCAL_POST_PROCESS_COMMAND
+
For host executables, you can specify a command to run on the module
+after it's been linked. You might have to go through some contortions
+to get variables right because of early or late variable evaluation:
When including $(BUILD_MULTI_PREBUILT) or $(BUILD_HOST_PREBUILT), set these
+to executables that you want copied. They're located automatically into the
+right bin directory.
+
+
LOCAL_PREBUILT_LIBS
+
When including $(BUILD_MULTI_PREBUILT) or $(BUILD_HOST_PREBUILT), set these
+to libraries that you want copied. They're located automatically into the
+right lib directory.
+
+
LOCAL_SHARED_LIBRARIES
+
These are the libraries you directly link against. You don't need to
+pass transitively included libraries. Specify the name without the suffix:
The build system looks at LOCAL_SRC_FILES to know what source
+files to compile -- .cpp .c .y .l .java. For lex and yacc files, it knows
+how to correctly do the intermediate .h and .c/.cpp files automatically. If
+the files are in a subdirectory of the one containing the Android.mk, prefix
+them with the directory name:
These are the static libraries that you want to include in your module.
+Mostly, we use shared libraries, but there are a couple of places, like
+executables in sbin and host executables where we use static libraries instead.
+
LOCAL_MODULE is the name of what's supposed to be generated
+from your Android.mk. For example, for libkjs, the LOCAL_MODULE
+is "libkjs" (the build system adds the appropriate suffix -- .so .dylib .dll).
+For app modules, use LOCAL_PACKAGE_NAME instead of
+LOCAL_MODULE. We're planning on switching to ant for the apps,
+so this might become moot.
+
+
LOCAL_MODULE_PATH
+
Instructs the build system to put the module somewhere other than what's
+normal for its type. If you override this, make sure you also set
+LOCAL_UNSTRIPPED_PATH if it's an executable or a shared library
+so the unstripped binary has somewhere to go. An error will occur if you forget
+to.
Instructs the build system to put the module in a subdirectory under the
+directory that is normal for its type. If you set this you do not need to
+set LOCAL_UNSTRIPPED_PATH, the unstripped binaries will also use
+the relative path.
This specifies which OSes are supported by this host module. It is not used
+for target builds. The accepted values here are combinations of
+linux, darwin, and windows. By default,
+linux and darwin (MacOS) are considered to be supported. If a module should
+build under windows, you must specify windows, and any others to be supported.
+Some examples:
+
LOCAL_MODULE_HOST_OS := linux
+LOCAL_MODULE_HOST_OS := darwin linux windows
+
+
LOCAL_UNSTRIPPED_PATH
+
Instructs the build system to put the unstripped version of the module
+somewhere other than what's normal for its type. Usually, you override this
+because you overrode LOCAL_MODULE_PATH for an executable or a
+shared library. If you overrode LOCAL_MODULE_PATH, but not
+LOCAL_UNSTRIPPED_PATH, an error will occur.
These are the static libraries that you want to include in your module without allowing
+the linker to remove dead code from them. This is mostly useful if you want to add a static library
+to a shared library and have the static library's content exposed from the shared library.
+
Any flags to pass to invocations of yacc for your module. A known limitation
+here is that the flags will be the same for all invocations of YACC for your
+module. This can be fixed. If you ever need it to be, just ask.
+
LOCAL_YACCFLAGS := -p kjsyy
+
+
+
+
Implementation Details
+
+
You should never have to touch anything in the config directory unless
+you're adding a new platform, new tools, or adding new features to the
+build system. In general, please consult with the build system owner(s)
+(android-build-team) before you go
+mucking around in here. That said, here are some notes on what's going on
+under the hood.
+
+
Environment Setup / buildspec.mk Versioning
+
In order to make it easier for people when the build system changes, when
+it is necessary to make changes to buildspec.mk or to rerun the environment
+setup scripts, they contain a version number in the variable
+BUILD_ENV_SEQUENCE_NUMBER. If this variable does not match what the build
+system expects, it fails, printing an error message explaining what happened.
+If you make a change that requires an update, you need to update two places
+so this message will be printed.
+
+
In core/envsetup.mk, increment the
+ CORRECT_BUILD_ENV_SEQUENCE_NUMBER definition.
+
In buildspec.mk.default, update the BUILD_ENV_SEQUENCE_NUMBER
+ definition to match the one in core/envsetup.mk
+
+The scripts automatically get the value from the build system, so they will
+trigger the warning as well.
+
+
+
Additional makefile variables
+
You probably shouldn't use these variables. Please consult
+android-build-team before using them.
+These are mostly there for workarounds for other issues, or things that aren't
+completely done right.
+
+
LOCAL_ADDITIONAL_DEPENDENCIES
+
If your module needs to depend on anything else that
+isn't actually built in to it, you can add those make targets to
+LOCAL_ADDITIONAL_DEPENDENCIES. Usually this is a workaround
+for some other dependency that isn't created automatically.
+
+
LOCAL_BUILT_MODULE
+
This should not be used, since multiple binaries are now
+created from a single module definition.
+
When a module is built, the module is created in an intermediate
+directory then copied to its final location. LOCAL_BUILT_MODULE is
+the full path to the intermediate file. See LOCAL_INSTALLED_MODULE
+for the path to the final installed location of the module.
+
+
LOCAL_IS_HOST_MODULE
+
Set by the host_xxx.mk includes to tell base_rules.mk and the other
+includes that we're building for the host.
+
+
LOCAL_INSTALLED_MODULE
+
This should not be used, since multiple binaries are now
+created from a single module definition.
+
The fully qualified path name of the final location of the module.
+See LOCAL_BUILT_MODULE for the location of the intermediate file that
+the make rules should actually be constructing.
+
+
LOCAL_MODULE_CLASS
+
Which kind of module this is. This variable is used to construct other
+variable names used to locate the modules. See base_rules.mk and
+envsetup.mk.
+
+
LOCAL_MODULE_SUFFIX
+
The suffix that will be appended to LOCAL_MODULE to form
+LOCAL_MODULE_NAME. For example, .so, .a, .dylib.
+
+
LOCAL_STRIP_MODULE
+
If set to true (the default), the binary will be stripped and a debug
+link will be set up so that GDB will still work. If set to no_debuglink,
+the binary will be stripped, but no debug link will be added. If set to
+keep_symbols, it will strip the debug information, but keep the symbol table.
+Any other value will prevent stripping.
+
+
LOCAL_SYSTEM_SHARED_LIBRARIES
+
Used while building the base libraries: libc, libm, libdl. Usually
+it should be set to "none," as it is in $(CLEAR_VARS). When building
+these libraries, it's set to the ones they link against. For example,
+libc, libstdc++ and libdl don't link against anything, and libm links against
+libc. Normally, when the value is none, these libraries are automatically
+linked in to executables and libraries, so you don't need to specify them
+manually.
+
+
+
+
diff --git a/core/build_id.mk b/core/build_id.mk
new file mode 100644
index 0000000000000000000000000000000000000000..072f442d9eb5a323ea3805d1942622cb7342ca38
--- /dev/null
+++ b/core/build_id.mk
@@ -0,0 +1,21 @@
+#
+# Copyright (C) 2008 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# BUILD_ID is usually used to specify the branch name
+# (like "MAIN") or a branch name and a release candidate
+# (like "CRB01"). It must be a single word, and is
+# capitalized by convention.
+
+export BUILD_ID=N6F26Q
diff --git a/core/ccache.mk b/core/ccache.mk
new file mode 100644
index 0000000000000000000000000000000000000000..5c2ae23da6f6a4f72cf81cfcd397ee187d26feb9
--- /dev/null
+++ b/core/ccache.mk
@@ -0,0 +1,52 @@
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+ifneq ($(filter-out false,$(USE_CCACHE)),)
+ # The default check uses size and modification time, causing false misses
+ # since the mtime depends on when the repo was checked out
+ export CCACHE_COMPILERCHECK := content
+
+ # See man page, optimizations to get more cache hits
+ # implies that __DATE__ and __TIME__ are not critical for functionality.
+ # Ignore include file modification time since it will depend on when
+ # the repo was checked out
+ export CCACHE_SLOPPINESS := time_macros,include_file_mtime,file_macro
+
+ # Turn all preprocessor absolute paths into relative paths.
+ # Fixes absolute paths in preprocessed source due to use of -g.
+ # We don't really use system headers much so the rootdir is
+ # fine; ensures these paths are relative for all Android trees
+ # on a workstation.
+ export CCACHE_BASEDIR := /
+
+ # Workaround for ccache with clang.
+ # See http://petereisentraut.blogspot.com/2011/09/ccache-and-clang-part-2.html
+ export CCACHE_CPP2 := true
+
+ CCACHE_HOST_TAG := $(HOST_PREBUILT_TAG)
+ ccache := prebuilts/misc/$(CCACHE_HOST_TAG)/ccache/ccache
+ # Check that the executable is here.
+ ccache := $(strip $(wildcard $(ccache)))
+ ifdef ccache
+ ifndef CC_WRAPPER
+ CC_WRAPPER := $(ccache)
+ endif
+ ifndef CXX_WRAPPER
+ CXX_WRAPPER := $(ccache)
+ endif
+ ccache =
+ endif
+endif
diff --git a/core/checktree b/core/checktree
new file mode 100755
index 0000000000000000000000000000000000000000..b0b9cfab6d27edc1ebc7e462459342ea7e4760a2
--- /dev/null
+++ b/core/checktree
@@ -0,0 +1,113 @@
+#!/usr/bin/python -E
+
+import sys, os, re
+
+excludes = [r'.*?/\.obj.*?',
+ r'.*?~',
+ r'.*?\/.DS_Store',
+ r'.*?\/.gdb_history',
+ r'.*?\/buildspec.mk',
+ r'.*?/\..*?\.swp',
+ r'.*?/out/.*?',
+ r'.*?/install/.*?']
+
+excludes_compiled = map(re.compile, excludes)
+
+def filter_excludes(str):
+ for e in excludes_compiled:
+ if e.match(str):
+ return False
+ return True
+
+def split_perforce_parts(s):
+ spaces = ((s.count(" ") + 1) / 3) * 2
+ pos = 0
+ while spaces > 0:
+ pos = s.find(" ", pos) + 1
+ spaces = spaces - 1
+ return s[pos:]
+
+def quotate(s):
+ return '"' + s + '"'
+
+class PerforceError(Exception):
+ def __init__(self,value):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
+
+
+def run(command, regex, filt):
+ def matchit(s):
+ m = regex_compiled.match(s)
+ if m:
+ return m.group(1)
+ else:
+ return ""
+ def filterit(s):
+ if filt_compiled.match(s):
+ return True
+ else:
+ return False
+
+ fd = os.popen(command);
+ lines = fd.readlines()
+ status = fd.close()
+ if status:
+ raise PerforceError("error calling " + command)
+
+ regex_compiled = re.compile(regex)
+ filt_compiled = re.compile(filt)
+
+ if len(lines) >= 1:
+ lines = filter(filterit, lines)
+ if len(lines) >= 1:
+ return map(matchit, lines)
+ return None
+
+try:
+ if len(sys.argv) == 1:
+ do_exclude = True
+ elif len(sys.argv) == 2 and sys.argv[1] == "-a":
+ do_exclude = False
+ else:
+ print "usage: checktree [-a]"
+ print " -a don't filter common crud in the tree"
+ sys.exit(1)
+
+ have = run("p4 have ...", r'[^#]+#[0-9]+ - (.*)', r'.*')
+
+ cwd = os.getcwd()
+ files = run("find . -not -type d", r'.(.*)', r'.*')
+ files = map(lambda s: cwd+s, files)
+
+ added_depot_path = run("p4 opened ...", r'([^#]+)#.*', r'.*?#[0-9]+ - add .*');
+ added = []
+ if added_depot_path:
+ added_depot_path = map(quotate, added_depot_path)
+
+ where = "p4 where " + " ".join(added_depot_path)
+ added = run(where, r'(.*)', r'.*')
+ added = map(split_perforce_parts, added)
+
+ extras = []
+
+ # Python 2.3 -- still default on Mac OS X -- does not have set()
+ # Make dict's here to support the "in" operations below
+ have = dict().fromkeys(have, 1)
+ added = dict().fromkeys(added, 1)
+
+ for file in files:
+ if not file in have:
+ if not file in added:
+ extras.append(file)
+
+ if do_exclude:
+ extras = filter(filter_excludes, extras)
+
+ for s in extras:
+ print s.replace(" ", "\\ ")
+
+except PerforceError, e:
+ sys.exit(2)
+
diff --git a/core/clang/HOST_CROSS_x86.mk b/core/clang/HOST_CROSS_x86.mk
new file mode 100644
index 0000000000000000000000000000000000000000..b78a0743bbec5c0d0f2bd8989390a0e75f8373b7
--- /dev/null
+++ b/core/clang/HOST_CROSS_x86.mk
@@ -0,0 +1,56 @@
+
+include $(BUILD_SYSTEM)/clang/x86.mk
+
+CLANG_CONFIG_x86_HOST_CROSS_TRIPLE := i686-pc-mingw32
+
+CLANG_CONFIG_x86_HOST_CROSS_EXTRA_ASFLAGS := \
+ $(CLANG_CONFIG_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_HOST_CROSS_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_x86_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_x86_HOST_CROSS_COMBO_EXTRA_ASFLAGS) \
+ -target $(CLANG_CONFIG_x86_HOST_CROSS_TRIPLE)
+
+CLANG_CONFIG_x86_HOST_CROSS_EXTRA_CFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_HOST_CROSS_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_x86_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_x86_HOST_CROSS_COMBO_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_x86_HOST_CROSS_EXTRA_ASFLAGS)
+
+CLANG_CONFIG_x86_HOST_CROSS_EXTRA_CONLYFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_HOST_CROSS_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_x86_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_x86_HOST_CROSS_COMBO_EXTRA_CONLYFLAGS)
+
+CLANG_CONFIG_x86_HOST_CROSS_EXTRA_CPPFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_HOST_CROSS_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_x86_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_x86_HOST_CROSS_COMBO_EXTRA_CPPFLAGS) \
+ -target $(CLANG_CONFIG_x86_HOST_CROSS_TRIPLE)
+
+CLANG_CONFIG_x86_HOST_CROSS_EXTRA_LDFLAGS := \
+ $(CLANG_CONFIG_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_HOST_CROSS_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_x86_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_x86_HOST_CROSS_COMBO_EXTRA_LDFLAGS) \
+ -target $(CLANG_CONFIG_x86_HOST_CROSS_TRIPLE)
+
+$(clang_2nd_arch_prefix)CLANG_HOST_CROSS_GLOBAL_CFLAGS := \
+ $(call convert-to-host-clang-flags,$($(clang_2nd_arch_prefix)HOST_CROSS_GLOBAL_CFLAGS)) \
+ $(CLANG_CONFIG_x86_HOST_CROSS_EXTRA_CFLAGS)
+
+$(clang_2nd_arch_prefix)CLANG_HOST_CROSS_GLOBAL_CONLYFLAGS := \
+ $(call convert-to-host-clang-flags,$($(clang_2nd_arch_prefix)HOST_CROSS_GLOBAL_CONLYFLAGS)) \
+ $(CLANG_CONFIG_x86_HOST_CROSS_EXTRA_CONLYFLAGS)
+
+$(clang_2nd_arch_prefix)CLANG_HOST_CROSS_GLOBAL_CPPFLAGS := \
+ $(call convert-to-host-clang-flags,$($(clang_2nd_arch_prefix)HOST_CROSS_GLOBAL_CPPFLAGS)) \
+ $(CLANG_CONFIG_x86_HOST_CROSS_EXTRA_CPPFLAGS)
+
+$(clang_2nd_arch_prefix)CLANG_HOST_CROSS_GLOBAL_LDFLAGS := \
+ $(call convert-to-host-clang-flags,$($(clang_2nd_arch_prefix)HOST_CROSS_GLOBAL_LDFLAGS)) \
+ $(CLANG_CONFIG_x86_HOST_CROSS_EXTRA_LDFLAGS)
+
+$(clang_2nd_arch_prefix)HOST_CROSS_LIBPROFILE_RT := $(LLVM_RTLIB_PATH)/libclang_rt.profile-i686.a
diff --git a/core/clang/HOST_CROSS_x86_64.mk b/core/clang/HOST_CROSS_x86_64.mk
new file mode 100644
index 0000000000000000000000000000000000000000..b6f2de97681f9629c8b7f36caeb058dd90425249
--- /dev/null
+++ b/core/clang/HOST_CROSS_x86_64.mk
@@ -0,0 +1,56 @@
+
+include $(BUILD_SYSTEM)/clang/x86_64.mk
+
+CLANG_CONFIG_x86_64_HOST_CROSS_TRIPLE := x86_64-pc-mingw32
+
+CLANG_CONFIG_x86_64_HOST_CROSS_EXTRA_ASFLAGS := \
+ $(CLANG_CONFIG_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_HOST_CROSS_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_x86_64_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_x86_64_HOST_CROSS_COMBO_EXTRA_ASFLAGS) \
+ -target $(CLANG_CONFIG_x86_64_HOST_CROSS_TRIPLE)
+
+CLANG_CONFIG_x86_64_HOST_CROSS_EXTRA_CFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_HOST_CROSS_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_x86_64_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_x86_64_HOST_CROSS_COMBO_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_x86_64_HOST_CROSS_EXTRA_ASFLAGS)
+
+CLANG_CONFIG_x86_64_HOST_CROSS_EXTRA_CONLYFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_HOST_CROSS_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_x86_64_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_x86_64_HOST_CROSS_COMBO_EXTRA_CONLYFLAGS)
+
+CLANG_CONFIG_x86_64_HOST_CROSS_EXTRA_CPPFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_HOST_CROSS_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_x86_64_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_x86_64_HOST_CROSS_COMBO_EXTRA_CPPFLAGS) \
+ -target $(CLANG_CONFIG_x86_64_HOST_CROSS_TRIPLE)
+
+CLANG_CONFIG_x86_64_HOST_CROSS_EXTRA_LDFLAGS := \
+ $(CLANG_CONFIG_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_HOST_CROSS_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_x86_64_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_x86_64_HOST_CROSS_COMBO_EXTRA_LDFLAGS) \
+ -target $(CLANG_CONFIG_x86_64_HOST_CROSS_TRIPLE)
+
+$(clang_2nd_arch_prefix)CLANG_HOST_CROSS_GLOBAL_CFLAGS := \
+ $(call convert-to-host-clang-flags,$($(clang_2nd_arch_prefix)HOST_CROSS_GLOBAL_CFLAGS)) \
+ $(CLANG_CONFIG_x86_64_HOST_CROSS_EXTRA_CFLAGS)
+
+$(clang_2nd_arch_prefix)CLANG_HOST_CROSS_GLOBAL_CONLYFLAGS := \
+ $(call convert-to-host-clang-flags,$($(clang_2nd_arch_prefix)HOST_CROSS_GLOBAL_CONLYFLAGS)) \
+ $(CLANG_CONFIG_x86_64_HOST_CROSS_EXTRA_CONLYFLAGS)
+
+$(clang_2nd_arch_prefix)CLANG_HOST_CROSS_GLOBAL_CPPFLAGS := \
+ $(call convert-to-host-clang-flags,$($(clang_2nd_arch_prefix)HOST_CROSS_GLOBAL_CPPFLAGS)) \
+ $(CLANG_CONFIG_x86_64_HOST_CROSS_EXTRA_CPPFLAGS)
+
+$(clang_2nd_arch_prefix)CLANG_HOST_CROSS_GLOBAL_LDFLAGS := \
+ $(call convert-to-host-clang-flags,$($(clang_2nd_arch_prefix)HOST_CROSS_GLOBAL_LDFLAGS)) \
+ $(CLANG_CONFIG_x86_64_HOST_CROSS_EXTRA_LDFLAGS)
+
+$(clang_2nd_arch_prefix)HOST_CROSS_LIBPROFILE_RT := $(LLVM_RTLIB_PATH)/libclang_rt.profile-x86_64.a
diff --git a/core/clang/HOST_x86.mk b/core/clang/HOST_x86.mk
new file mode 100644
index 0000000000000000000000000000000000000000..0ec64adbafa64e316c99462a480af7352ba5139d
--- /dev/null
+++ b/core/clang/HOST_x86.mk
@@ -0,0 +1,77 @@
+
+include $(BUILD_SYSTEM)/clang/x86.mk
+include $(BUILD_SYSTEM)/clang/HOST_x86_common.mk
+
+ifeq ($(HOST_OS),linux)
+CLANG_CONFIG_x86_HOST_TRIPLE := i686-linux-gnu
+CLANG_CONFIG_x86_HOST_COMBO_EXTRA_ASFLAGS := $(CLANG_CONFIG_x86_LINUX_HOST_EXTRA_ASFLAGS)
+CLANG_CONFIG_x86_HOST_COMBO_EXTRA_CFLAGS := $(CLANG_CONFIG_x86_LINUX_HOST_EXTRA_CFLAGS)
+CLANG_CONFIG_x86_HOST_COMBO_EXTRA_CPPFLAGS := $(CLANG_CONFIG_x86_LINUX_HOST_EXTRA_CPPFLAGS)
+CLANG_CONFIG_x86_HOST_COMBO_EXTRA_LDFLAGS := $(CLANG_CONFIG_x86_LINUX_HOST_EXTRA_LDFLAGS)
+endif
+ifeq ($(HOST_OS),darwin)
+CLANG_CONFIG_x86_HOST_TRIPLE := i686-apple-darwin
+CLANG_CONFIG_x86_HOST_COMBO_EXTRA_ASFLAGS := $(CLANG_CONFIG_x86_DARWIN_HOST_EXTRA_ASFLAGS)
+CLANG_CONFIG_x86_HOST_COMBO_EXTRA_CFLAGS := $(CLANG_CONFIG_x86_DARWIN_HOST_EXTRA_CFLAGS)
+CLANG_CONFIG_x86_HOST_COMBO_EXTRA_CPPFLAGS := $(CLANG_CONFIG_x86_DARWIN_HOST_EXTRA_CPPFLAGS)
+CLANG_CONFIG_x86_HOST_COMBO_EXTRA_LDFLAGS := $(CLANG_CONFIG_x86_DARWIN_HOST_EXTRA_LDFLAGS)
+endif
+
+CLANG_CONFIG_x86_HOST_EXTRA_ASFLAGS := \
+ $(CLANG_CONFIG_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_HOST_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_x86_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_x86_HOST_COMBO_EXTRA_ASFLAGS) \
+ -target $(CLANG_CONFIG_x86_HOST_TRIPLE)
+
+CLANG_CONFIG_x86_HOST_EXTRA_CFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_HOST_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_x86_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_x86_HOST_COMBO_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_x86_HOST_EXTRA_ASFLAGS)
+
+CLANG_CONFIG_x86_HOST_EXTRA_CONLYFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_HOST_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_x86_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_x86_HOST_COMBO_EXTRA_CONLYFLAGS)
+
+CLANG_CONFIG_x86_HOST_EXTRA_CPPFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_HOST_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_x86_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_x86_HOST_COMBO_EXTRA_CPPFLAGS) \
+ -target $(CLANG_CONFIG_x86_HOST_TRIPLE)
+
+CLANG_CONFIG_x86_HOST_EXTRA_LDFLAGS := \
+ $(CLANG_CONFIG_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_HOST_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_x86_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_x86_HOST_COMBO_EXTRA_LDFLAGS) \
+ -target $(CLANG_CONFIG_x86_HOST_TRIPLE)
+
+define $(clang_2nd_arch_prefix)convert-to-host-clang-flags
+ $(strip \
+ $(call subst-clang-incompatible-x86-flags,\
+ $(filter-out $(CLANG_CONFIG_x86_UNKNOWN_CFLAGS),\
+ $(1))))
+endef
+
+$(clang_2nd_arch_prefix)CLANG_HOST_GLOBAL_CFLAGS := \
+ $(call $(clang_2nd_arch_prefix)convert-to-host-clang-flags,$($(clang_2nd_arch_prefix)HOST_GLOBAL_CFLAGS)) \
+ $(CLANG_CONFIG_x86_HOST_EXTRA_CFLAGS)
+
+$(clang_2nd_arch_prefix)CLANG_HOST_GLOBAL_CONLYFLAGS := \
+ $(call $(clang_2nd_arch_prefix)convert-to-host-clang-flags,$($(clang_2nd_arch_prefix)HOST_GLOBAL_CONLYFLAGS)) \
+ $(CLANG_CONFIG_x86_HOST_EXTRA_CONLYFLAGS)
+
+$(clang_2nd_arch_prefix)CLANG_HOST_GLOBAL_CPPFLAGS := \
+ $(call $(clang_2nd_arch_prefix)convert-to-host-clang-flags,$($(clang_2nd_arch_prefix)HOST_GLOBAL_CPPFLAGS)) \
+ $(CLANG_CONFIG_x86_HOST_EXTRA_CPPFLAGS)
+
+$(clang_2nd_arch_prefix)CLANG_HOST_GLOBAL_LDFLAGS := \
+ $(call $(clang_2nd_arch_prefix)convert-to-host-clang-flags,$($(clang_2nd_arch_prefix)HOST_GLOBAL_LDFLAGS)) \
+ $(CLANG_CONFIG_x86_HOST_EXTRA_LDFLAGS)
+
+$(clang_2nd_arch_prefix)HOST_LIBPROFILE_RT := $(LLVM_RTLIB_PATH)/libclang_rt.profile-i686.a
diff --git a/core/clang/HOST_x86_64.mk b/core/clang/HOST_x86_64.mk
new file mode 100644
index 0000000000000000000000000000000000000000..d46cb67e433aa4f4898d6e06eb3a59f3ad026286
--- /dev/null
+++ b/core/clang/HOST_x86_64.mk
@@ -0,0 +1,77 @@
+
+include $(BUILD_SYSTEM)/clang/x86_64.mk
+include $(BUILD_SYSTEM)/clang/HOST_x86_common.mk
+
+ifeq ($(HOST_OS),linux)
+CLANG_CONFIG_x86_64_HOST_TRIPLE := x86_64-linux-gnu
+CLANG_CONFIG_x86_64_HOST_COMBO_EXTRA_ASFLAGS := $(CLANG_CONFIG_x86_LINUX_HOST_EXTRA_ASFLAGS)
+CLANG_CONFIG_x86_64_HOST_COMBO_EXTRA_CFLAGS := $(CLANG_CONFIG_x86_LINUX_HOST_EXTRA_CFLAGS)
+CLANG_CONFIG_x86_64_HOST_COMBO_EXTRA_CPPFLAGS := $(CLANG_CONFIG_x86_LINUX_HOST_EXTRA_CPPFLAGS)
+CLANG_CONFIG_x86_64_HOST_COMBO_EXTRA_LDFLAGS := $(CLANG_CONFIG_x86_LINUX_HOST_EXTRA_LDFLAGS)
+endif
+ifeq ($(HOST_OS),darwin)
+CLANG_CONFIG_x86_64_HOST_TRIPLE := x86_64-apple-darwin
+CLANG_CONFIG_x86_64_HOST_COMBO_EXTRA_ASFLAGS := $(CLANG_CONFIG_x86_DARWIN_HOST_EXTRA_ASFLAGS)
+CLANG_CONFIG_x86_64_HOST_COMBO_EXTRA_CFLAGS := $(CLANG_CONFIG_x86_DARWIN_HOST_EXTRA_CFLAGS)
+CLANG_CONFIG_x86_64_HOST_COMBO_EXTRA_CPPFLAGS := $(CLANG_CONFIG_x86_DARWIN_HOST_EXTRA_CPPFLAGS)
+CLANG_CONFIG_x86_64_HOST_COMBO_EXTRA_LDFLAGS := $(CLANG_CONFIG_x86_DARWIN_HOST_EXTRA_LDFLAGS)
+endif
+
+CLANG_CONFIG_x86_64_HOST_EXTRA_ASFLAGS := \
+ $(CLANG_CONFIG_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_HOST_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_x86_64_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_x86_64_HOST_COMBO_EXTRA_ASFLAGS) \
+ -target $(CLANG_CONFIG_x86_64_HOST_TRIPLE)
+
+CLANG_CONFIG_x86_64_HOST_EXTRA_CFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_HOST_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_x86_64_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_x86_64_HOST_COMBO_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_x86_64_HOST_EXTRA_ASFLAGS)
+
+CLANG_CONFIG_x86_64_HOST_EXTRA_CONLYFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_HOST_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_x86_64_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_x86_64_HOST_COMBO_EXTRA_CONLYFLAGS)
+
+CLANG_CONFIG_x86_64_HOST_EXTRA_CPPFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_HOST_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_x86_64_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_x86_64_HOST_COMBO_EXTRA_CPPFLAGS) \
+ -target $(CLANG_CONFIG_x86_64_HOST_TRIPLE)
+
+CLANG_CONFIG_x86_64_HOST_EXTRA_LDFLAGS := \
+ $(CLANG_CONFIG_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_HOST_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_x86_64_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_x86_64_HOST_COMBO_EXTRA_LDFLAGS) \
+ -target $(CLANG_CONFIG_x86_64_HOST_TRIPLE)
+
+define convert-to-host-clang-flags
+ $(strip \
+ $(call subst-clang-incompatible-x86_64-flags,\
+ $(filter-out $(CLANG_CONFIG_x86_64_UNKNOWN_CFLAGS),\
+ $(1))))
+endef
+
+CLANG_HOST_GLOBAL_CFLAGS := \
+ $(call convert-to-host-clang-flags,$(HOST_GLOBAL_CFLAGS)) \
+ $(CLANG_CONFIG_x86_64_HOST_EXTRA_CFLAGS)
+
+CLANG_HOST_GLOBAL_CONLYFLAGS := \
+ $(call convert-to-host-clang-flags,$(HOST_GLOBAL_CONLYFLAGS)) \
+ $(CLANG_CONFIG_x86_64_HOST_EXTRA_CONLYFLAGS)
+
+CLANG_HOST_GLOBAL_CPPFLAGS := \
+ $(call convert-to-host-clang-flags,$(HOST_GLOBAL_CPPFLAGS)) \
+ $(CLANG_CONFIG_x86_64_HOST_EXTRA_CPPFLAGS)
+
+CLANG_HOST_GLOBAL_LDFLAGS := \
+ $(call convert-to-host-clang-flags,$(HOST_GLOBAL_LDFLAGS)) \
+ $(CLANG_CONFIG_x86_64_HOST_EXTRA_LDFLAGS)
+
+HOST_LIBPROFILE_RT := $(LLVM_RTLIB_PATH)/libclang_rt.profile-x86_64.a
diff --git a/core/clang/HOST_x86_common.mk b/core/clang/HOST_x86_common.mk
new file mode 100644
index 0000000000000000000000000000000000000000..9e71750c14252f934fbf7f9be4c2eba36393cd8f
--- /dev/null
+++ b/core/clang/HOST_x86_common.mk
@@ -0,0 +1,54 @@
+# Shared by HOST_x86.mk and HOST_x86_64.mk.
+
+ifeq ($(HOST_OS),darwin)
+CLANG_CONFIG_x86_DARWIN_HOST_EXTRA_ASFLAGS := \
+ -integrated-as
+
+CLANG_CONFIG_x86_DARWIN_HOST_EXTRA_CFLAGS := \
+ -integrated-as
+
+CLANG_CONFIG_x86_DARWIN_HOST_EXTRA_CFLAGS += -fstack-protector-strong
+endif
+
+ifeq ($(HOST_OS),linux)
+CLANG_CONFIG_x86_LINUX_HOST_EXTRA_ASFLAGS := \
+ --gcc-toolchain=$($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG) \
+ --sysroot $($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)/sysroot
+
+CLANG_CONFIG_x86_LINUX_HOST_EXTRA_CFLAGS := \
+ --gcc-toolchain=$($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)
+
+CLANG_CONFIG_x86_LINUX_HOST_EXTRA_CFLAGS += -fstack-protector-strong
+
+ifneq ($(strip $($(clang_2nd_arch_prefix)HOST_IS_64_BIT)),)
+CLANG_CONFIG_x86_LINUX_HOST_EXTRA_CPPFLAGS := \
+ --gcc-toolchain=$($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG) \
+ --sysroot $($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)/sysroot \
+ -isystem $($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)/x86_64-linux/include/c++/4.8 \
+ -isystem $($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)/x86_64-linux/include/c++/4.8/x86_64-linux \
+ -isystem $($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)/x86_64-linux/include/c++/4.8/backward
+
+CLANG_CONFIG_x86_LINUX_HOST_EXTRA_LDFLAGS := \
+ --gcc-toolchain=$($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG) \
+ --sysroot $($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)/sysroot \
+ -B$($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)/x86_64-linux/bin \
+ -B$($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)/lib/gcc/x86_64-linux/4.8 \
+ -L$($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)/lib/gcc/x86_64-linux/4.8 \
+ -L$($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)/x86_64-linux/lib64/
+else
+CLANG_CONFIG_x86_LINUX_HOST_EXTRA_CPPFLAGS := \
+ --gcc-toolchain=$($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG) \
+ --sysroot $($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)/sysroot \
+ -isystem $($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)/x86_64-linux/include/c++/4.8 \
+ -isystem $($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)/x86_64-linux/include/c++/4.8/x86_64-linux/32 \
+ -isystem $($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)/x86_64-linux/include/c++/4.8/backward
+
+CLANG_CONFIG_x86_LINUX_HOST_EXTRA_LDFLAGS := \
+ --gcc-toolchain=$($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG) \
+ --sysroot $($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)/sysroot \
+ -B$($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)/x86_64-linux/bin \
+ -B$($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)/lib/gcc/x86_64-linux/4.8/32 \
+ -L$($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)/lib/gcc/x86_64-linux/4.8/32 \
+ -L$($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)/x86_64-linux/lib32/
+endif
+endif # Linux
diff --git a/core/clang/TARGET_arm.mk b/core/clang/TARGET_arm.mk
new file mode 100644
index 0000000000000000000000000000000000000000..5c1bf6f806e7514ba5f2817b98df9756262fa70b
--- /dev/null
+++ b/core/clang/TARGET_arm.mk
@@ -0,0 +1,72 @@
+
+include $(BUILD_SYSTEM)/clang/arm.mk
+
+CLANG_CONFIG_arm_TARGET_TRIPLE := arm-linux-androideabi
+CLANG_CONFIG_arm_TARGET_TOOLCHAIN_PREFIX := \
+ $($(clang_2nd_arch_prefix)TARGET_TOOLCHAIN_ROOT)/$(CLANG_CONFIG_arm_TARGET_TRIPLE)/bin
+
+CLANG_CONFIG_arm_TARGET_EXTRA_ASFLAGS := \
+ $(CLANG_CONFIG_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_arm_EXTRA_ASFLAGS) \
+ -target $(CLANG_CONFIG_arm_TARGET_TRIPLE) \
+ -B$(CLANG_CONFIG_arm_TARGET_TOOLCHAIN_PREFIX)
+
+CLANG_CONFIG_arm_TARGET_EXTRA_CFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_arm_EXTRA_CFLAGS) \
+ -target $(CLANG_CONFIG_arm_TARGET_TRIPLE) \
+ $(CLANG_CONFIG_arm_TARGET_EXTRA_ASFLAGS)
+
+CLANG_CONFIG_arm_TARGET_EXTRA_CONLYFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_arm_EXTRA_CONLYFLAGS)
+
+CLANG_CONFIG_arm_TARGET_EXTRA_CPPFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_arm_EXTRA_CPPFLAGS) \
+ -target $(CLANG_CONFIG_arm_TARGET_TRIPLE)
+
+CLANG_CONFIG_arm_TARGET_EXTRA_LDFLAGS := \
+ $(CLANG_CONFIG_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_arm_EXTRA_LDFLAGS) \
+ -target $(CLANG_CONFIG_arm_TARGET_TRIPLE) \
+ -B$(CLANG_CONFIG_arm_TARGET_TOOLCHAIN_PREFIX)
+
+
+define $(clang_2nd_arch_prefix)convert-to-clang-flags
+ $(strip \
+ $(call subst-clang-incompatible-arm-flags,\
+ $(filter-out $(CLANG_CONFIG_arm_UNKNOWN_CFLAGS),\
+ $(1))))
+endef
+
+$(clang_2nd_arch_prefix)CLANG_TARGET_GLOBAL_CFLAGS := \
+ $(call $(clang_2nd_arch_prefix)convert-to-clang-flags,$($(clang_2nd_arch_prefix)TARGET_GLOBAL_CFLAGS)) \
+ $(CLANG_CONFIG_arm_TARGET_EXTRA_CFLAGS)
+
+$(clang_2nd_arch_prefix)CLANG_TARGET_GLOBAL_CONLYFLAGS := \
+ $(call $(clang_2nd_arch_prefix)convert-to-clang-flags,$($(clang_2nd_arch_prefix)TARGET_GLOBAL_CONLYFLAGS)) \
+ $(CLANG_CONFIG_arm_TARGET_EXTRA_CONLYFLAGS)
+
+$(clang_2nd_arch_prefix)CLANG_TARGET_GLOBAL_CPPFLAGS := \
+ $(call $(clang_2nd_arch_prefix)convert-to-clang-flags,$($(clang_2nd_arch_prefix)TARGET_GLOBAL_CPPFLAGS)) \
+ $(CLANG_CONFIG_arm_TARGET_EXTRA_CPPFLAGS)
+
+$(clang_2nd_arch_prefix)CLANG_TARGET_GLOBAL_LDFLAGS := \
+ $(call $(clang_2nd_arch_prefix)convert-to-clang-flags,$($(clang_2nd_arch_prefix)TARGET_GLOBAL_LDFLAGS)) \
+ $(CLANG_CONFIG_arm_TARGET_EXTRA_LDFLAGS)
+
+$(clang_2nd_arch_prefix)RS_TRIPLE := armv7-linux-androideabi
+$(clang_2nd_arch_prefix)RS_TRIPLE_CFLAGS :=
+$(clang_2nd_arch_prefix)RS_COMPAT_TRIPLE := armv7-none-linux-gnueabi
+
+$(clang_2nd_arch_prefix)TARGET_LIBPROFILE_RT := $(LLVM_RTLIB_PATH)/libclang_rt.profile-arm-android.a
+
+# Address sanitizer clang config
+$(clang_2nd_arch_prefix)ADDRESS_SANITIZER_RUNTIME_LIBRARY := libclang_rt.asan-arm-android
+$(clang_2nd_arch_prefix)ADDRESS_SANITIZER_LINKER := /system/bin/linker_asan
diff --git a/core/clang/TARGET_arm64.mk b/core/clang/TARGET_arm64.mk
new file mode 100644
index 0000000000000000000000000000000000000000..15b0172a6ff90df84d0ddaee98a5b1363593799e
--- /dev/null
+++ b/core/clang/TARGET_arm64.mk
@@ -0,0 +1,70 @@
+
+include $(BUILD_SYSTEM)/clang/arm64.mk
+
+CLANG_CONFIG_arm64_TARGET_TRIPLE := aarch64-linux-android
+CLANG_CONFIG_arm64_TARGET_TOOLCHAIN_PREFIX := \
+ $(TARGET_TOOLCHAIN_ROOT)/$(CLANG_CONFIG_arm64_TARGET_TRIPLE)/bin
+
+CLANG_CONFIG_arm64_TARGET_EXTRA_ASFLAGS := \
+ $(CLANG_CONFIG_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_arm64_EXTRA_ASFLAGS) \
+ -target $(CLANG_CONFIG_arm64_TARGET_TRIPLE) \
+ -B$(CLANG_CONFIG_arm64_TARGET_TOOLCHAIN_PREFIX)
+
+CLANG_CONFIG_arm64_TARGET_EXTRA_CFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_arm64_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_arm64_TARGET_EXTRA_ASFLAGS)
+
+CLANG_CONFIG_arm64_TARGET_EXTRA_CONLYFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_arm64_EXTRA_CONLYFLAGS)
+
+CLANG_CONFIG_arm64_TARGET_EXTRA_CPPFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_arm64_EXTRA_CPPFLAGS)
+
+CLANG_CONFIG_arm64_TARGET_EXTRA_LDFLAGS := \
+ $(CLANG_CONFIG_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_arm64_EXTRA_LDFLAGS) \
+ -target $(CLANG_CONFIG_arm64_TARGET_TRIPLE) \
+ -B$(CLANG_CONFIG_arm64_TARGET_TOOLCHAIN_PREFIX)
+
+
+define convert-to-clang-flags
+ $(strip \
+ $(call subst-clang-incompatible-arm64-flags,\
+ $(filter-out $(CLANG_CONFIG_arm64_UNKNOWN_CFLAGS),\
+ $(1))))
+endef
+
+CLANG_TARGET_GLOBAL_CFLAGS := \
+ $(call convert-to-clang-flags,$(TARGET_GLOBAL_CFLAGS)) \
+ $(CLANG_CONFIG_arm64_TARGET_EXTRA_CFLAGS)
+
+CLANG_TARGET_GLOBAL_CONLYFLAGS := \
+ $(call convert-to-clang-flags,$(TARGET_GLOBAL_CONLYFLAGS)) \
+ $(CLANG_CONFIG_arm64_TARGET_EXTRA_CONLYFLAGS)
+
+CLANG_TARGET_GLOBAL_CPPFLAGS := \
+ $(call convert-to-clang-flags,$(TARGET_GLOBAL_CPPFLAGS)) \
+ $(CLANG_CONFIG_arm64_TARGET_EXTRA_CPPFLAGS)
+
+CLANG_TARGET_GLOBAL_LDFLAGS := \
+ $(call convert-to-clang-flags,$(TARGET_GLOBAL_LDFLAGS)) \
+ $(CLANG_CONFIG_arm64_TARGET_EXTRA_LDFLAGS)
+
+RS_TRIPLE := aarch64-linux-android
+RS_TRIPLE_CFLAGS :=
+RS_COMPAT_TRIPLE := aarch64-linux-android
+
+TARGET_LIBPROFILE_RT := $(LLVM_RTLIB_PATH)/libclang_rt.profile-aarch64-android.a
+
+# Address sanitizer clang config
+ADDRESS_SANITIZER_RUNTIME_LIBRARY := libclang_rt.asan-aarch64-android
+ADDRESS_SANITIZER_LINKER := /system/bin/linker_asan64
diff --git a/core/clang/TARGET_mips.mk b/core/clang/TARGET_mips.mk
new file mode 100644
index 0000000000000000000000000000000000000000..1a0176a5ab9b73c622df08409c36591ef764f982
--- /dev/null
+++ b/core/clang/TARGET_mips.mk
@@ -0,0 +1,67 @@
+
+include $(BUILD_SYSTEM)/clang/mips.mk
+
+CLANG_CONFIG_mips_TARGET_TRIPLE := mipsel-linux-android
+CLANG_CONFIG_mips_TARGET_TOOLCHAIN_PREFIX := \
+ $($(clang_2nd_arch_prefix)TARGET_TOOLCHAIN_ROOT)/mips64el-linux-android/bin
+
+CLANG_CONFIG_mips_TARGET_EXTRA_ASFLAGS := \
+ $(CLANG_CONFIG_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_mips_EXTRA_ASFLAGS) \
+ -fPIC \
+ -target $(CLANG_CONFIG_mips_TARGET_TRIPLE) \
+ -B$(CLANG_CONFIG_mips_TARGET_TOOLCHAIN_PREFIX)
+
+CLANG_CONFIG_mips_TARGET_EXTRA_CFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_mips_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_mips_TARGET_EXTRA_ASFLAGS)
+
+CLANG_CONFIG_mips_TARGET_EXTRA_CONLYFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_mips_EXTRA_CONLYFLAGS)
+
+CLANG_CONFIG_mips_TARGET_EXTRA_CPPFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_mips_EXTRA_CPPFLAGS)
+
+CLANG_CONFIG_mips_TARGET_EXTRA_LDFLAGS := \
+ $(CLANG_CONFIG_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_mips_EXTRA_LDFLAGS) \
+ -target $(CLANG_CONFIG_mips_TARGET_TRIPLE) \
+ -B$(CLANG_CONFIG_mips_TARGET_TOOLCHAIN_PREFIX)
+
+
+define $(clang_2nd_arch_prefix)convert-to-clang-flags
+ $(strip \
+ $(call subst-clang-incompatible-mips-flags,\
+ $(filter-out $(CLANG_CONFIG_mips_UNKNOWN_CFLAGS),\
+ $(1))))
+endef
+
+$(clang_2nd_arch_prefix)CLANG_TARGET_GLOBAL_CFLAGS := \
+ $(call $(clang_2nd_arch_prefix)convert-to-clang-flags,$($(clang_2nd_arch_prefix)TARGET_GLOBAL_CFLAGS)) \
+ $(CLANG_CONFIG_mips_TARGET_EXTRA_CFLAGS)
+
+$(clang_2nd_arch_prefix)CLANG_TARGET_GLOBAL_CONLYFLAGS := \
+ $(call $(clang_2nd_arch_prefix)convert-to-clang-flags,$($(clang_2nd_arch_prefix)TARGET_GLOBAL_CONLYFLAGS)) \
+ $(CLANG_CONFIG_mips_TARGET_EXTRA_CONLYFLAGS)
+
+$(clang_2nd_arch_prefix)CLANG_TARGET_GLOBAL_CPPFLAGS := \
+ $(call $(clang_2nd_arch_prefix)convert-to-clang-flags,$($(clang_2nd_arch_prefix)TARGET_GLOBAL_CPPFLAGS)) \
+ $(CLANG_CONFIG_mips_TARGET_EXTRA_CPPFLAGS)
+
+$(clang_2nd_arch_prefix)CLANG_TARGET_GLOBAL_LDFLAGS := \
+ $(call $(clang_2nd_arch_prefix)convert-to-clang-flags,$($(clang_2nd_arch_prefix)TARGET_GLOBAL_LDFLAGS)) \
+ $(CLANG_CONFIG_mips_TARGET_EXTRA_LDFLAGS)
+
+$(clang_2nd_arch_prefix)RS_TRIPLE := armv7-linux-androideabi
+$(clang_2nd_arch_prefix)RS_TRIPLE_CFLAGS :=
+$(clang_2nd_arch_prefix)RS_COMPAT_TRIPLE := mipsel-linux-android
+
+$(clang_2nd_arch_prefix)TARGET_LIBPROFILE_RT := $(LLVM_RTLIB_PATH)/libclang_rt.profile-mipsel-android.a
diff --git a/core/clang/TARGET_mips64.mk b/core/clang/TARGET_mips64.mk
new file mode 100644
index 0000000000000000000000000000000000000000..104fb70217a3a64a3849fe796da01ad4cba243d5
--- /dev/null
+++ b/core/clang/TARGET_mips64.mk
@@ -0,0 +1,66 @@
+
+include $(BUILD_SYSTEM)/clang/mips64.mk
+
+CLANG_CONFIG_mips64_TARGET_TRIPLE := mips64el-linux-android
+CLANG_CONFIG_mips64_TARGET_TOOLCHAIN_PREFIX := \
+ $(TARGET_TOOLCHAIN_ROOT)/$(CLANG_CONFIG_mips64_TARGET_TRIPLE)/bin
+
+CLANG_CONFIG_mips64_TARGET_EXTRA_ASFLAGS := \
+ $(CLANG_CONFIG_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_mips64_EXTRA_ASFLAGS) \
+ -target $(CLANG_CONFIG_mips64_TARGET_TRIPLE) \
+ -B$(CLANG_CONFIG_mips64_TARGET_TOOLCHAIN_PREFIX)
+
+CLANG_CONFIG_mips64_TARGET_EXTRA_CFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_mips64_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_mips64_TARGET_EXTRA_ASFLAGS)
+
+CLANG_CONFIG_mips64_TARGET_EXTRA_CONLYFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_mips64_EXTRA_CONLYFLAGS)
+
+CLANG_CONFIG_mips64_TARGET_EXTRA_CPPFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_mips64_EXTRA_CPPFLAGS)
+
+CLANG_CONFIG_mips64_TARGET_EXTRA_LDFLAGS := \
+ $(CLANG_CONFIG_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_mips64_EXTRA_LDFLAGS) \
+ -target $(CLANG_CONFIG_mips64_TARGET_TRIPLE) \
+ -B$(CLANG_CONFIG_mips64_TARGET_TOOLCHAIN_PREFIX)
+
+
+define convert-to-clang-flags
+ $(strip \
+ $(call subst-clang-incompatible-mips64-flags,\
+ $(filter-out $(CLANG_CONFIG_mips64_UNKNOWN_CFLAGS),\
+ $(1))))
+endef
+
+CLANG_TARGET_GLOBAL_CFLAGS := \
+ $(call convert-to-clang-flags,$(TARGET_GLOBAL_CFLAGS)) \
+ $(CLANG_CONFIG_mips64_TARGET_EXTRA_CFLAGS)
+
+CLANG_TARGET_GLOBAL_CONLYFLAGS := \
+ $(call convert-to-clang-flags,$(TARGET_GLOBAL_CONLYFLAGS)) \
+ $(CLANG_CONFIG_mips64_TARGET_EXTRA_CONLYFLAGS)
+
+CLANG_TARGET_GLOBAL_CPPFLAGS := \
+ $(call convert-to-clang-flags,$(TARGET_GLOBAL_CPPFLAGS)) \
+ $(CLANG_CONFIG_mips64_TARGET_EXTRA_CPPFLAGS)
+
+CLANG_TARGET_GLOBAL_LDFLAGS := \
+ $(call convert-to-clang-flags,$(TARGET_GLOBAL_LDFLAGS)) \
+ $(CLANG_CONFIG_mips64_TARGET_EXTRA_LDFLAGS)
+
+RS_TRIPLE := aarch64-linux-android
+RS_TRIPLE_CFLAGS :=
+RS_COMPAT_TRIPLE := mips64el-linux-android
+
+TARGET_LIBPROFILE_RT := $(LLVM_RTLIB_PATH)/libclang_rt.profile-mips64el-android.a
diff --git a/core/clang/TARGET_x86.mk b/core/clang/TARGET_x86.mk
new file mode 100644
index 0000000000000000000000000000000000000000..741768b5e2c3d2530e34d111554a83e5346bdc78
--- /dev/null
+++ b/core/clang/TARGET_x86.mk
@@ -0,0 +1,77 @@
+
+include $(BUILD_SYSTEM)/clang/x86.mk
+
+CLANG_CONFIG_x86_TARGET_TRIPLE := i686-linux-android
+# NOTE: There is no i686-linux-android prebuilt, so we must hardcode the
+# x86_64 target instead.
+CLANG_CONFIG_x86_TARGET_TOOLCHAIN_PREFIX := \
+ $($(clang_2nd_arch_prefix)TARGET_TOOLCHAIN_ROOT)/x86_64-linux-android/bin
+
+CLANG_CONFIG_x86_TARGET_EXTRA_ASFLAGS := \
+ $(CLANG_CONFIG_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_x86_EXTRA_ASFLAGS) \
+ -target $(CLANG_CONFIG_x86_TARGET_TRIPLE) \
+ -B$(CLANG_CONFIG_x86_TARGET_TOOLCHAIN_PREFIX)
+
+CLANG_CONFIG_x86_TARGET_EXTRA_CFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_x86_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_x86_TARGET_EXTRA_ASFLAGS) \
+ -mstackrealign
+
+# -mstackrealign is needed to realign stack in native code
+# that could be called from JNI, so that movaps instruction
+# will work on assumed stack aligned local variables.
+
+CLANG_CONFIG_x86_TARGET_EXTRA_CONLYFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_x86_EXTRA_CONLYFLAGS)
+
+CLANG_CONFIG_x86_TARGET_EXTRA_CPPFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_x86_EXTRA_CPPFLAGS)
+
+CLANG_CONFIG_x86_TARGET_EXTRA_LDFLAGS := \
+ $(CLANG_CONFIG_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_x86_EXTRA_LDFLAGS) \
+ -target $(CLANG_CONFIG_x86_TARGET_TRIPLE) \
+ -B$(CLANG_CONFIG_x86_TARGET_TOOLCHAIN_PREFIX)
+
+
+define $(clang_2nd_arch_prefix)convert-to-clang-flags
+ $(strip \
+ $(call subst-clang-incompatible-x86-flags,\
+ $(filter-out $(CLANG_CONFIG_x86_UNKNOWN_CFLAGS),\
+ $(1))))
+endef
+
+$(clang_2nd_arch_prefix)CLANG_TARGET_GLOBAL_CFLAGS := \
+ $(call $(clang_2nd_arch_prefix)convert-to-clang-flags,$($(clang_2nd_arch_prefix)TARGET_GLOBAL_CFLAGS)) \
+ $(CLANG_CONFIG_x86_TARGET_EXTRA_CFLAGS)
+
+$(clang_2nd_arch_prefix)CLANG_TARGET_GLOBAL_CONLYFLAGS := \
+ $(call $(clang_2nd_arch_prefix)convert-to-clang-flags,$($(clang_2nd_arch_prefix)TARGET_GLOBAL_CONLYFLAGS)) \
+ $(CLANG_CONFIG_x86_TARGET_EXTRA_CONLYFLAGS)
+
+$(clang_2nd_arch_prefix)CLANG_TARGET_GLOBAL_CPPFLAGS := \
+ $(call $(clang_2nd_arch_prefix)convert-to-clang-flags,$($(clang_2nd_arch_prefix)TARGET_GLOBAL_CPPFLAGS)) \
+ $(CLANG_CONFIG_x86_TARGET_EXTRA_CPPFLAGS)
+
+$(clang_2nd_arch_prefix)CLANG_TARGET_GLOBAL_LDFLAGS := \
+ $(call $(clang_2nd_arch_prefix)convert-to-clang-flags,$($(clang_2nd_arch_prefix)TARGET_GLOBAL_LDFLAGS)) \
+ $(CLANG_CONFIG_x86_TARGET_EXTRA_LDFLAGS)
+
+$(clang_2nd_arch_prefix)RS_TRIPLE := armv7-linux-androideabi
+$(clang_2nd_arch_prefix)RS_TRIPLE_CFLAGS := -D__i386__
+$(clang_2nd_arch_prefix)RS_COMPAT_TRIPLE := i686-linux-android
+
+$(clang_2nd_arch_prefix)TARGET_LIBPROFILE_RT := $(LLVM_RTLIB_PATH)/libclang_rt.profile-i686-android.a
+
+# Address sanitizer clang config
+$(clang_2nd_arch_prefix)ADDRESS_SANITIZER_RUNTIME_LIBRARY := libclang_rt.asan-i686-android
+$(clang_2nd_arch_prefix)ADDRESS_SANITIZER_LINKER := /system/bin/linker_asan
diff --git a/core/clang/TARGET_x86_64.mk b/core/clang/TARGET_x86_64.mk
new file mode 100644
index 0000000000000000000000000000000000000000..e44382dcb821a3e0b825adf4a35f77a27aa095b0
--- /dev/null
+++ b/core/clang/TARGET_x86_64.mk
@@ -0,0 +1,66 @@
+
+include $(BUILD_SYSTEM)/clang/x86_64.mk
+
+CLANG_CONFIG_x86_64_TARGET_TRIPLE := x86_64-linux-android
+CLANG_CONFIG_x86_64_TARGET_TOOLCHAIN_PREFIX := \
+ $(TARGET_TOOLCHAIN_ROOT)/$(CLANG_CONFIG_x86_64_TARGET_TRIPLE)/bin
+
+CLANG_CONFIG_x86_64_TARGET_EXTRA_ASFLAGS := \
+ $(CLANG_CONFIG_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_ASFLAGS) \
+ $(CLANG_CONFIG_x86_64_EXTRA_ASFLAGS) \
+ -target $(CLANG_CONFIG_x86_64_TARGET_TRIPLE) \
+ -B$(CLANG_CONFIG_x86_64_TARGET_TOOLCHAIN_PREFIX)
+
+CLANG_CONFIG_x86_64_TARGET_EXTRA_CFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_x86_64_EXTRA_CFLAGS) \
+ $(CLANG_CONFIG_x86_64_TARGET_EXTRA_ASFLAGS)
+
+CLANG_CONFIG_x86_64_TARGET_EXTRA_CONLYFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_CONLYFLAGS) \
+ $(CLANG_CONFIG_x86_64_EXTRA_CONLYFLAGS)
+
+CLANG_CONFIG_x86_64_TARGET_EXTRA_CPPFLAGS := \
+ $(CLANG_CONFIG_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_CPPFLAGS) \
+ $(CLANG_CONFIG_x86_64_EXTRA_CPPFLAGS) \
+
+CLANG_CONFIG_x86_64_TARGET_EXTRA_LDFLAGS := \
+ $(CLANG_CONFIG_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_TARGET_EXTRA_LDFLAGS) \
+ $(CLANG_CONFIG_x86_64_EXTRA_LDFLAGS) \
+ -target $(CLANG_CONFIG_x86_64_TARGET_TRIPLE) \
+ -B$(CLANG_CONFIG_x86_64_TARGET_TOOLCHAIN_PREFIX)
+
+
+define convert-to-clang-flags
+ $(strip \
+ $(call subst-clang-incompatible-x86_64-flags,\
+ $(filter-out $(CLANG_CONFIG_x86_64_UNKNOWN_CFLAGS),\
+ $(1))))
+endef
+
+CLANG_TARGET_GLOBAL_CFLAGS := \
+ $(call convert-to-clang-flags,$(TARGET_GLOBAL_CFLAGS)) \
+ $(CLANG_CONFIG_x86_64_TARGET_EXTRA_CFLAGS)
+
+CLANG_TARGET_GLOBAL_CONLYFLAGS := \
+ $(call convert-to-clang-flags,$(TARGET_GLOBAL_CONLYFLAGS)) \
+ $(CLANG_CONFIG_x86_64_TARGET_EXTRA_CONLYFLAGS)
+
+CLANG_TARGET_GLOBAL_CPPFLAGS := \
+ $(call convert-to-clang-flags,$(TARGET_GLOBAL_CPPFLAGS)) \
+ $(CLANG_CONFIG_x86_64_TARGET_EXTRA_CPPFLAGS)
+
+CLANG_TARGET_GLOBAL_LDFLAGS := \
+ $(call convert-to-clang-flags,$(TARGET_GLOBAL_LDFLAGS)) \
+ $(CLANG_CONFIG_x86_64_TARGET_EXTRA_LDFLAGS)
+
+RS_TRIPLE := aarch64-linux-android
+RS_TRIPLE_CFLAGS := -D__x86_64__
+RS_COMPAT_TRIPLE := x86_64-linux-android
+
+TARGET_LIBPROFILE_RT := $(LLVM_RTLIB_PATH)/libclang_rt.profile-x86_64-android.a
diff --git a/core/clang/arm.mk b/core/clang/arm.mk
new file mode 100644
index 0000000000000000000000000000000000000000..4053bb2766ce35f81dbaf1c90a40d034751fb806
--- /dev/null
+++ b/core/clang/arm.mk
@@ -0,0 +1,38 @@
+# Clang flags for arm arch, target or host.
+
+CLANG_CONFIG_arm_EXTRA_ASFLAGS :=
+
+CLANG_CONFIG_arm_EXTRA_CFLAGS :=
+
+ifneq (,$(filter krait,$(TARGET_$(combo_2nd_arch_prefix)CPU_VARIANT)))
+ # Android's clang supports krait as a CPU whereas GCC doesn't. Specify
+ # -mcpu here rather than the more normal core/combo/arch/arm/armv7-a-neon.mk.
+ CLANG_CONFIG_arm_EXTRA_CFLAGS += -mcpu=krait -mfpu=neon-vfpv4
+endif
+
+CLANG_CONFIG_arm_EXTRA_CPPFLAGS :=
+
+CLANG_CONFIG_arm_EXTRA_LDFLAGS :=
+
+# Include common unknown flags
+CLANG_CONFIG_arm_UNKNOWN_CFLAGS := \
+ $(CLANG_CONFIG_UNKNOWN_CFLAGS) \
+ -mthumb-interwork \
+ -fgcse-after-reload \
+ -frerun-cse-after-loop \
+ -frename-registers \
+ -fno-align-jumps \
+ -fno-builtin-sin \
+ -fno-caller-saves \
+ -fno-early-inlining \
+ -fno-move-loop-invariants \
+ -fno-partial-inlining \
+ -fno-strict-volatile-bitfields \
+ -fno-tree-copy-prop \
+ -fno-tree-loop-optimize
+
+define subst-clang-incompatible-arm-flags
+ $(subst -march=armv5te,-march=armv5t,\
+ $(subst -march=armv5e,-march=armv5,\
+ $(1)))
+endef
diff --git a/core/clang/arm64.mk b/core/clang/arm64.mk
new file mode 100644
index 0000000000000000000000000000000000000000..cad7321ec1ebb6ea10adf372c17a5e3ab94b96e3
--- /dev/null
+++ b/core/clang/arm64.mk
@@ -0,0 +1,21 @@
+# Clang flags for arm64 arch, target or host.
+
+CLANG_CONFIG_arm64_EXTRA_ASFLAGS :=
+
+CLANG_CONFIG_arm64_EXTRA_CFLAGS :=
+
+CLANG_CONFIG_arm64_EXTRA_LDFLAGS :=
+
+# Include common unknown flags
+CLANG_CONFIG_arm64_UNKNOWN_CFLAGS := \
+ $(CLANG_CONFIG_UNKNOWN_CFLAGS) \
+ -fgcse-after-reload \
+ -frerun-cse-after-loop \
+ -frename-registers \
+ -fno-strict-volatile-bitfields \
+ -fno-align-jumps
+
+# We don't have any arm64 flags to substitute yet.
+define subst-clang-incompatible-arm64-flags
+ $(1)
+endef
diff --git a/core/clang/config.mk b/core/clang/config.mk
new file mode 100644
index 0000000000000000000000000000000000000000..6cc344637e89dded591acbe9874ac9848465ff5d
--- /dev/null
+++ b/core/clang/config.mk
@@ -0,0 +1,181 @@
+## Clang configurations.
+
+LLVM_PREBUILTS_PATH := $(LLVM_PREBUILTS_BASE)/$(BUILD_OS)-x86/$(LLVM_PREBUILTS_VERSION)/bin
+LLVM_RTLIB_PATH := $(LLVM_PREBUILTS_PATH)/../lib64/clang/$(LLVM_RELEASE_VERSION)/lib/linux/
+
+CLANG := $(LLVM_PREBUILTS_PATH)/clang$(BUILD_EXECUTABLE_SUFFIX)
+CLANG_CXX := $(LLVM_PREBUILTS_PATH)/clang++$(BUILD_EXECUTABLE_SUFFIX)
+LLVM_AS := $(LLVM_PREBUILTS_PATH)/llvm-as$(BUILD_EXECUTABLE_SUFFIX)
+LLVM_LINK := $(LLVM_PREBUILTS_PATH)/llvm-link$(BUILD_EXECUTABLE_SUFFIX)
+
+CLANG_TBLGEN := $(BUILD_OUT_EXECUTABLES)/clang-tblgen$(BUILD_EXECUTABLE_SUFFIX)
+LLVM_TBLGEN := $(BUILD_OUT_EXECUTABLES)/llvm-tblgen$(BUILD_EXECUTABLE_SUFFIX)
+
+# RenderScript-specific tools
+# These are tied to the version of LLVM directly in external/, so they might
+# trail the host prebuilts being used for the rest of the build process.
+RS_LLVM_PREBUILTS_VERSION := clang-2690385
+RS_LLVM_PREBUILTS_BASE := prebuilts/clang/host
+RS_LLVM_PREBUILTS_PATH := $(RS_LLVM_PREBUILTS_BASE)/$(BUILD_OS)-x86/$(RS_LLVM_PREBUILTS_VERSION)/bin
+RS_CLANG := $(RS_LLVM_PREBUILTS_PATH)/clang$(BUILD_EXECUTABLE_SUFFIX)
+RS_LLVM_AS := $(RS_LLVM_PREBUILTS_PATH)/llvm-as$(BUILD_EXECUTABLE_SUFFIX)
+RS_LLVM_LINK := $(RS_LLVM_PREBUILTS_PATH)/llvm-link$(BUILD_EXECUTABLE_SUFFIX)
+
+# Clang flags for all host or target rules
+CLANG_CONFIG_EXTRA_ASFLAGS :=
+CLANG_CONFIG_EXTRA_CFLAGS :=
+CLANG_CONFIG_EXTRA_CONLYFLAGS := -std=gnu99
+CLANG_CONFIG_EXTRA_CPPFLAGS :=
+CLANG_CONFIG_EXTRA_LDFLAGS :=
+
+CLANG_CONFIG_EXTRA_CFLAGS += \
+ -D__compiler_offsetof=__builtin_offsetof
+
+# Help catch common 32/64-bit errors.
+CLANG_CONFIG_EXTRA_CFLAGS += \
+ -Werror=int-conversion
+
+# Disable overly aggressive warning for macros defined with a leading underscore
+# This used to happen in AndroidConfig.h, which was included everywhere.
+# TODO: can we remove this now?
+CLANG_CONFIG_EXTRA_CFLAGS += \
+ -Wno-reserved-id-macro
+
+# Disable overly aggressive warning for format strings.
+# Bug: 20148343
+CLANG_CONFIG_EXTRA_CFLAGS += \
+ -Wno-format-pedantic
+
+# Workaround for ccache with clang.
+# See http://petereisentraut.blogspot.com/2011/05/ccache-and-clang.html.
+CLANG_CONFIG_EXTRA_CFLAGS += \
+ -Wno-unused-command-line-argument
+
+# Disable -Winconsistent-missing-override until we can clean up the existing
+# codebase for it.
+CLANG_CONFIG_EXTRA_CPPFLAGS += \
+ -Wno-inconsistent-missing-override
+
+# Force clang to always output color diagnostics. Ninja will strip the ANSI
+# color codes if it is not running in a terminal.
+ifdef BUILDING_WITH_NINJA
+CLANG_CONFIG_EXTRA_CFLAGS += \
+ -fcolor-diagnostics
+endif
+
+CLANG_CONFIG_UNKNOWN_CFLAGS := \
+ -finline-functions \
+ -finline-limit=64 \
+ -fno-canonical-system-headers \
+ -Wno-clobbered \
+ -fno-devirtualize \
+ -fno-tree-sra \
+ -fprefetch-loop-arrays \
+ -funswitch-loops \
+ -Werror=unused-but-set-parameter \
+ -Werror=unused-but-set-variable \
+ -Wmaybe-uninitialized \
+ -Wno-error=clobbered \
+ -Wno-error=maybe-uninitialized \
+ -Wno-error=unused-but-set-parameter \
+ -Wno-error=unused-but-set-variable \
+ -Wno-free-nonheap-object \
+ -Wno-literal-suffix \
+ -Wno-maybe-uninitialized \
+ -Wno-old-style-declaration \
+ -Wno-psabi \
+ -Wno-unused-but-set-parameter \
+ -Wno-unused-but-set-variable \
+ -Wno-unused-local-typedefs \
+ -Wunused-but-set-parameter \
+ -Wunused-but-set-variable \
+ -fdiagnostics-color \
+ -fdebug-prefix-map=/proc/self/cwd=
+
+# Clang flags for all host rules
+CLANG_CONFIG_HOST_EXTRA_ASFLAGS :=
+CLANG_CONFIG_HOST_EXTRA_CFLAGS :=
+CLANG_CONFIG_HOST_EXTRA_CPPFLAGS :=
+CLANG_CONFIG_HOST_EXTRA_LDFLAGS :=
+
+# Clang flags for all host cross rules
+CLANG_CONFIG_HOST_CROSS_EXTRA_ASFLAGS :=
+CLANG_CONFIG_HOST_CROSS_EXTRA_CFLAGS :=
+CLANG_CONFIG_HOST_CROSS_EXTRA_CPPFLAGS :=
+CLANG_CONFIG_HOST_CROSS_EXTRA_LDFLAGS :=
+
+# Clang flags for all target rules
+CLANG_CONFIG_TARGET_EXTRA_ASFLAGS :=
+CLANG_CONFIG_TARGET_EXTRA_CFLAGS := -nostdlibinc
+CLANG_CONFIG_TARGET_EXTRA_CPPFLAGS := -nostdlibinc
+CLANG_CONFIG_TARGET_EXTRA_LDFLAGS :=
+
+CLANG_DEFAULT_UB_CHECKS := \
+ bool \
+ integer-divide-by-zero \
+ return \
+ returns-nonnull-attribute \
+ shift-exponent \
+ unreachable \
+ vla-bound
+
+# TODO(danalbert): The following checks currently have compiler performance
+# issues.
+# CLANG_DEFAULT_UB_CHECKS += alignment
+# CLANG_DEFAULT_UB_CHECKS += bounds
+# CLANG_DEFAULT_UB_CHECKS += enum
+# CLANG_DEFAULT_UB_CHECKS += float-cast-overflow
+# CLANG_DEFAULT_UB_CHECKS += float-divide-by-zero
+# CLANG_DEFAULT_UB_CHECKS += nonnull-attribute
+# CLANG_DEFAULT_UB_CHECKS += null
+# CLANG_DEFAULT_UB_CHECKS += shift-base
+# CLANG_DEFAULT_UB_CHECKS += signed-integer-overflow
+
+# TODO(danalbert): Fix UB in libc++'s __tree so we can turn this on.
+# https://llvm.org/PR19302
+# http://reviews.llvm.org/D6974
+# CLANG_DEFAULT_UB_CHECKS += object-size
+
+# HOST config
+clang_2nd_arch_prefix :=
+include $(BUILD_SYSTEM)/clang/HOST_$(HOST_ARCH).mk
+
+# HOST_2ND_ARCH config
+ifdef HOST_2ND_ARCH
+clang_2nd_arch_prefix := $(HOST_2ND_ARCH_VAR_PREFIX)
+include $(BUILD_SYSTEM)/clang/HOST_$(HOST_2ND_ARCH).mk
+endif
+
+ifdef HOST_CROSS_ARCH
+clang_2nd_arch_prefix :=
+include $(BUILD_SYSTEM)/clang/HOST_CROSS_$(HOST_CROSS_ARCH).mk
+ifdef HOST_CROSS_2ND_ARCH
+clang_2nd_arch_prefix := $(HOST_CROSS_2ND_ARCH_VAR_PREFIX)
+include $(BUILD_SYSTEM)/clang/HOST_CROSS_$(HOST_CROSS_2ND_ARCH).mk
+endif
+endif
+
+# TARGET config
+clang_2nd_arch_prefix :=
+include $(BUILD_SYSTEM)/clang/TARGET_$(TARGET_ARCH).mk
+
+# TARGET_2ND_ARCH config
+ifdef TARGET_2ND_ARCH
+clang_2nd_arch_prefix := $(TARGET_2ND_ARCH_VAR_PREFIX)
+include $(BUILD_SYSTEM)/clang/TARGET_$(TARGET_2ND_ARCH).mk
+endif
+
+ADDRESS_SANITIZER_CONFIG_EXTRA_CFLAGS := -fno-omit-frame-pointer
+ADDRESS_SANITIZER_CONFIG_EXTRA_LDFLAGS := -Wl,-u,__asan_preinit
+
+ADDRESS_SANITIZER_CONFIG_EXTRA_SHARED_LIBRARIES :=
+ADDRESS_SANITIZER_CONFIG_EXTRA_STATIC_LIBRARIES := libasan
+
+# This allows us to use the superset of functionality that compiler-rt
+# provides to Clang (for supporting features like -ftrapv).
+COMPILER_RT_CONFIG_EXTRA_STATIC_LIBRARIES := libcompiler_rt-extras
+
+ifeq ($(HOST_PREFER_32_BIT),true)
+# We don't have 32-bit prebuilt libLLVM/libclang, so force to build them from source.
+FORCE_BUILD_LLVM_COMPONENTS := true
+endif
diff --git a/core/clang/mips.mk b/core/clang/mips.mk
new file mode 100644
index 0000000000000000000000000000000000000000..4a8f81224ffdd0eb28b95fe152f7e36395fa41ae
--- /dev/null
+++ b/core/clang/mips.mk
@@ -0,0 +1,26 @@
+# Clang flags for mips arch, target or host.
+
+CLANG_CONFIG_mips_EXTRA_ASFLAGS :=
+CLANG_CONFIG_mips_EXTRA_CFLAGS :=
+CLANG_CONFIG_mips_EXTRA_LDFLAGS :=
+
+# Include common unknown flags
+CLANG_CONFIG_mips_UNKNOWN_CFLAGS := \
+ $(CLANG_CONFIG_UNKNOWN_CFLAGS) \
+ -fno-strict-volatile-bitfields \
+ -fgcse-after-reload \
+ -frerun-cse-after-loop \
+ -frename-registers \
+ -msynci \
+ -mno-synci \
+ -mno-fused-madd
+
+# Temporary workaround for Mips clang++ problem, creates
+# relocated ptrs in read-only pic .gcc_exception_table;
+# permanent fix pending at http://reviews.llvm.org/D9669
+CLANG_CONFIG_mips_UNKNOWN_CFLAGS += -Wl,--warn-shared-textrel
+
+# We don't have any mips flags to substitute yet.
+define subst-clang-incompatible-mips-flags
+ $(1)
+endef
diff --git a/core/clang/mips64.mk b/core/clang/mips64.mk
new file mode 100644
index 0000000000000000000000000000000000000000..1b72e058fe54f915915f5b27d3fd4a422287293c
--- /dev/null
+++ b/core/clang/mips64.mk
@@ -0,0 +1,26 @@
+# Clang flags for mips64 arch, target or host.
+
+CLANG_CONFIG_mips64_EXTRA_ASFLAGS :=
+CLANG_CONFIG_mips64_EXTRA_CFLAGS :=
+CLANG_CONFIG_mips64_EXTRA_LDFLAGS :=
+
+# Include common unknown flags
+CLANG_CONFIG_mips64_UNKNOWN_CFLAGS := \
+ $(CLANG_CONFIG_UNKNOWN_CFLAGS) \
+ -fno-strict-volatile-bitfields \
+ -fgcse-after-reload \
+ -frerun-cse-after-loop \
+ -frename-registers \
+ -msynci \
+ -mno-synci \
+ -mno-fused-madd
+
+# Temporary workaround for Mips clang++ problem creating
+# relocated ptrs in read-only pic .gcc_exception_table;
+# permanent fix pending at http://reviews.llvm.org/D9669
+CLANG_CONFIG_mips64_UNKNOWN_CFLAGS += -Wl,--warn-shared-textrel
+
+# We don't have any mips64 flags to substitute yet.
+define subst-clang-incompatible-mips64-flags
+ $(1)
+endef
diff --git a/core/clang/versions.mk b/core/clang/versions.mk
new file mode 100644
index 0000000000000000000000000000000000000000..81bd3b873b1bc65c444bc91724fe52a572391ef7
--- /dev/null
+++ b/core/clang/versions.mk
@@ -0,0 +1,5 @@
+## Clang/LLVM release versions.
+
+LLVM_RELEASE_VERSION := 3.8
+LLVM_PREBUILTS_VERSION ?= clang-2690385
+LLVM_PREBUILTS_BASE ?= prebuilts/clang/host
diff --git a/core/clang/x86.mk b/core/clang/x86.mk
new file mode 100644
index 0000000000000000000000000000000000000000..69c3fb2af8198c82dd6ec9e79621780a1cde0257
--- /dev/null
+++ b/core/clang/x86.mk
@@ -0,0 +1,19 @@
+# Clang flags for x86 arch, target or host.
+
+CLANG_CONFIG_x86_EXTRA_ASFLAGS := \
+ -msse3
+CLANG_CONFIG_x86_EXTRA_CFLAGS :=
+CLANG_CONFIG_x86_EXTRA_LDFLAGS :=
+
+# Include common unknown flags
+CLANG_CONFIG_x86_UNKNOWN_CFLAGS := \
+ $(CLANG_CONFIG_UNKNOWN_CFLAGS) \
+ -finline-limit=300 \
+ -fno-inline-functions-called-once \
+ -mfpmath=sse \
+ -mbionic
+
+# We don't have any x86 flags to substitute yet.
+define subst-clang-incompatible-x86-flags
+ $(1)
+endef
diff --git a/core/clang/x86_64.mk b/core/clang/x86_64.mk
new file mode 100644
index 0000000000000000000000000000000000000000..cba10d47b7d0de0bf87c372df0e09bfd4bd48fb3
--- /dev/null
+++ b/core/clang/x86_64.mk
@@ -0,0 +1,18 @@
+# Clang flags for x86_64 arch, target or host.
+
+CLANG_CONFIG_x86_64_EXTRA_ASFLAGS :=
+CLANG_CONFIG_x86_64_EXTRA_CFLAGS :=
+CLANG_CONFIG_x86_64_EXTRA_LDFLAGS :=
+
+# Include common unknown flags
+CLANG_CONFIG_x86_64_UNKNOWN_CFLAGS := \
+ $(CLANG_CONFIG_UNKNOWN_CFLAGS) \
+ -finline-limit=300 \
+ -fno-inline-functions-called-once \
+ -mfpmath=sse \
+ -mbionic
+
+# We don't have any x86_64 flags to substitute yet.
+define subst-clang-incompatible-x86_64-flags
+ $(1)
+endef
diff --git a/core/cleanbuild.mk b/core/cleanbuild.mk
new file mode 100644
index 0000000000000000000000000000000000000000..e46d93415399c9962b56745d03a98dbd7a5d1896
--- /dev/null
+++ b/core/cleanbuild.mk
@@ -0,0 +1,332 @@
+# Copyright (C) 2007 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Don't bother with the cleanspecs if you are running mm/mmm
+ifeq ($(ONE_SHOT_MAKEFILE)$(dont_bother),)
+
+INTERNAL_CLEAN_STEPS :=
+
+# Builds up a list of clean steps. Creates a unique
+# id for each step by taking makefile path, INTERNAL_CLEAN_BUILD_VERSION
+# and appending an increasing number of '@' characters.
+#
+# $(1): shell command to run
+# $(2): indicate to not use makefile path as part of step id if not empty.
+# $(2) should only be used in build/core/cleanspec.mk: just for compatibility.
+define _add-clean-step
+ $(if $(strip $(INTERNAL_CLEAN_BUILD_VERSION)),, \
+ $(error INTERNAL_CLEAN_BUILD_VERSION not set))
+ $(eval _acs_makefile_prefix := $(lastword $(MAKEFILE_LIST)))
+ $(eval _acs_makefile_prefix := $(subst /,_,$(_acs_makefile_prefix)))
+ $(eval _acs_makefile_prefix := $(subst .,-,$(_acs_makefile_prefix)))
+ $(eval _acs_makefile_prefix := $(_acs_makefile_prefix)_acs)
+ $(if $($(_acs_makefile_prefix)),,\
+ $(eval $(_acs_makefile_prefix) := $(INTERNAL_CLEAN_BUILD_VERSION)))
+ $(eval $(_acs_makefile_prefix) := $($(_acs_makefile_prefix))@)
+ $(if $(strip $(2)),$(eval _acs_id := $($(_acs_makefile_prefix))),\
+ $(eval _acs_id := $(_acs_makefile_prefix)$($(_acs_makefile_prefix))))
+ $(eval INTERNAL_CLEAN_STEPS += $(_acs_id))
+ $(eval INTERNAL_CLEAN_STEP.$(_acs_id) := $(1))
+ $(eval _acs_id :=)
+ $(eval _acs_makefile_prefix :=)
+endef
+define add-clean-step
+$(eval # for build/core/cleanspec.mk, don't use makefile path as part of step id) \
+$(if $(filter %/cleanspec.mk,$(lastword $(MAKEFILE_LIST))),\
+ $(eval $(call _add-clean-step,$(1),true)),\
+ $(eval $(call _add-clean-step,$(1))))
+endef
+
+# Defines INTERNAL_CLEAN_BUILD_VERSION and the individual clean steps.
+# cleanspec.mk is outside of the core directory so that more people
+# can have permission to touch it.
+include $(BUILD_SYSTEM)/cleanspec.mk
+INTERNAL_CLEAN_BUILD_VERSION := $(strip $(INTERNAL_CLEAN_BUILD_VERSION))
+INTERNAL_CLEAN_STEPS := $(strip $(INTERNAL_CLEAN_STEPS))
+
+# If the clean_steps.mk file is missing (usually after a clean build)
+# then we won't do anything.
+CURRENT_CLEAN_BUILD_VERSION := $(INTERNAL_CLEAN_BUILD_VERSION)
+CURRENT_CLEAN_STEPS := $(INTERNAL_CLEAN_STEPS)
+
+# Read the current state from the file, if present.
+# Will set CURRENT_CLEAN_BUILD_VERSION and CURRENT_CLEAN_STEPS.
+#
+clean_steps_file := $(PRODUCT_OUT)/clean_steps.mk
+-include $(clean_steps_file)
+
+ifneq ($(CURRENT_CLEAN_BUILD_VERSION),$(INTERNAL_CLEAN_BUILD_VERSION))
+ # The major clean version is out-of-date. Do a full clean, and
+ # don't even bother with the clean steps.
+ $(info *** A clean build is required because of a recent change.)
+ $(shell rm -rf $(OUT_DIR))
+ $(info *** Done with the cleaning, now starting the real build.)
+else
+ # The major clean version is correct. Find the list of clean steps
+ # that we need to execute to get up-to-date.
+ steps := \
+ $(filter-out $(CURRENT_CLEAN_STEPS),$(INTERNAL_CLEAN_STEPS))
+ $(foreach step,$(steps), \
+ $(info Clean step: $(INTERNAL_CLEAN_STEP.$(step))) \
+ $(shell $(INTERNAL_CLEAN_STEP.$(step))) \
+ )
+
+ # Rewrite the clean step for the second arch.
+ ifdef TARGET_2ND_ARCH
+ # $(1): the clean step cmd
+ # $(2): the prefix to search for
+ # $(3): the prefix to replace with
+ define -cs-rewrite-cleanstep
+ $(if $(filter $(2)/%,$(1)),\
+ $(eval _crs_new_cmd := $(patsubst $(2)/%,$(3)/%,$(1)))\
+ $(info Clean step: $(_crs_new_cmd))\
+ $(shell $(_crs_new_cmd)))
+ endef
+ $(foreach step,$(steps), \
+ $(call -cs-rewrite-cleanstep,$(INTERNAL_CLEAN_STEP.$(step)),$(TARGET_OUT_INTERMEDIATES),$($(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATES))\
+ $(call -cs-rewrite-cleanstep,$(INTERNAL_CLEAN_STEP.$(step)),$(TARGET_OUT_SHARED_LIBRARIES),$($(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_SHARED_LIBRARIES))\
+ $(call -cs-rewrite-cleanstep,$(INTERNAL_CLEAN_STEP.$(step)),$(TARGET_OUT_VENDOR_SHARED_LIBRARIES),$($(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_VENDOR_SHARED_LIBRARIES))\
+ $(call -cs-rewrite-cleanstep,$(INTERNAL_CLEAN_STEP.$(step)),$($(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATES),$(TARGET_OUT_INTERMEDIATES))\
+ $(call -cs-rewrite-cleanstep,$(INTERNAL_CLEAN_STEP.$(step)),$($(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_SHARED_LIBRARIES),$(TARGET_OUT_SHARED_LIBRARIES))\
+ $(call -cs-rewrite-cleanstep,$(INTERNAL_CLEAN_STEP.$(step)),$($(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_VENDOR_SHARED_LIBRARIES),$(TARGET_OUT_VENDOR_SHARED_LIBRARIES))\
+ )
+ endif
+ _crs_new_cmd :=
+ steps :=
+endif
+
+# Write the new state to the file.
+#
+rewrite_clean_steps_file :=
+ifneq ($(CURRENT_CLEAN_BUILD_VERSION)-$(CURRENT_CLEAN_STEPS),$(INTERNAL_CLEAN_BUILD_VERSION)-$(INTERNAL_CLEAN_STEPS))
+rewrite_clean_steps_file := true
+endif
+ifeq ($(wildcard $(clean_steps_file)),)
+# This is the first build.
+rewrite_clean_steps_file := true
+endif
+ifeq ($(rewrite_clean_steps_file),true)
+$(shell \
+ mkdir -p $(dir $(clean_steps_file)) && \
+ echo "CURRENT_CLEAN_BUILD_VERSION := $(INTERNAL_CLEAN_BUILD_VERSION)" > \
+ $(clean_steps_file) ;\
+ echo "CURRENT_CLEAN_STEPS := $(wordlist 1,500,$(INTERNAL_CLEAN_STEPS))" >> $(clean_steps_file) \
+ )
+define -cs-write-clean-steps-if-arg1-not-empty
+$(if $(1),$(shell echo "CURRENT_CLEAN_STEPS += $(1)" >> $(clean_steps_file)))
+endef
+$(call -cs-write-clean-steps-if-arg1-not-empty,$(wordlist 501,1000,$(INTERNAL_CLEAN_STEPS)))
+$(call -cs-write-clean-steps-if-arg1-not-empty,$(wordlist 1001,1500,$(INTERNAL_CLEAN_STEPS)))
+$(call -cs-write-clean-steps-if-arg1-not-empty,$(wordlist 1501,2000,$(INTERNAL_CLEAN_STEPS)))
+$(call -cs-write-clean-steps-if-arg1-not-empty,$(wordlist 2001,2500,$(INTERNAL_CLEAN_STEPS)))
+$(call -cs-write-clean-steps-if-arg1-not-empty,$(wordlist 2501,3000,$(INTERNAL_CLEAN_STEPS)))
+$(call -cs-write-clean-steps-if-arg1-not-empty,$(wordlist 3001,99999,$(INTERNAL_CLEAN_STEPS)))
+endif
+
+CURRENT_CLEAN_BUILD_VERSION :=
+CURRENT_CLEAN_STEPS :=
+clean_steps_file :=
+rewrite_clean_steps_file :=
+INTERNAL_CLEAN_STEPS :=
+INTERNAL_CLEAN_BUILD_VERSION :=
+
+endif # if not ONE_SHOT_MAKEFILE dont_bother
+
+# Since products and build variants (unfortunately) share the same
+# PRODUCT_OUT staging directory, things can get out of sync if different
+# build configurations are built in the same tree. The following logic
+# will notice when the configuration has changed and remove the files
+# necessary to keep things consistent.
+
+previous_build_config_file := $(PRODUCT_OUT)/previous_build_config.mk
+
+# A change in the list of aapt configs warrants an installclean, too.
+aapt_config_list := $(strip $(PRODUCT_AAPT_CONFIG) $(PRODUCT_AAPT_PREF_CONFIG))
+
+current_build_config := \
+ $(TARGET_PRODUCT)-$(TARGET_BUILD_VARIANT)-{$(aapt_config_list)}
+current_sanitize_target := $(strip $(SANITIZE_TARGET))
+ifeq (,$(current_sanitize_target))
+ current_sanitize_target := false
+endif
+aapt_config_list :=
+force_installclean := false
+force_objclean := false
+
+# Read the current state from the file, if present.
+# Will set PREVIOUS_BUILD_CONFIG.
+#
+PREVIOUS_BUILD_CONFIG :=
+PREVIOUS_SANITIZE_TARGET :=
+-include $(previous_build_config_file)
+PREVIOUS_BUILD_CONFIG := $(strip $(PREVIOUS_BUILD_CONFIG))
+PREVIOUS_SANITIZE_TARGET := $(strip $(PREVIOUS_SANITIZE_TARGET))
+
+ifdef PREVIOUS_BUILD_CONFIG
+ ifneq "$(current_build_config)" "$(PREVIOUS_BUILD_CONFIG)"
+ $(info *** Build configuration changed: "$(PREVIOUS_BUILD_CONFIG)" -> "$(current_build_config)")
+ ifneq ($(DISABLE_AUTO_INSTALLCLEAN),true)
+ force_installclean := true
+ else
+ $(info DISABLE_AUTO_INSTALLCLEAN is set; skipping auto-clean. Your tree may be in an inconsistent state.)
+ endif
+ endif
+endif # else, this is the first build, so no need to clean.
+
+ifdef PREVIOUS_SANITIZE_TARGET
+ ifneq "$(current_sanitize_target)" "$(PREVIOUS_SANITIZE_TARGET)"
+ $(info *** SANITIZE_TARGET changed: "$(PREVIOUS_SANITIZE_TARGET)" -> "$(current_sanitize_target)")
+ force_objclean := true
+ endif
+endif # else, this is the first build, so no need to clean.
+
+# Write the new state to the file.
+#
+ifneq ($(PREVIOUS_BUILD_CONFIG)-$(PREVIOUS_SANITIZE_TARGET),$(current_build_config)-$(current_sanitize_target))
+$(shell \
+ mkdir -p $(dir $(previous_build_config_file)) && \
+ echo "PREVIOUS_BUILD_CONFIG := $(current_build_config)" > \
+ $(previous_build_config_file) && \
+ echo "PREVIOUS_SANITIZE_TARGET := $(current_sanitize_target)" >> \
+ $(previous_build_config_file) \
+ )
+endif
+PREVIOUS_BUILD_CONFIG :=
+PREVIOUS_SANITIZE_TARGET :=
+previous_build_config_file :=
+current_build_config :=
+
+#
+# installclean logic
+#
+
+# The files/dirs to delete during an installclean. This includes the
+# non-common APPS directory, which may contain the wrong resources.
+#
+# Deletes all of the files that change between different build types,
+# like "make user" vs. "make sdk". This lets you work with different
+# build types without having to do a full clean each time. E.g.:
+#
+# $ make -j8 all
+# $ make installclean
+# $ make -j8 user
+# $ make installclean
+# $ make -j8 sdk
+#
+installclean_files := \
+ $(HOST_OUT)/obj/NOTICE_FILES \
+ $(HOST_OUT)/sdk \
+ $(PRODUCT_OUT)/*.img \
+ $(PRODUCT_OUT)/*.ini \
+ $(PRODUCT_OUT)/*.txt \
+ $(PRODUCT_OUT)/*.xlb \
+ $(PRODUCT_OUT)/*.zip \
+ $(PRODUCT_OUT)/kernel \
+ $(PRODUCT_OUT)/data \
+ $(PRODUCT_OUT)/skin \
+ $(PRODUCT_OUT)/obj/APPS \
+ $(PRODUCT_OUT)/obj/NOTICE_FILES \
+ $(PRODUCT_OUT)/obj/PACKAGING \
+ $(PRODUCT_OUT)/recovery \
+ $(PRODUCT_OUT)/root \
+ $(PRODUCT_OUT)/system \
+ $(PRODUCT_OUT)/system_other \
+ $(PRODUCT_OUT)/vendor \
+ $(PRODUCT_OUT)/oem \
+ $(PRODUCT_OUT)/dex_bootjars \
+ $(PRODUCT_OUT)/obj/JAVA_LIBRARIES \
+ $(PRODUCT_OUT)/obj/FAKE \
+ $(PRODUCT_OUT)/obj/EXECUTABLES/adbd_intermediates \
+ $(PRODUCT_OUT)/obj/EXECUTABLES/logd_intermediates \
+ $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/libfs_mgr_intermediates \
+ $(PRODUCT_OUT)/obj/EXECUTABLES/init_intermediates \
+ $(PRODUCT_OUT)/obj/ETC/mac_permissions.xml_intermediates \
+ $(PRODUCT_OUT)/obj/ETC/sepolicy_intermediates \
+ $(PRODUCT_OUT)/obj/ETC/sepolicy.recovery_intermediates \
+ $(PRODUCT_OUT)/obj/ETC/init.environ.rc_intermediates
+
+# The files/dirs to delete during a dataclean, which removes any files
+# in the staging and emulator data partitions.
+dataclean_files := \
+ $(PRODUCT_OUT)/data/* \
+ $(PRODUCT_OUT)/data-qemu/* \
+ $(PRODUCT_OUT)/userdata-qemu.img
+
+# The files/dirs to delete during an objclean, which removes the
+# intermediate object directories for the target (and 2nd arch, if any).
+objclean_files := \
+ $(TARGET_OUT_INTERMEDIATES) \
+ $($(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_INTERMEDIATES)
+
+# Make sure *_OUT is set so that we don't end up deleting random parts
+# of the filesystem.
+ifneq (2,$(words $(HOST_OUT) $(PRODUCT_OUT)))
+ $(error both HOST_OUT and PRODUCT_OUT should be set at this point.)
+endif
+
+# Define the rules for commandline invocation.
+.PHONY: dataclean
+dataclean: FILES := $(dataclean_files)
+dataclean:
+ $(hide) rm -rf $(FILES)
+ @echo "Deleted emulator userdata images."
+
+.PHONY: installclean
+installclean: FILES := $(installclean_files)
+installclean: dataclean
+ $(hide) rm -rf $(FILES)
+ @echo "Deleted images and staging directories."
+
+.PHONY: objclean
+objclean: FILES := $(objclean_files)
+objclean:
+ $(hide) rm -rf $(FILES)
+ @echo "Deleted images and staging directories."
+
+ifeq "$(force_installclean)" "true"
+ $(info *** Forcing "make installclean"...)
+ $(info *** rm -rf $(dataclean_files) $(installclean_files))
+ $(shell rm -rf $(dataclean_files) $(installclean_files))
+ $(info *** Done with the cleaning, now starting the real build.)
+endif
+force_installclean :=
+
+ifeq "$(force_objclean)" "true"
+ $(info *** Forcing cleanup of intermediate files...)
+ $(info *** rm -rf $(objclean_files))
+ $(shell rm -rf $(objclean_files))
+ $(info *** Done with the cleaning, now starting the real build.)
+endif
+force_objclean :=
+
+###########################################################
+
+.PHONY: clean-jack-files
+clean-jack-files: clean-dex-files
+ $(hide) find $(OUT_DIR) -name "*.jack" | xargs rm -f
+ $(hide) find $(OUT_DIR) -type d -name "jack" | xargs rm -rf
+ @echo "All jack files have been removed."
+
+.PHONY: clean-dex-files
+clean-dex-files:
+ $(hide) find $(OUT_DIR) -name "*.dex" ! -path "*/jack-incremental/*" | xargs rm -f
+ $(hide) for i in `find $(OUT_DIR) -name "*.jar" -o -name "*.apk"` ; do ((unzip -l $$i 2> /dev/null | \
+ grep -q "\.dex$$" && rm -f $$i) || continue ) ; done
+ @echo "All dex files and archives containing dex files have been removed."
+
+.PHONY: clean-jack-incremental
+clean-jack-incremental:
+ $(hide) find $(OUT_DIR) -name "jack-incremental" -type d | xargs rm -rf
+ @echo "All jack incremental dirs have been removed."
diff --git a/core/cleanspec.mk b/core/cleanspec.mk
new file mode 100644
index 0000000000000000000000000000000000000000..4441e2ac3f6e0e6458a920113b2bc72071f9f96b
--- /dev/null
+++ b/core/cleanspec.mk
@@ -0,0 +1,69 @@
+# Copyright (C) 2007 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Just bump this if you want to force a clean build.
+# **********************************************************************
+# WHEN DOING SO
+# 1. DELETE ANY "add-clean-step" ENTRIES THAT HAVE PILED UP IN THIS FILE.
+# 2. REMOVE ALL FILES NAMED CleanSpec.mk.
+# 3. BUMP THE VERSION.
+# IDEALLY, THOSE STEPS SHOULD BE DONE ATOMICALLY.
+# **********************************************************************
+#
+INTERNAL_CLEAN_BUILD_VERSION := 6
+#
+# ***********************************************************************
+# Do not touch INTERNAL_CLEAN_BUILD_VERSION if you've added a clean step!
+# ***********************************************************************
+
+# If you don't need to do a full clean build but would like to touch
+# a file or delete some intermediate files, add a clean step to the end
+# of the list. These steps will only be run once, if they haven't been
+# run before.
+#
+# E.g.:
+# $(call add-clean-step, touch -c external/sqlite/sqlite3.h)
+# $(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/libz_intermediates)
+#
+# Always use "touch -c" and "rm -f" or "rm -rf" to gracefully deal with
+# files that are missing or have been moved.
+#
+# Use $(PRODUCT_OUT) to get to the "out/target/product/blah/" directory.
+# Use $(OUT_DIR) to refer to the "out" directory.
+#
+# If you need to re-do something that's already mentioned, just copy
+# the command and add it to the bottom of the list. E.g., if a change
+# that you made last week required touching a file and a change you
+# made today requires touching the same file, just copy the old
+# touch step and add it to the end of the list.
+#
+# ************************************************
+# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
+# ************************************************
+
+# For example:
+#$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/APPS/AndroidTests_intermediates)
+#$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/JAVA_LIBRARIES/core_intermediates)
+#$(call add-clean-step, find $(OUT_DIR) -type f -name "IGTalkSession*" -print0 | xargs -0 rm -f)
+#$(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/*)
+
+# ************************************************
+# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
+# ************************************************
+
+subdir_cleanspecs := \
+ $(shell build/tools/findleaves.py $(FIND_LEAVES_EXCLUDES) . CleanSpec.mk)
+include $(subdir_cleanspecs)
+subdir_cleanspecs :=
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
new file mode 100644
index 0000000000000000000000000000000000000000..5886610588895ef30ce87ed4f87b418fa4de4909
--- /dev/null
+++ b/core/clear_vars.mk
@@ -0,0 +1,371 @@
+###########################################################
+## Clear out values of all variables used by rule templates.
+###########################################################
+
+LOCAL_MODULE:=
+LOCAL_MODULE_PATH:=
+LOCAL_MODULE_RELATIVE_PATH :=
+LOCAL_MODULE_STEM:=
+LOCAL_DONT_CHECK_MODULE:=
+LOCAL_CHECKED_MODULE:=
+LOCAL_BUILT_MODULE:=
+LOCAL_BUILT_MODULE_STEM:=
+OVERRIDE_BUILT_MODULE_PATH:=
+LOCAL_INSTALLED_MODULE:=
+LOCAL_INSTALLED_MODULE_STEM:=
+LOCAL_PICKUP_FILES:=
+LOCAL_UNINSTALLABLE_MODULE:=
+LOCAL_INTERMEDIATE_TARGETS:=
+LOCAL_UNSTRIPPED_PATH:=
+LOCAL_MODULE_CLASS:=
+LOCAL_MODULE_SUFFIX:=
+LOCAL_PACKAGE_NAME:=
+LOCAL_OVERRIDES_PACKAGES:=
+LOCAL_EXPORT_PACKAGE_RESOURCES:=
+LOCAL_MANIFEST_PACKAGE_NAME:=
+LOCAL_PACKAGE_SPLITS:=
+LOCAL_REQUIRED_MODULES:=
+LOCAL_ACP_UNAVAILABLE:=
+LOCAL_MODULE_TAGS:=
+LOCAL_SRC_FILES:=
+LOCAL_SRC_FILES_EXCLUDE:=
+LOCAL_PREBUILT_OBJ_FILES:=
+LOCAL_STATIC_JAVA_LIBRARIES:=
+LOCAL_STATIC_JAVA_AAR_LIBRARIES:=
+LOCAL_STATIC_LIBRARIES:=
+# Group static libraries with "-Wl,--start-group" and "-Wl,--end-group" when linking.
+LOCAL_GROUP_STATIC_LIBRARIES:=
+LOCAL_WHOLE_STATIC_LIBRARIES:=
+LOCAL_SHARED_LIBRARIES:=
+LOCAL_IS_HOST_MODULE:=
+LOCAL_CC:=
+LOCAL_CXX:=
+LOCAL_CPP_EXTENSION:=
+LOCAL_NO_DEFAULT_COMPILER_FLAGS:=
+LOCAL_FDO_SUPPORT:=
+LOCAL_ARM_MODE:=
+LOCAL_YACCFLAGS:=
+LOCAL_ASFLAGS:=
+LOCAL_CFLAGS:=
+LOCAL_CPPFLAGS:=
+LOCAL_CLANG_ASFLAGS:=
+LOCAL_CLANG_CFLAGS:=
+LOCAL_CLANG_CONLYFLAGS:=
+LOCAL_CLANG_CPPFLAGS:=
+LOCAL_CONLYFLAGS:=
+LOCAL_RTTI_FLAG:=
+LOCAL_C_INCLUDES:=
+LOCAL_EXPORT_C_INCLUDE_DIRS:=
+LOCAL_LDFLAGS:=
+LOCAL_CLANG_LDFLAGS:=
+LOCAL_LDLIBS:=
+LOCAL_AAPT_FLAGS:=
+LOCAL_AAPT_INCLUDE_ALL_RESOURCES:=
+LOCAL_SYSTEM_SHARED_LIBRARIES:=none
+LOCAL_PREBUILT_LIBS:=
+LOCAL_PREBUILT_EXECUTABLES:=
+LOCAL_PREBUILT_JAVA_LIBRARIES:=
+LOCAL_PREBUILT_STATIC_JAVA_LIBRARIES:=
+LOCAL_PREBUILT_STRIP_COMMENTS:=
+LOCAL_INTERMEDIATE_SOURCES:=
+LOCAL_INTERMEDIATE_SOURCE_DIR:=
+LOCAL_JAVACFLAGS:=
+LOCAL_JAVA_LIBRARIES:=
+LOCAL_JAVA_LAYERS_FILE:=
+LOCAL_NO_STANDARD_LIBRARIES:=
+LOCAL_CLASSPATH:=
+LOCAL_JACK_CLASSPATH:=
+LOCAL_DROIDDOC_USE_STANDARD_DOCLET:=
+LOCAL_DROIDDOC_SOURCE_PATH:=
+LOCAL_DROIDDOC_TEMPLATE_DIR:=
+LOCAL_DROIDDOC_CUSTOM_TEMPLATE_DIR:=
+LOCAL_DROIDDOC_ASSET_DIR:=
+LOCAL_DROIDDOC_CUSTOM_ASSET_DIR:=
+LOCAL_DROIDDOC_OPTIONS:=
+LOCAL_DROIDDOC_HTML_DIR:=
+LOCAL_DROIDDOC_STUB_OUT_DIR:=
+LOCAL_ADDITIONAL_HTML_DIR:=
+LOCAL_ASSET_DIR:=
+LOCAL_RESOURCE_DIR:=
+LOCAL_JAVA_RESOURCE_DIRS:=
+LOCAL_JAVA_RESOURCE_FILES:=
+LOCAL_GENERATED_SOURCES:=
+LOCAL_COPY_HEADERS_TO:=
+LOCAL_COPY_HEADERS:=
+LOCAL_FORCE_STATIC_EXECUTABLE:=
+LOCAL_ADDITIONAL_DEPENDENCIES:=
+LOCAL_STRIP_MODULE:=
+LOCAL_PACK_MODULE_RELOCATIONS:=
+LOCAL_JNI_SHARED_LIBRARIES:=
+LOCAL_JNI_SHARED_LIBRARIES_ABI:=
+LOCAL_PREBUILT_JNI_LIBS:=
+LOCAL_JAR_MANIFEST:=
+LOCAL_INSTRUMENTATION_FOR:=
+LOCAL_APK_LIBRARIES:=
+LOCAL_RES_LIBRARIES:=
+LOCAL_MANIFEST_INSTRUMENTATION_FOR:=
+LOCAL_AIDL_INCLUDES:=
+LOCAL_VTS_INCLUDES:=
+LOCAL_JARJAR_RULES:=
+LOCAL_ADDITIONAL_JAVA_DIR:=
+LOCAL_ALLOW_UNDEFINED_SYMBOLS:=
+LOCAL_DX_FLAGS:=
+LOCAL_JACK_ENABLED:=$(DEFAULT_JACK_ENABLED) # '' (ie disabled), disabled, full, incremental
+LOCAL_JACK_FLAGS:=
+LOCAL_JACK_COVERAGE_INCLUDE_FILTER:=
+LOCAL_JACK_COVERAGE_EXCLUDE_FILTER:=
+LOCAL_JILL_FLAGS:=
+LOCAL_CERTIFICATE:=
+LOCAL_SDK_VERSION:=
+LOCAL_MIN_SDK_VERSION:=
+LOCAL_SDK_RES_VERSION:=
+LOCAL_NDK_STL_VARIANT:=
+LOCAL_EMMA_INSTRUMENT:=
+LOCAL_PROGUARD_ENABLED:= # '',full,custom,nosystem,disabled,obfuscation,optimization
+LOCAL_PROGUARD_FLAGS:=
+LOCAL_JACK_PROGUARD_FLAGS:=
+LOCAL_PROGUARD_FLAG_FILES:=
+LOCAL_TEST_MODULE_TO_PROGUARD_WITH:=
+LOCAL_EMMA_COVERAGE_FILTER:=
+LOCAL_WARNINGS_ENABLE:=
+LOCAL_FULL_MANIFEST_FILE:=
+LOCAL_MANIFEST_FILE:=
+LOCAL_FULL_LIBS_MANIFEST_FILES:=
+LOCAL_RENDERSCRIPT_INCLUDES:=
+LOCAL_RENDERSCRIPT_INCLUDES_OVERRIDE:=
+LOCAL_RENDERSCRIPT_CC:=
+LOCAL_RENDERSCRIPT_COMPATIBILITY:=
+LOCAL_RENDERSCRIPT_FLAGS:=
+LOCAL_RENDERSCRIPT_TARGET_API:=
+LOCAL_DEX_PREOPT:= # '',true,false,nostripping
+LOCAL_DEX_PREOPT_IMAGE_LOCATION:=
+LOCAL_DEX_PREOPT_FLAGS:=
+LOCAL_PROTOC_OPTIMIZE_TYPE:= # lite(default),micro,nano,full,nanopb-c,nanopb-c-enable_malloc
+LOCAL_PROTOC_FLAGS:=
+LOCAL_PROTO_JAVA_OUTPUT_PARAMS:=
+LOCAL_VTSC_FLAGS:=
+LOCAL_NO_CRT:=
+LOCAL_NO_LIBGCC:=
+LOCAL_PROPRIETARY_MODULE:=
+LOCAL_OEM_MODULE:=
+LOCAL_ODM_MODULE:=
+LOCAL_PRIVILEGED_MODULE:=
+LOCAL_MODULE_OWNER:=
+LOCAL_COMPATIBILITY_SUITE:=
+LOCAL_COMPATIBILITY_SUPPORT_FILES:=
+LOCAL_CTS_TEST_PACKAGE:=
+LOCAL_CTS_TEST_RUNNER:=
+LOCAL_CLANG:=
+LOCAL_JAR_EXCLUDE_FILES:=
+LOCAL_JAR_PACKAGES:=
+LOCAL_JAR_EXCLUDE_PACKAGES:=
+LOCAL_SOURCE_FILES_ALL_GENERATED:= # '',true
+# Don't delete the META_INF dir when merging static Java libraries.
+LOCAL_DONT_DELETE_JAR_META_INF:=
+LOCAL_DONT_DELETE_JAR_DIRS:=
+LOCAL_ADDITIONAL_CERTIFICATES:=
+LOCAL_PREBUILT_MODULE_FILE:=
+LOCAL_POST_LINK_CMD:=
+LOCAL_POST_INSTALL_CMD:=
+LOCAL_HAL_STATIC_LIBRARIES:=
+LOCAL_RMTYPEDEFS:=
+LOCAL_NO_SYNTAX_CHECK:=
+LOCAL_NO_STATIC_ANALYZER:=
+LOCAL_32_BIT_ONLY:= # '',true
+LOCAL_MULTILIB:=
+LOCAL_MODULE_TARGET_ARCH:=
+LOCAL_MODULE_TARGET_ARCH_WARN:=
+LOCAL_MODULE_UNSUPPORTED_TARGET_ARCH:=
+LOCAL_MODULE_UNSUPPORTED_TARGET_ARCH_WARN:=
+LOCAL_MODULE_HOST_ARCH:=
+LOCAL_MODULE_HOST_ARCH_WARN:=
+LOCAL_MODULE_UNSUPPORTED_HOST_ARCH:=
+LOCAL_MODULE_UNSUPPORTED_HOST_ARCH_WARN:=
+LOCAL_MODULE_HOST_CROSS_ARCH:=
+LOCAL_MODULE_HOST_CROSS_ARCH_WARN:=
+LOCAL_MODULE_UNSUPPORTED_HOST_CROSS_ARCH:=
+LOCAL_MODULE_UNSUPPORTED_HOST_CROSS_ARCH_WARN:=
+LOCAL_NO_FPIE :=
+LOCAL_CXX_STL := default
+LOCAL_NATIVE_COVERAGE :=
+LOCAL_DPI_VARIANTS:=
+LOCAL_DPI_FILE_STEM:=
+LOCAL_SANITIZE:=
+LOCAL_SANITIZE_RECOVER:=
+LOCAL_DATA_BINDING:=
+LOCAL_DBUS_PROXY_PREFIX:=
+LOCAL_INIT_RC:=
+LOCAL_MODULE_HOST_OS:=
+LOCAL_FINDBUGS_FLAGS:=
+LOCAL_NOTICE_FILE:=
+LOCAL_USE_AAPT2:=$(USE_AAPT2)
+LOCAL_STATIC_ANDROID_LIBRARIES:=
+LOCAL_SHARED_ANDROID_LIBRARIES:=
+# Used to replace the installed file of a presigned prebuilt apk in PDK fusion build,
+# to avoid installing the presigned apks with classes.dex unstripped.
+LOCAL_REPLACE_PREBUILT_APK_INSTALLED:=
+LOCAL_EXTRACT_APK:=
+LOCAL_EXTRACT_DPI_APK:=
+
+# arch specific variables
+LOCAL_SRC_FILES_$(TARGET_ARCH):=
+LOCAL_SRC_FILES_EXCLUDE_$(TARGET_ARCH):=
+LOCAL_CFLAGS_$(TARGET_ARCH):=
+LOCAL_CLANG_CFLAGS_$(TARGET_ARCH):=
+LOCAL_CPPFLAGS_$(TARGET_ARCH):=
+LOCAL_CLANG_CPPFLAGS_$(TARGET_ARCH):=
+LOCAL_C_INCLUDES_$(TARGET_ARCH):=
+LOCAL_ASFLAGS_$(TARGET_ARCH):=
+LOCAL_CLANG_ASFLAGS_$(TARGET_ARCH):=
+LOCAL_LDFLAGS_$(TARGET_ARCH):=
+LOCAL_CLANG_LDFLAGS_$(TARGET_ARCH):=
+LOCAL_SHARED_LIBRARIES_$(TARGET_ARCH):=
+LOCAL_STATIC_LIBRARIES_$(TARGET_ARCH):=
+LOCAL_WHOLE_STATIC_LIBRARIES_$(TARGET_ARCH):=
+LOCAL_GENERATED_SOURCES_$(TARGET_ARCH):=
+LOCAL_REQUIRED_MODULES_$(TARGET_ARCH):=
+LOCAL_CLANG_$(TARGET_ARCH):=
+LOCAL_PREBUILT_JNI_LIBS_$(TARGET_ARCH):=
+LOCAL_STRIP_MODULE_$(TARGET_ARCH):=
+LOCAL_PACK_MODULE_RELOCATIONS_$(TARGET_ARCH):=
+ifdef TARGET_2ND_ARCH
+LOCAL_SRC_FILES_$(TARGET_2ND_ARCH):=
+LOCAL_SRC_FILES_EXCLUDE_$(TARGET_2ND_ARCH):=
+LOCAL_CFLAGS_$(TARGET_2ND_ARCH):=
+LOCAL_CLANG_CFLAGS_$(TARGET_2ND_ARCH):=
+LOCAL_CPPFLAGS_$(TARGET_2ND_ARCH):=
+LOCAL_CLANG_CPPFLAGS_$(TARGET_2ND_ARCH):=
+LOCAL_C_INCLUDES_$(TARGET_2ND_ARCH):=
+LOCAL_ASFLAGS_$(TARGET_2ND_ARCH):=
+LOCAL_CLANG_ASFLAGS_$(TARGET_2ND_ARCH):=
+LOCAL_LDFLAGS_$(TARGET_2ND_ARCH):=
+LOCAL_CLANG_LDFLAGS_$(TARGET_2ND_ARCH):=
+LOCAL_SHARED_LIBRARIES_$(TARGET_2ND_ARCH):=
+LOCAL_STATIC_LIBRARIES_$(TARGET_2ND_ARCH):=
+LOCAL_WHOLE_STATIC_LIBRARIES_$(TARGET_2ND_ARCH):=
+LOCAL_GENERATED_SOURCES_$(TARGET_2ND_ARCH):=
+LOCAL_REQUIRED_MODULES_$(TARGET_2ND_ARCH):=
+LOCAL_CLANG_$(TARGET_2ND_ARCH):=
+LOCAL_PREBUILT_JNI_LIBS_$(TARGET_2ND_ARCH):=
+LOCAL_STRIP_MODULE_$(TARGET_2ND_ARCH):=
+LOCAL_PACK_MODULE_RELOCATIONS_$(TARGET_2ND_ARCH):=
+endif
+LOCAL_SRC_FILES_$(HOST_ARCH):=
+LOCAL_SRC_FILES_EXCLUDE_$(HOST_ARCH):=
+LOCAL_CFLAGS_$(HOST_ARCH):=
+LOCAL_CLANG_CFLAGS_$(HOST_ARCH):=
+LOCAL_CPPFLAGS_$(HOST_ARCH):=
+LOCAL_CLANG_CPPFLAGS_$(HOST_ARCH):=
+LOCAL_C_INCLUDES_$(HOST_ARCH):=
+LOCAL_ASFLAGS_$(HOST_ARCH):=
+LOCAL_CLANG_ASFLAGS_$(HOST_ARCH):=
+LOCAL_LDFLAGS_$(HOST_ARCH):=
+LOCAL_CLANG_LDFLAGS_$(HOST_ARCH):=
+LOCAL_SHARED_LIBRARIES_$(HOST_ARCH):=
+LOCAL_STATIC_LIBRARIES_$(HOST_ARCH):=
+LOCAL_WHOLE_STATIC_LIBRARIES_$(HOST_ARCH):=
+LOCAL_GENERATED_SOURCES_$(HOST_ARCH):=
+LOCAL_REQUIRED_MODULES_$(HOST_ARCH):=
+LOCAL_CLANG_$(HOST_ARCH):=
+ifdef HOST_2ND_ARCH
+LOCAL_SRC_FILES_$(HOST_2ND_ARCH):=
+LOCAL_SRC_FILES_EXCLUDE_$(HOST_2ND_ARCH):=
+LOCAL_CFLAGS_$(HOST_2ND_ARCH):=
+LOCAL_CLANG_CFLAGS_$(HOST_2ND_ARCH):=
+LOCAL_CPPFLAGS_$(HOST_2ND_ARCH):=
+LOCAL_CLANG_CPPFLAGS_$(HOST_2ND_ARCH):=
+LOCAL_C_INCLUDES_$(HOST_2ND_ARCH):=
+LOCAL_ASFLAGS_$(HOST_2ND_ARCH):=
+LOCAL_CLANG_ASFLAGS_$(HOST_2ND_ARCH):=
+LOCAL_LDFLAGS_$(HOST_2ND_ARCH):=
+LOCAL_CLANG_LDFLAGS_$(HOST_2ND_ARCH):=
+LOCAL_SHARED_LIBRARIES_$(HOST_2ND_ARCH):=
+LOCAL_STATIC_LIBRARIES_$(HOST_2ND_ARCH):=
+LOCAL_WHOLE_STATIC_LIBRARIES_$(HOST_2ND_ARCH):=
+LOCAL_GENERATED_SOURCES_$(HOST_2ND_ARCH):=
+LOCAL_REQUIRED_MODULES_$(HOST_2ND_ARCH):=
+LOCAL_CLANG_$(HOST_2ND_ARCH):=
+endif
+
+LOCAL_SRC_FILES_$(HOST_OS):=
+LOCAL_STATIC_LIBRARIES_$(HOST_OS):=
+LOCAL_SHARED_LIBRARIES_$(HOST_OS):=
+LOCAL_CFLAGS_$(HOST_OS):=
+LOCAL_CPPFLAGS_$(HOST_OS):=
+LOCAL_LDFLAGS_$(HOST_OS):=
+LOCAL_LDLIBS_$(HOST_OS):=
+LOCAL_ASFLAGS_$(HOST_OS):=
+LOCAL_C_INCLUDES_$(HOST_OS):=
+LOCAL_GENERATED_SOURCES_$(HOST_OS):=
+LOCAL_REQUIRED_MODULES_$(HOST_OS):=
+
+ifdef HOST_CROSS_OS
+LOCAL_SRC_FILES_$(HOST_CROSS_OS):=
+LOCAL_STATIC_LIBRARIES_$(HOST_CROSS_OS):=
+LOCAL_SHARED_LIBRARIES_$(HOST_CROSS_OS):=
+LOCAL_CFLAGS_$(HOST_CROSS_OS):=
+LOCAL_CPPFLAGS_$(HOST_CROSS_OS):=
+LOCAL_LDFLAGS_$(HOST_CROSS_OS):=
+LOCAL_LDLIBS_$(HOST_CROSS_OS):=
+LOCAL_ASFLAGS_$(HOST_CROSS_OS):=
+LOCAL_C_INCLUDES_$(HOST_CROSS_OS):=
+LOCAL_GENERATED_SOURCES_$(HOST_CROSS_OS):=
+LOCAL_REQUIRED_MODULES_$(HOST_CROSS_OS):=
+endif
+
+LOCAL_SRC_FILES_$(HOST_OS)_$(HOST_ARCH):=
+ifdef HOST_2ND_ARCH
+LOCAL_SRC_FILES_$(HOST_OS)_$(HOST_2ND_ARCH):=
+endif
+ifdef HOST_CROSS_OS
+LOCAL_SRC_FILES_$(HOST_CROSS_OS)_$(HOST_CROSS_ARCH):=
+ifdef HOST_CROSS_2ND_ARCH
+LOCAL_SRC_FILES_$(HOST_CROSS_OS)_$(HOST_CROSS_2ND_ARCH):=
+endif
+endif
+
+LOCAL_SRC_FILES_32:=
+LOCAL_SRC_FILES_64:=
+LOCAL_SRC_FILES_EXCLUDE_32:=
+LOCAL_SRC_FILES_EXCLUDE_64:=
+LOCAL_SHARED_LIBRARIES_32:=
+LOCAL_SHARED_LIBRARIES_64:=
+LOCAL_STATIC_LIBRARIES_32:=
+LOCAL_STATIC_LIBRARIES_64:=
+LOCAL_WHOLE_STATIC_LIBRARIES_32:=
+LOCAL_WHOLE_STATIC_LIBRARIES_64:=
+LOCAL_GENERATED_SOURCES_32:=
+LOCAL_GENERATED_SOURCES_64:=
+LOCAL_CFLAGS_32:=
+LOCAL_CFLAGS_64:=
+LOCAL_CPPFLAGS_32:=
+LOCAL_CPPFLAGS_64:=
+LOCAL_LDFLAGS_32:=
+LOCAL_LDFLAGS_64:=
+LOCAL_ASFLAGS_32:=
+LOCAL_ASFLAGS_64:=
+LOCAL_CLANG_CFLAGS_32:=
+LOCAL_CLANG_CFLAGS_64:=
+LOCAL_CLANG_CPPFLAGS_32:=
+LOCAL_CLANG_CPPFLAGS_64:=
+LOCAL_CLANG_LDFLAGS_32:=
+LOCAL_CLANG_LDFLAGS_64:=
+LOCAL_CLANG_ASFLAGS_32:=
+LOCAL_CLANG_ASFLAGS_64:=
+LOCAL_C_INCLUDES_32:=
+LOCAL_C_INCLUDES_64:=
+LOCAL_MODULE_PATH_32:=
+LOCAL_MODULE_PATH_64:=
+LOCAL_MODULE_STEM_32:=
+LOCAL_MODULE_STEM_64:=
+LOCAL_CLANG_32:=
+LOCAL_CLANG_64:=
+LOCAL_INIT_RC_32:=
+LOCAL_INIT_RC_64:=
+LOCAL_JAVA_LANGUAGE_VERSION:=
+
+# Trim MAKEFILE_LIST so that $(call my-dir) doesn't need to
+# iterate over thousands of entries every time.
+# Leave the current makefile to make sure we don't break anything
+# that expects to be able to find the name of the current makefile.
+MAKEFILE_LIST := $(lastword $(MAKEFILE_LIST))
diff --git a/core/combo/HOST_CROSS_windows-x86.mk b/core/combo/HOST_CROSS_windows-x86.mk
new file mode 100644
index 0000000000000000000000000000000000000000..6180a265a8814985aaf4b278c9250b62678b7db7
--- /dev/null
+++ b/core/combo/HOST_CROSS_windows-x86.mk
@@ -0,0 +1,65 @@
+#
+# Copyright (C) 2006 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Settings to use MinGW as a cross-compiler under Linux
+# Included by combo/select.make
+
+$(combo_var_prefix)GLOBAL_CFLAGS += -DUSE_MINGW -DWIN32_LEAN_AND_MEAN
+$(combo_var_prefix)GLOBAL_CFLAGS += -Wno-unused-parameter
+$(combo_var_prefix)GLOBAL_CFLAGS += --sysroot prebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8/x86_64-w64-mingw32
+# 32-bit output; pairs with the lib32 link directory below.
+$(combo_var_prefix)GLOBAL_CFLAGS += -m32
+$(combo_var_prefix)GLOBAL_LDFLAGS += -m32
+# Common path prefix for the MinGW cross tools (gcc/g++/ar/nm/objdump below).
+TOOLS_PREFIX := prebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8/bin/x86_64-w64-mingw32-
+$(combo_var_prefix)C_INCLUDES += prebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8/x86_64-w64-mingw32/include
+$(combo_var_prefix)C_INCLUDES += prebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8/lib/gcc/x86_64-w64-mingw32/4.8.3/include
+$(combo_var_prefix)GLOBAL_LD_DIRS += -Lprebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8/x86_64-w64-mingw32/lib32
+
+# Workaround differences in inttypes.h between host and target.
+# See bug 12708004.
+$(combo_var_prefix)GLOBAL_CFLAGS += -D__STDC_FORMAT_MACROS -D__STDC_CONSTANT_MACROS
+# Use C99-compliant printf functions (%zd).
+$(combo_var_prefix)GLOBAL_CFLAGS += -D__USE_MINGW_ANSI_STDIO=1
+# Admit to using >= Vista. Both are needed because of <_mingw.h>.
+$(combo_var_prefix)GLOBAL_CFLAGS += -D_WIN32_WINNT=0x0600 -DWINVER=0x0600
+# Get 64-bit off_t and related functions.
+$(combo_var_prefix)GLOBAL_CFLAGS += -D_FILE_OFFSET_BITS=64
+
+$(combo_var_prefix)CC := $(TOOLS_PREFIX)gcc
+$(combo_var_prefix)CXX := $(TOOLS_PREFIX)g++
+$(combo_var_prefix)AR := $(TOOLS_PREFIX)ar
+$(combo_var_prefix)NM := $(TOOLS_PREFIX)nm
+$(combo_var_prefix)OBJDUMP := $(TOOLS_PREFIX)objdump
+
+# Emit a table-of-contents file $(2) for shared library $(1): the "Name"
+# fields reported by objdump -x, then the global symbols from nm -g.
+define $(combo_var_prefix)transform-shared-lib-to-toc
+$(hide) $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)OBJDUMP) -x $(1) | grep "^Name" | cut -f3 -d" " > $(2)
+$(hide) $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)NM) -g -f p $(1) | cut -f1-2 -d" " >> $(2)
+endef
+
+# ld option: fix up mismatched stdcall symbol decorations at link time.
+$(combo_var_prefix)GLOBAL_LDFLAGS += \
+ --enable-stdcall-fixup
+
+ifneq ($(strip $(BUILD_HOST_static)),)
+# Statically-linked binaries are desirable for sandboxed environment
+$(combo_var_prefix)GLOBAL_LDFLAGS += -static
+endif # BUILD_HOST_static
+
+$(combo_var_prefix)SHLIB_SUFFIX := .dll
+$(combo_var_prefix)EXECUTABLE_SUFFIX := .exe
+
+# Empty (i.e. false): this combo produces 32-bit binaries.
+$(combo_var_prefix)IS_64_BIT :=
+
+# The mingw gcc is 4.8, 4.9 is required for color diagnostics
+$(combo_var_prefix)UNKNOWN_CFLAGS := -fdiagnostics-color
diff --git a/core/combo/HOST_CROSS_windows-x86_64.mk b/core/combo/HOST_CROSS_windows-x86_64.mk
new file mode 100644
index 0000000000000000000000000000000000000000..e9b19cf954302a06b6fc34d134845c5e3880edc2
--- /dev/null
+++ b/core/combo/HOST_CROSS_windows-x86_64.mk
@@ -0,0 +1,65 @@
+#
+# Copyright (C) 2006 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Settings to use MinGW as a cross-compiler under Linux
+# Included by combo/select.make
+
+$(combo_var_prefix)GLOBAL_CFLAGS += -DUSE_MINGW -DWIN32_LEAN_AND_MEAN
+$(combo_var_prefix)GLOBAL_CFLAGS += -Wno-unused-parameter
+$(combo_var_prefix)GLOBAL_CFLAGS += --sysroot prebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8/x86_64-w64-mingw32
+# 64-bit output; pairs with the lib64 link directory below.
+$(combo_var_prefix)GLOBAL_CFLAGS += -m64
+$(combo_var_prefix)GLOBAL_LDFLAGS += -m64
+# Common path prefix for the MinGW cross tools (gcc/g++/ar/nm/objdump below).
+TOOLS_PREFIX := prebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8/bin/x86_64-w64-mingw32-
+$(combo_var_prefix)C_INCLUDES += prebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8/x86_64-w64-mingw32/include
+$(combo_var_prefix)C_INCLUDES += prebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8/lib/gcc/x86_64-w64-mingw32/4.8.3/include
+$(combo_var_prefix)GLOBAL_LD_DIRS += -Lprebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8/x86_64-w64-mingw32/lib64
+
+# Workaround differences in inttypes.h between host and target.
+# See bug 12708004.
+$(combo_var_prefix)GLOBAL_CFLAGS += -D__STDC_FORMAT_MACROS -D__STDC_CONSTANT_MACROS
+# Use C99-compliant printf functions (%zd).
+$(combo_var_prefix)GLOBAL_CFLAGS += -D__USE_MINGW_ANSI_STDIO=1
+# Admit to using >= Vista. Both are needed because of <_mingw.h>.
+$(combo_var_prefix)GLOBAL_CFLAGS += -D_WIN32_WINNT=0x0600 -DWINVER=0x0600
+# Get 64-bit off_t and related functions.
+$(combo_var_prefix)GLOBAL_CFLAGS += -D_FILE_OFFSET_BITS=64
+
+$(combo_var_prefix)CC := $(TOOLS_PREFIX)gcc
+$(combo_var_prefix)CXX := $(TOOLS_PREFIX)g++
+$(combo_var_prefix)AR := $(TOOLS_PREFIX)ar
+$(combo_var_prefix)NM := $(TOOLS_PREFIX)nm
+$(combo_var_prefix)OBJDUMP := $(TOOLS_PREFIX)objdump
+
+# Emit a table-of-contents file $(2) for shared library $(1): the "Name"
+# fields reported by objdump -x, then the global symbols from nm -g.
+define $(combo_var_prefix)transform-shared-lib-to-toc
+$(hide) $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)OBJDUMP) -x $(1) | grep "^Name" | cut -f3 -d" " > $(2)
+$(hide) $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)NM) -g -f p $(1) | cut -f1-2 -d" " >> $(2)
+endef
+
+# ld option: fix up mismatched stdcall symbol decorations at link time.
+$(combo_var_prefix)GLOBAL_LDFLAGS += \
+ --enable-stdcall-fixup
+
+ifneq ($(strip $(BUILD_HOST_static)),)
+# Statically-linked binaries are desirable for sandboxed environment
+$(combo_var_prefix)GLOBAL_LDFLAGS += -static
+endif # BUILD_HOST_static
+
+$(combo_var_prefix)SHLIB_SUFFIX := .dll
+$(combo_var_prefix)EXECUTABLE_SUFFIX := .exe
+
+# Non-empty (true): this combo produces 64-bit binaries.
+$(combo_var_prefix)IS_64_BIT := true
+
+# The mingw gcc is 4.8, 4.9 is required for color diagnostics
+$(combo_var_prefix)UNKNOWN_CFLAGS := -fdiagnostics-color
diff --git a/core/combo/HOST_darwin-x86.mk b/core/combo/HOST_darwin-x86.mk
new file mode 100644
index 0000000000000000000000000000000000000000..fc56e52996fd85179abe5ffd4ad1f502dc347b3d
--- /dev/null
+++ b/core/combo/HOST_darwin-x86.mk
@@ -0,0 +1,106 @@
+#
+# Copyright (C) 2006 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Configuration for Darwin (Mac OS X) on x86.
+# Included by combo/select.mk
+
+$(combo_2nd_arch_prefix)HOST_GLOBAL_CFLAGS += -m32
+$(combo_2nd_arch_prefix)HOST_GLOBAL_LDFLAGS += -m32
+
+ifneq ($(strip $(BUILD_HOST_static)),)
+# Statically-linked binaries are desirable for sandboxed environment
+$(combo_2nd_arch_prefix)HOST_GLOBAL_LDFLAGS += -static
+endif # BUILD_HOST_static
+
+# Workaround differences in inttypes.h between host and target.
+# See bug 12708004.
+$(combo_2nd_arch_prefix)HOST_GLOBAL_CFLAGS += -D__STDC_FORMAT_MACROS -D__STDC_CONSTANT_MACROS
+
+# Provides the mac_sdk_* and gcc_darwin_version values referenced below.
+include $(BUILD_COMBOS)/mac_version.mk
+
+$(combo_2nd_arch_prefix)HOST_TOOLCHAIN_ROOT := prebuilts/gcc/darwin-x86/host/i686-apple-darwin-4.2.1
+$(combo_2nd_arch_prefix)HOST_TOOLCHAIN_PREFIX := $($(combo_2nd_arch_prefix)HOST_TOOLCHAIN_ROOT)/bin/i686-apple-darwin$(gcc_darwin_version)
+$(combo_2nd_arch_prefix)HOST_CC := $($(combo_2nd_arch_prefix)HOST_TOOLCHAIN_PREFIX)-gcc
+$(combo_2nd_arch_prefix)HOST_CXX := $($(combo_2nd_arch_prefix)HOST_TOOLCHAIN_PREFIX)-g++
+
+# Generate a table-of-contents file $(2) for Mach-O shared library $(1).
+define $(combo_var_prefix)transform-shared-lib-to-toc
+$(call _gen_toc_command_for_macho,$(1),$(2))
+endef
+
+# gcc location for clang; to be updated when clang is updated
+# HOST_TOOLCHAIN_ROOT is a Darwin-specific define
+$(combo_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG := $($(combo_2nd_arch_prefix)HOST_TOOLCHAIN_ROOT)
+
+# Fall back to the default $(AR) rather than a toolchain-prefixed one.
+$(combo_2nd_arch_prefix)HOST_AR := $(AR)
+
+$(combo_2nd_arch_prefix)HOST_GLOBAL_CFLAGS += -isysroot $(mac_sdk_root) -mmacosx-version-min=$(mac_sdk_version) -DMACOSX_DEPLOYMENT_TARGET=$(mac_sdk_version)
+$(combo_2nd_arch_prefix)HOST_GLOBAL_CPPFLAGS += -isystem $(mac_sdk_path)/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1
+$(combo_2nd_arch_prefix)HOST_GLOBAL_LDFLAGS += -isysroot $(mac_sdk_root) -Wl,-syslibroot,$(mac_sdk_root) -mmacosx-version-min=$(mac_sdk_version)
+
+$(combo_2nd_arch_prefix)HOST_GLOBAL_CFLAGS += -fPIC -funwind-tables
+$(combo_2nd_arch_prefix)HOST_NO_UNDEFINED_LDFLAGS := -Wl,-undefined,error
+
+$(combo_2nd_arch_prefix)HOST_SHLIB_SUFFIX := .dylib
+$(combo_2nd_arch_prefix)HOST_JNILIB_SUFFIX := .jnilib
+
+$(combo_2nd_arch_prefix)HOST_GLOBAL_ARFLAGS := cqs
+
+############################################################
+## Macros after this line are shared by the 64-bit config.
+
+HOST_CUSTOM_LD_COMMAND := true
+
+# Link host objects into a dylib. -force_load pulls in every member of
+# the "whole" static libraries; the result gets an @rpath install_name
+# plus @loader_path-relative rpath entries.
+define transform-host-o-to-shared-lib-inner
+$(hide) $(PRIVATE_CXX) \
+ -dynamiclib -single_module -read_only_relocs suppress \
+ $($(PRIVATE_2ND_ARCH_VAR_PREFIX)HOST_GLOBAL_LD_DIRS) \
+ $(if $(PRIVATE_NO_DEFAULT_COMPILER_FLAGS),, \
+ $(PRIVATE_HOST_GLOBAL_LDFLAGS) \
+ ) \
+ $(PRIVATE_ALL_OBJECTS) \
+ $(addprefix -force_load , $(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)) \
+ $(call normalize-host-libraries,$(PRIVATE_ALL_SHARED_LIBRARIES)) \
+ $(call normalize-host-libraries,$(PRIVATE_ALL_STATIC_LIBRARIES)) \
+ $(PRIVATE_LDLIBS) \
+ -o $@ \
+ -install_name @rpath/$(notdir $@) \
+ -Wl,-rpath,@loader_path/../$(notdir $($(PRIVATE_2ND_ARCH_VAR_PREFIX)HOST_OUT_SHARED_LIBRARIES)) \
+ -Wl,-rpath,@loader_path/$(notdir $($(PRIVATE_2ND_ARCH_VAR_PREFIX)HOST_OUT_SHARED_LIBRARIES)) \
+ $(PRIVATE_LDFLAGS)
+endef
+
+# Link host objects and libraries into a host executable.
+define transform-host-o-to-executable-inner
+$(hide) $(PRIVATE_CXX) \
+ -Wl,-rpath,@loader_path/../$(notdir $($(PRIVATE_2ND_ARCH_VAR_PREFIX)HOST_OUT_SHARED_LIBRARIES)) \
+ -Wl,-rpath,@loader_path/$(notdir $($(PRIVATE_2ND_ARCH_VAR_PREFIX)HOST_OUT_SHARED_LIBRARIES)) \
+ -o $@ \
+ -Wl,-headerpad_max_install_names \
+ $($(PRIVATE_2ND_ARCH_VAR_PREFIX)HOST_GLOBAL_LD_DIRS) \
+ $(if $(PRIVATE_NO_DEFAULT_COMPILER_FLAGS),, \
+ $(PRIVATE_HOST_GLOBAL_LDFLAGS) \
+ ) \
+ $(call normalize-host-libraries,$(PRIVATE_ALL_SHARED_LIBRARIES)) \
+ $(PRIVATE_ALL_OBJECTS) \
+ $(call normalize-host-libraries,$(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)) \
+ $(call normalize-host-libraries,$(PRIVATE_ALL_STATIC_LIBRARIES)) \
+ $(PRIVATE_LDFLAGS) \
+ $(PRIVATE_LDLIBS)
+endef
+
+# $(1): The file to check
+# "stat -f" is the BSD/macOS spelling for a format string; "%z" = size in bytes.
+define get-file-size
+stat -f "%z" $(1)
+endef
diff --git a/core/combo/HOST_darwin-x86_64.mk b/core/combo/HOST_darwin-x86_64.mk
new file mode 100644
index 0000000000000000000000000000000000000000..251455f0bf6e914147f526af04c9548380597563
--- /dev/null
+++ b/core/combo/HOST_darwin-x86_64.mk
@@ -0,0 +1,64 @@
+#
+# Copyright (C) 2006 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Configuration for Darwin (Mac OS X) on x86_64.
+# Included by combo/select.mk
+
+HOST_GLOBAL_CFLAGS += -m64
+HOST_GLOBAL_LDFLAGS += -m64
+
+ifneq ($(strip $(BUILD_HOST_static)),)
+# Statically-linked binaries are desirable for sandboxed environment
+HOST_GLOBAL_LDFLAGS += -static
+endif # BUILD_HOST_static
+
+# Workaround differences in inttypes.h between host and target.
+# See bug 12708004.
+HOST_GLOBAL_CFLAGS += -D__STDC_FORMAT_MACROS -D__STDC_CONSTANT_MACROS
+
+# Provides the mac_sdk_* and gcc_darwin_version values referenced below.
+include $(BUILD_COMBOS)/mac_version.mk
+
+HOST_TOOLCHAIN_ROOT := prebuilts/gcc/darwin-x86/host/i686-apple-darwin-4.2.1
+HOST_TOOLCHAIN_PREFIX := $(HOST_TOOLCHAIN_ROOT)/bin/i686-apple-darwin$(gcc_darwin_version)
+HOST_CC := $(HOST_TOOLCHAIN_PREFIX)-gcc
+HOST_CXX := $(HOST_TOOLCHAIN_PREFIX)-g++
+
+# Generate a table-of-contents file $(2) for Mach-O shared library $(1).
+define $(combo_var_prefix)transform-shared-lib-to-toc
+$(call _gen_toc_command_for_macho,$(1),$(2))
+endef
+
+# gcc location for clang; to be updated when clang is updated
+# HOST_TOOLCHAIN_ROOT is a Darwin-specific define
+HOST_TOOLCHAIN_FOR_CLANG := $(HOST_TOOLCHAIN_ROOT)
+
+# Fall back to the default $(AR) rather than a toolchain-prefixed one.
+HOST_AR := $(AR)
+
+HOST_GLOBAL_CFLAGS += -isysroot $(mac_sdk_root) -mmacosx-version-min=$(mac_sdk_version) -DMACOSX_DEPLOYMENT_TARGET=$(mac_sdk_version)
+HOST_GLOBAL_CPPFLAGS += -isystem $(mac_sdk_path)/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1
+HOST_GLOBAL_LDFLAGS += -isysroot $(mac_sdk_root) -Wl,-syslibroot,$(mac_sdk_root) -mmacosx-version-min=$(mac_sdk_version)
+
+HOST_GLOBAL_CFLAGS += -fPIC -funwind-tables
+HOST_NO_UNDEFINED_LDFLAGS := -Wl,-undefined,error
+
+HOST_SHLIB_SUFFIX := .dylib
+HOST_JNILIB_SUFFIX := .jnilib
+
+HOST_GLOBAL_ARFLAGS := cqs
+
+# We reuse the following functions with the same name from HOST_darwin-x86.mk:
+# transform-host-o-to-shared-lib-inner
+# transform-host-o-to-executable-inner
+# get-file-size
diff --git a/core/combo/HOST_linux-x86.mk b/core/combo/HOST_linux-x86.mk
new file mode 100644
index 0000000000000000000000000000000000000000..169e2d2448a31e5c6de3e88d6151a40c63af7b65
--- /dev/null
+++ b/core/combo/HOST_linux-x86.mk
@@ -0,0 +1,66 @@
+#
+# Copyright (C) 2006 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Configuration for builds hosted on linux-x86.
+# Included by combo/select.mk
+
+# Respect an externally-set toolchain prefix; otherwise default to the
+# prebuilt x86_64-linux glibc2.15 gcc-4.8 toolchain.
+ifeq ($(strip $($(combo_2nd_arch_prefix)HOST_TOOLCHAIN_PREFIX)),)
+$(combo_2nd_arch_prefix)HOST_TOOLCHAIN_PREFIX := prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/bin/x86_64-linux-
+endif
+$(combo_2nd_arch_prefix)HOST_CC := $($(combo_2nd_arch_prefix)HOST_TOOLCHAIN_PREFIX)gcc
+$(combo_2nd_arch_prefix)HOST_CXX := $($(combo_2nd_arch_prefix)HOST_TOOLCHAIN_PREFIX)g++
+$(combo_2nd_arch_prefix)HOST_AR := $($(combo_2nd_arch_prefix)HOST_TOOLCHAIN_PREFIX)ar
+$(combo_2nd_arch_prefix)HOST_READELF := $($(combo_2nd_arch_prefix)HOST_TOOLCHAIN_PREFIX)readelf
+$(combo_2nd_arch_prefix)HOST_NM := $($(combo_2nd_arch_prefix)HOST_TOOLCHAIN_PREFIX)nm
+
+# Generate a table-of-contents file $(2) for ELF shared library $(1).
+define $(combo_var_prefix)transform-shared-lib-to-toc
+$(call _gen_toc_command_for_elf,$(1),$(2))
+endef
+
+# gcc location for clang; to be updated when clang is updated
+$(combo_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG := prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8
+
+# We expect SSE3 floating point math.
+$(combo_2nd_arch_prefix)HOST_GLOBAL_CFLAGS += -msse3 -mfpmath=sse -m32 -Wa,--noexecstack -march=prescott
+$(combo_2nd_arch_prefix)HOST_GLOBAL_LDFLAGS += -m32 -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now -Wl,--no-undefined-version
+
+ifneq ($(strip $(BUILD_HOST_static)),)
+# Statically-linked binaries are desirable for sandboxed environment
+$(combo_2nd_arch_prefix)HOST_GLOBAL_LDFLAGS += -static
+endif # BUILD_HOST_static
+
+# No trailing backslash after the last flag: a dangling continuation
+# would silently splice whatever line follows into this assignment.
+$(combo_2nd_arch_prefix)HOST_GLOBAL_CFLAGS += -fPIC -no-canonical-prefixes
+
+$(combo_2nd_arch_prefix)HOST_GLOBAL_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector
+
+# Workaround differences in inttypes.h between host and target.
+# See bug 12708004.
+$(combo_2nd_arch_prefix)HOST_GLOBAL_CFLAGS += -D__STDC_FORMAT_MACROS -D__STDC_CONSTANT_MACROS
+
+# We build a 32-bit host art, and right now that also means building *all* host libraries
+# both 32- and 64-bit (whether art uses them or not --- 9d59f417767991246848c3e101cb27d2dfea5988).
+$(combo_2nd_arch_prefix)HOST_GLOBAL_CFLAGS += -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE=1
+
+$(combo_2nd_arch_prefix)HOST_NO_UNDEFINED_LDFLAGS := -Wl,--no-undefined
+
+############################################################
+## Macros after this line are shared by the 64-bit config.
+
+# $(1): The file to check
+# GNU stat; "tr -d '\n'" strips the trailing newline so callers get a bare number.
+define get-file-size
+stat --format "%s" "$(1)" | tr -d '\n'
+endef
diff --git a/core/combo/HOST_linux-x86_64.mk b/core/combo/HOST_linux-x86_64.mk
new file mode 100644
index 0000000000000000000000000000000000000000..9766f2bde8b538b533fb9d8f1f9d163a406cdb33
--- /dev/null
+++ b/core/combo/HOST_linux-x86_64.mk
@@ -0,0 +1,53 @@
+#
+# Copyright (C) 2006 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Configuration for builds hosted on linux-x86_64.
+# Included by combo/select.mk
+
+# Respect an externally-set toolchain prefix; otherwise default to the
+# prebuilt x86_64-linux glibc2.15 gcc-4.8 toolchain.
+ifeq ($(strip $(HOST_TOOLCHAIN_PREFIX)),)
+HOST_TOOLCHAIN_PREFIX := prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8/bin/x86_64-linux-
+endif
+HOST_CC := $(HOST_TOOLCHAIN_PREFIX)gcc
+HOST_CXX := $(HOST_TOOLCHAIN_PREFIX)g++
+HOST_AR := $(HOST_TOOLCHAIN_PREFIX)ar
+HOST_READELF := $(HOST_TOOLCHAIN_PREFIX)readelf
+HOST_NM := $(HOST_TOOLCHAIN_PREFIX)nm
+
+# Generate a table-of-contents file $(2) for ELF shared library $(1).
+define $(combo_var_prefix)transform-shared-lib-to-toc
+$(call _gen_toc_command_for_elf,$(1),$(2))
+endef
+
+# gcc location for clang; to be updated when clang is updated
+HOST_TOOLCHAIN_FOR_CLANG := prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8
+
+HOST_GLOBAL_CFLAGS += -m64 -Wa,--noexecstack
+HOST_GLOBAL_LDFLAGS += -m64 -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now -Wl,--no-undefined-version
+
+ifneq ($(strip $(BUILD_HOST_static)),)
+# Statically-linked binaries are desirable for sandboxed environment
+HOST_GLOBAL_LDFLAGS += -static
+endif # BUILD_HOST_static
+
+# No trailing backslash after the last flag: a dangling continuation
+# would silently splice whatever line follows into this assignment.
+HOST_GLOBAL_CFLAGS += -fPIC -no-canonical-prefixes
+
+HOST_GLOBAL_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector
+
+# Workaround differences in inttypes.h between host and target.
+# See bug 12708004.
+HOST_GLOBAL_CFLAGS += -D__STDC_FORMAT_MACROS -D__STDC_CONSTANT_MACROS
+
+HOST_NO_UNDEFINED_LDFLAGS := -Wl,--no-undefined
diff --git a/core/combo/TARGET_linux-arm.mk b/core/combo/TARGET_linux-arm.mk
new file mode 100644
index 0000000000000000000000000000000000000000..510aae52f2d9a1d73096b5adfd9dc618f20fc2b4
--- /dev/null
+++ b/core/combo/TARGET_linux-arm.mk
@@ -0,0 +1,195 @@
+#
+# Copyright (C) 2006 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Configuration for Linux on ARM.
+# Included by combo/select.mk
+
+# You can set TARGET_ARCH_VARIANT to use an arch version other
+# than ARMv5TE. Each value should correspond to a file named
+# $(BUILD_COMBOS)/arch/<variant>.mk which must contain
+# makefile variable definitions. Their
+# purpose is to allow module Android.mk files to selectively compile
+# different versions of code based upon the functionality and
+# instructions available in a given architecture version.
+#
+# The blocks also define specific arch_variant_cflags, which
+# include defines, and compiler settings for the given architecture
+# version.
+#
+ifeq ($(strip $(TARGET_$(combo_2nd_arch_prefix)ARCH_VARIANT)),)
+TARGET_$(combo_2nd_arch_prefix)ARCH_VARIANT := armv5te
+endif
+
+# Decouple NDK library selection with platform compiler version
+$(combo_2nd_arch_prefix)TARGET_NDK_GCC_VERSION := 4.9
+
+ifeq ($(strip $(TARGET_GCC_VERSION_EXP)),)
+$(combo_2nd_arch_prefix)TARGET_GCC_VERSION := 4.9
+else
+$(combo_2nd_arch_prefix)TARGET_GCC_VERSION := $(TARGET_GCC_VERSION_EXP)
+endif
+
+TARGET_ARCH_SPECIFIC_MAKEFILE := $(BUILD_COMBOS)/arch/$(TARGET_$(combo_2nd_arch_prefix)ARCH)/$(TARGET_$(combo_2nd_arch_prefix)ARCH_VARIANT).mk
+ifeq ($(strip $(wildcard $(TARGET_ARCH_SPECIFIC_MAKEFILE))),)
+$(error Unknown ARM architecture version: $(TARGET_$(combo_2nd_arch_prefix)ARCH_VARIANT))
+endif
+
+include $(TARGET_ARCH_SPECIFIC_MAKEFILE)
+include $(BUILD_SYSTEM)/combo/fdo.mk
+
+# You can set TARGET_TOOLS_PREFIX to get gcc from somewhere else
+ifeq ($(strip $($(combo_2nd_arch_prefix)TARGET_TOOLS_PREFIX)),)
+$(combo_2nd_arch_prefix)TARGET_TOOLCHAIN_ROOT := prebuilts/gcc/$(HOST_PREBUILT_TAG)/arm/arm-linux-androideabi-$($(combo_2nd_arch_prefix)TARGET_GCC_VERSION)
+$(combo_2nd_arch_prefix)TARGET_TOOLS_PREFIX := $($(combo_2nd_arch_prefix)TARGET_TOOLCHAIN_ROOT)/bin/arm-linux-androideabi-
+endif
+
+$(combo_2nd_arch_prefix)TARGET_CC := $($(combo_2nd_arch_prefix)TARGET_TOOLS_PREFIX)gcc
+$(combo_2nd_arch_prefix)TARGET_CXX := $($(combo_2nd_arch_prefix)TARGET_TOOLS_PREFIX)g++
+$(combo_2nd_arch_prefix)TARGET_AR := $($(combo_2nd_arch_prefix)TARGET_TOOLS_PREFIX)ar
+$(combo_2nd_arch_prefix)TARGET_OBJCOPY := $($(combo_2nd_arch_prefix)TARGET_TOOLS_PREFIX)objcopy
+$(combo_2nd_arch_prefix)TARGET_LD := $($(combo_2nd_arch_prefix)TARGET_TOOLS_PREFIX)ld
+$(combo_2nd_arch_prefix)TARGET_READELF := $($(combo_2nd_arch_prefix)TARGET_TOOLS_PREFIX)readelf
+$(combo_2nd_arch_prefix)TARGET_STRIP := $($(combo_2nd_arch_prefix)TARGET_TOOLS_PREFIX)strip
+$(combo_2nd_arch_prefix)TARGET_NM := $($(combo_2nd_arch_prefix)TARGET_TOOLS_PREFIX)nm
+
+define $(combo_var_prefix)transform-shared-lib-to-toc
+$(call _gen_toc_command_for_elf,$(1),$(2))
+endef
+
+$(combo_2nd_arch_prefix)TARGET_NO_UNDEFINED_LDFLAGS := -Wl,--no-undefined
+
+$(combo_2nd_arch_prefix)TARGET_arm_CFLAGS := -O2 \
+ -fomit-frame-pointer \
+ -fstrict-aliasing \
+ -funswitch-loops
+
+# Modules can choose to compile some source as thumb.
+$(combo_2nd_arch_prefix)TARGET_thumb_CFLAGS := -mthumb \
+ -Os \
+ -fomit-frame-pointer \
+ -fno-strict-aliasing
+
+# Set FORCE_ARM_DEBUGGING to "true" in your buildspec.mk
+# or in your environment to force a full arm build, even for
+# files that are normally built as thumb; this can make
+# gdb debugging easier. Don't forget to do a clean build.
+#
+# NOTE: if you try to build a -O0 build with thumb, several
+# of the libraries (libpv, libwebcore, libkjs) need to be built
+# with -mlong-calls. When built at -O0, those libraries are
+# too big for a thumb "BL