aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRyan Prichard <rprichard@google.com>2021-09-01 13:44:01 -0700
committerRyan Prichard <rprichard@google.com>2021-09-01 13:49:54 -0700
commit90908f0a79679755f9b7b5513894ecfd16476554 (patch)
tree7346eae8a3ad37961f57cbcd79444d3cce12b262
parent6f2886bda85fe7a4e5ad938b7b636d9b38caf634 (diff)
parente72d1d581c945c158ed68d9bc48911063022a2c6 (diff)
downloadninja-90908f0a79679755f9b7b5513894ecfd16476554.tar.gz
Merge v1.10.2 (aosp/upstream-release) into cmake-master-dev
Bug: http://b/198206301 Change-Id: I215317abbb8ea1efa9a55ecb289bf0f7cf0023eb
-rw-r--r--.clang-format26
-rw-r--r--.clang-tidy13
-rw-r--r--.editorconfig11
-rw-r--r--.github/workflows/linux.yml125
-rw-r--r--.github/workflows/macos.yml54
-rw-r--r--.github/workflows/windows.yml51
-rw-r--r--.gitignore40
-rw-r--r--.travis.yml36
-rw-r--r--CMakeLists.txt213
-rw-r--r--CONTRIBUTING.md34
-rw-r--r--COPYING202
-rw-r--r--README.md51
-rw-r--r--RELEASING33
-rw-r--r--appveyor.yml61
-rwxr-xr-xconfigure.py714
-rw-r--r--doc/README.md11
-rw-r--r--doc/dblatex.xsl7
-rw-r--r--doc/docbook.xsl34
-rw-r--r--doc/doxygen.config1250
-rw-r--r--doc/manual.asciidoc1173
-rw-r--r--doc/style.css29
-rw-r--r--misc/afl-fuzz-tokens/kw_build1
-rw-r--r--misc/afl-fuzz-tokens/kw_default1
-rw-r--r--misc/afl-fuzz-tokens/kw_include1
-rw-r--r--misc/afl-fuzz-tokens/kw_pool1
-rw-r--r--misc/afl-fuzz-tokens/kw_rule1
-rw-r--r--misc/afl-fuzz-tokens/kw_subninja1
-rw-r--r--misc/afl-fuzz-tokens/misc_a1
-rw-r--r--misc/afl-fuzz-tokens/misc_b1
-rw-r--r--misc/afl-fuzz-tokens/misc_colon1
-rw-r--r--misc/afl-fuzz-tokens/misc_cont1
-rw-r--r--misc/afl-fuzz-tokens/misc_dollar1
-rw-r--r--misc/afl-fuzz-tokens/misc_eq1
-rw-r--r--misc/afl-fuzz-tokens/misc_indent1
-rw-r--r--misc/afl-fuzz-tokens/misc_pipe1
-rw-r--r--misc/afl-fuzz-tokens/misc_pipepipe1
-rw-r--r--misc/afl-fuzz-tokens/misc_space1
-rw-r--r--misc/afl-fuzz/build.ninja5
-rw-r--r--misc/bash-completion57
-rwxr-xr-xmisc/ci.py41
-rw-r--r--misc/inherited-fds.ninja23
-rw-r--r--misc/long-slow-build.ninja38
-rwxr-xr-xmisc/measure.py56
-rw-r--r--misc/ninja-mode.el85
-rw-r--r--misc/ninja.vim87
-rw-r--r--misc/ninja_syntax.py197
-rwxr-xr-xmisc/ninja_syntax_test.py191
-rwxr-xr-xmisc/output_test.py115
-rw-r--r--misc/packaging/ninja.spec42
-rwxr-xr-xmisc/packaging/rpmbuild.sh29
-rw-r--r--misc/write_fake_manifests.py272
-rw-r--r--misc/zsh-completion72
-rw-r--r--src/browse.cc80
-rw-r--r--src/browse.h28
-rwxr-xr-xsrc/browse.py233
-rw-r--r--src/build.cc1138
-rw-r--r--src/build.h338
-rw-r--r--src/build_log.cc494
-rw-r--r--src/build_log.h107
-rw-r--r--src/build_log_perftest.cc151
-rw-r--r--src/build_log_test.cc358
-rw-r--r--src/build_test.cc3304
-rw-r--r--src/canon_perftest.cc59
-rw-r--r--src/clean.cc293
-rw-r--r--src/clean.h111
-rw-r--r--src/clean_test.cc540
-rw-r--r--src/clparser.cc128
-rw-r--r--src/clparser.h51
-rw-r--r--src/clparser_perftest.cc159
-rw-r--r--src/clparser_test.cc119
-rw-r--r--src/debug_flags.cc21
-rw-r--r--src/debug_flags.h33
-rw-r--r--src/depfile_parser.cc371
-rw-r--r--src/depfile_parser.h42
-rw-r--r--src/depfile_parser.in.cc207
-rw-r--r--src/depfile_parser_perftest.cc79
-rw-r--r--src/depfile_parser_test.cc380
-rw-r--r--src/deps_log.cc438
-rw-r--r--src/deps_log.h128
-rw-r--r--src/deps_log_test.cc481
-rw-r--r--src/disk_interface.cc287
-rw-r--r--src/disk_interface.h101
-rw-r--r--src/disk_interface_test.cc324
-rw-r--r--src/dyndep.cc126
-rw-r--r--src/dyndep.h64
-rw-r--r--src/dyndep_parser.cc225
-rw-r--r--src/dyndep_parser.h47
-rw-r--r--src/dyndep_parser_test.cc514
-rw-r--r--src/edit_distance.cc71
-rw-r--r--src/edit_distance.h25
-rw-r--r--src/edit_distance_test.cc48
-rw-r--r--src/eval_env.cc149
-rw-r--r--src/eval_env.h109
-rw-r--r--src/exit_status.h24
-rwxr-xr-xsrc/gen_doxygen_mainpage.sh92
-rw-r--r--src/getopt.c410
-rw-r--r--src/getopt.h57
-rw-r--r--src/graph.cc662
-rw-r--r--src/graph.h321
-rw-r--r--src/graph_test.cc860
-rw-r--r--src/graphviz.cc90
-rw-r--r--src/graphviz.h40
-rw-r--r--src/hash_collision_bench.cc65
-rw-r--r--src/hash_map.h123
-rw-r--r--src/includes_normalize-win32.cc211
-rw-r--r--src/includes_normalize.h40
-rw-r--r--src/includes_normalize_test.cc169
-rwxr-xr-xsrc/inline.sh32
-rw-r--r--src/lexer.cc822
-rw-r--r--src/lexer.h105
-rw-r--r--src/lexer.in.cc280
-rw-r--r--src/lexer_test.cc98
-rw-r--r--src/line_printer.cc166
-rw-r--r--src/line_printer.h76
-rw-r--r--src/load_status.h24
-rw-r--r--src/manifest_parser.cc424
-rw-r--r--src/manifest_parser.h72
-rw-r--r--src/manifest_parser_perftest.cc123
-rw-r--r--src/manifest_parser_test.cc1158
-rw-r--r--src/metrics.cc129
-rw-r--r--src/metrics.h91
-rw-r--r--src/minidump-win32.cc89
-rw-r--r--src/msvc_helper-win32.cc108
-rw-r--r--src/msvc_helper.h32
-rw-r--r--src/msvc_helper_main-win32.cc150
-rw-r--r--src/msvc_helper_test.cc41
-rw-r--r--src/ninja.cc1457
-rw-r--r--src/ninja_test.cc162
-rw-r--r--src/parser.cc53
-rw-r--r--src/parser.h48
-rw-r--r--src/state.cc214
-rw-r--r--src/state.h130
-rw-r--r--src/state_test.cc48
-rw-r--r--src/string_piece.h70
-rw-r--r--src/string_piece_util.cc78
-rw-r--r--src/string_piece_util.h33
-rw-r--r--src/string_piece_util_test.cc131
-rw-r--r--src/subprocess-posix.cc368
-rw-r--r--src/subprocess-win32.cc307
-rw-r--r--src/subprocess.h113
-rw-r--r--src/subprocess_test.cc263
-rw-r--r--src/test.cc237
-rw-r--r--src/test.h185
-rw-r--r--src/timestamp.h33
-rw-r--r--src/util.cc634
-rw-r--r--src/util.h125
-rw-r--r--src/util_test.cc436
-rw-r--r--src/version.cc55
-rw-r--r--src/version.h31
-rw-r--r--src/win32port.h39
150 files changed, 29889 insertions, 0 deletions
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..b8e9225
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,26 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This isn't meant to be authoritative, but it's good enough to be useful.
+# Still use your best judgement for formatting decisions: clang-format
+# sometimes makes strange choices.
+
+BasedOnStyle: Google
+AllowShortFunctionsOnASingleLine: Inline
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
+ConstructorInitializerAllOnOneLineOrOnePerLine: false
+Cpp11BracedListStyle: false
+IndentCaseLabels: false
+DerivePointerBinding: false
diff --git a/.clang-tidy b/.clang-tidy
new file mode 100644
index 0000000..df4c1ed
--- /dev/null
+++ b/.clang-tidy
@@ -0,0 +1,13 @@
+---
+Checks: '
+ ,readability-avoid-const-params-in-decls,
+ ,readability-non-const-parameter,
+ ,readability-redundant-string-cstr,
+ ,readability-redundant-string-init,
+'
+WarningsAsErrors: '
+ ,readability-avoid-const-params-in-decls,
+ ,readability-non-const-parameter,
+ ,readability-redundant-string-cstr,
+ ,readability-redundant-string-init,
+'
diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 0000000..0cc68d6
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,11 @@
+root = true
+
+[*]
+charset = utf-8
+indent_style = space
+indent_size = 2
+insert_final_newline = true
+end_of_line = lf
+
+[CMakeLists.txt]
+indent_style = tab
diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml
new file mode 100644
index 0000000..9062d98
--- /dev/null
+++ b/.github/workflows/linux.yml
@@ -0,0 +1,125 @@
+name: Linux
+
+on:
+ pull_request:
+ push:
+ release:
+ types: published
+
+jobs:
+ build:
+ runs-on: [ubuntu-latest]
+ container:
+ image: centos:7
+ steps:
+ - uses: actions/checkout@v2
+ - name: Install dependencies
+ run: |
+ curl -L -O https://github.com/Kitware/CMake/releases/download/v3.16.4/cmake-3.16.4-Linux-x86_64.sh
+ chmod +x cmake-3.16.4-Linux-x86_64.sh
+ ./cmake-3.16.4-Linux-x86_64.sh --skip-license --prefix=/usr/local
+ curl -L -O https://www.mirrorservice.org/sites/dl.fedoraproject.org/pub/epel/7/x86_64/Packages/p/p7zip-16.02-10.el7.x86_64.rpm
+ curl -L -O https://www.mirrorservice.org/sites/dl.fedoraproject.org/pub/epel/7/x86_64/Packages/p/p7zip-plugins-16.02-10.el7.x86_64.rpm
+ rpm -U --quiet p7zip-16.02-10.el7.x86_64.rpm
+ rpm -U --quiet p7zip-plugins-16.02-10.el7.x86_64.rpm
+ yum install -y make gcc-c++ libasan clang-analyzer
+
+ - name: Build debug ninja
+ shell: bash
+ env:
+ CFLAGS: -fstack-protector-all -fsanitize=address
+ CXXFLAGS: -fstack-protector-all -fsanitize=address
+ run: |
+ scan-build -o scanlogs cmake -DCMAKE_BUILD_TYPE=Debug -B debug-build
+ scan-build -o scanlogs cmake --build debug-build --parallel --config Debug
+
+ - name: Test debug ninja
+ run: ./ninja_test
+ working-directory: debug-build
+
+ - name: Build release ninja
+ shell: bash
+ run: |
+ cmake -DCMAKE_BUILD_TYPE=Release -B release-build
+ cmake --build release-build --parallel --config Release
+ strip release-build/ninja
+
+ - name: Test release ninja
+ run: ./ninja_test
+ working-directory: release-build
+
+ - name: Create ninja archive
+ run: |
+ mkdir artifact
+ 7z a artifact/ninja-linux.zip ./release-build/ninja
+
+ # Upload ninja binary archive as an artifact
+ - name: Upload artifact
+ uses: actions/upload-artifact@v1
+ with:
+ name: ninja-binary-archives
+ path: artifact
+
+ - name: Upload release asset
+ if: github.event.action == 'published'
+ uses: actions/upload-release-asset@v1.0.1
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ upload_url: ${{ github.event.release.upload_url }}
+ asset_path: ./artifact/ninja-linux.zip
+ asset_name: ninja-linux.zip
+ asset_content_type: application/zip
+
+ test:
+ runs-on: [ubuntu-latest]
+ container:
+ image: ubuntu:20.04
+ steps:
+ - uses: actions/checkout@v2
+ - name: Install dependencies
+ run: |
+ apt update
+ apt install -y python3-pytest ninja-build clang-tidy python3-pip clang
+ pip3 install cmake==3.17.*
+ - name: Configure (GCC)
+ run: cmake -Bbuild-gcc -DCMAKE_BUILD_TYPE=Debug -G'Ninja Multi-Config'
+
+ - name: Build (GCC, Debug)
+ run: cmake --build build-gcc --config Debug
+ - name: Unit tests (GCC, Debug)
+ run: ./build-gcc/Debug/ninja_test
+ - name: Python tests (GCC, Debug)
+ run: pytest-3 --color=yes ../..
+ working-directory: build-gcc/Debug
+
+ - name: Build (GCC, Release)
+ run: cmake --build build-gcc --config Release
+ - name: Unit tests (GCC, Release)
+ run: ./build-gcc/Release/ninja_test
+ - name: Python tests (GCC, Release)
+ run: pytest-3 --color=yes ../..
+ working-directory: build-gcc/Release
+
+ - name: Configure (Clang)
+ run: CC=clang CXX=clang++ cmake -Bbuild-clang -DCMAKE_BUILD_TYPE=Debug -G'Ninja Multi-Config' -DCMAKE_EXPORT_COMPILE_COMMANDS=1
+
+ - name: Build (Clang, Debug)
+ run: cmake --build build-clang --config Debug
+ - name: Unit tests (Clang, Debug)
+ run: ./build-clang/Debug/ninja_test
+ - name: Python tests (Clang, Debug)
+ run: pytest-3 --color=yes ../..
+ working-directory: build-clang/Debug
+
+ - name: Build (Clang, Release)
+ run: cmake --build build-clang --config Release
+ - name: Unit tests (Clang, Release)
+ run: ./build-clang/Release/ninja_test
+ - name: Python tests (Clang, Release)
+ run: pytest-3 --color=yes ../..
+ working-directory: build-clang/Release
+
+ - name: clang-tidy
+ run: /usr/lib/llvm-10/share/clang/run-clang-tidy.py -header-filter=src
+ working-directory: build-clang
diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml
new file mode 100644
index 0000000..4ea958f
--- /dev/null
+++ b/.github/workflows/macos.yml
@@ -0,0 +1,54 @@
+name: macOS
+
+on:
+ pull_request:
+ push:
+ release:
+ types: published
+
+jobs:
+ build:
+ runs-on: macos-11.0
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Install dependencies
+ run: brew install re2c p7zip cmake
+
+ - name: Build ninja
+ shell: bash
+ env:
+ MACOSX_DEPLOYMENT_TARGET: 10.12
+ run: |
+ sudo xcode-select -s /Applications/Xcode_12.2.app
+ cmake -Bbuild -GXcode '-DCMAKE_OSX_ARCHITECTURES=arm64;x86_64'
+ cmake --build build --config Release
+
+ - name: Test ninja
+ run: ctest -vv
+ working-directory: build
+
+ - name: Create ninja archive
+ shell: bash
+ run: |
+ mkdir artifact
+ 7z a artifact/ninja-mac.zip ./build/Release/ninja
+
+ # Upload ninja binary archive as an artifact
+ - name: Upload artifact
+ uses: actions/upload-artifact@v1
+ with:
+ name: ninja-binary-archives
+ path: artifact
+
+ - name: Upload release asset
+ if: github.event.action == 'published'
+ uses: actions/upload-release-asset@v1.0.1
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ upload_url: ${{ github.event.release.upload_url }}
+ asset_path: ./artifact/ninja-mac.zip
+ asset_name: ninja-mac.zip
+ asset_content_type: application/zip
diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
new file mode 100644
index 0000000..04fc2f6
--- /dev/null
+++ b/.github/workflows/windows.yml
@@ -0,0 +1,51 @@
+name: Windows
+
+on:
+ pull_request:
+ push:
+ release:
+ types: published
+
+jobs:
+ build:
+ runs-on: windows-latest
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Install dependencies
+ run: choco install re2c
+
+ - name: Build ninja
+ shell: bash
+ run: |
+ cmake -DCMAKE_BUILD_TYPE=Release -B build
+ cmake --build build --parallel --config Release
+
+ - name: Test ninja
+ run: .\ninja_test.exe
+ working-directory: build/Release
+
+ - name: Create ninja archive
+ shell: bash
+ run: |
+ mkdir artifact
+ 7z a artifact/ninja-win.zip ./build/Release/ninja.exe
+
+ # Upload ninja binary archive as an artifact
+ - name: Upload artifact
+ uses: actions/upload-artifact@v1
+ with:
+ name: ninja-binary-archives
+ path: artifact
+
+ - name: Upload release asset
+ if: github.event.action == 'published'
+ uses: actions/upload-release-asset@v1.0.1
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ upload_url: ${{ github.event.release.upload_url }}
+ asset_path: ./artifact/ninja-win.zip
+ asset_name: ninja-win.zip
+ asset_content_type: application/zip
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..dca1129
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,40 @@
+*.pyc
+*.obj
+*.exe
+*.pdb
+*.ilk
+/build*/
+/build.ninja
+/ninja
+/ninja.bootstrap
+/build_log_perftest
+/canon_perftest
+/clparser_perftest
+/depfile_parser_perftest
+/hash_collision_bench
+/ninja_test
+/manifest_parser_perftest
+/graph.png
+/doc/manual.html
+/doc/doxygen
+*.patch
+.DS_Store
+
+# Eclipse project files
+.project
+.cproject
+
+# SublimeText project files
+*.sublime-project
+*.sublime-workspace
+
+# Ninja output
+.ninja_deps
+.ninja_log
+
+# Visual Studio Code project files
+/.vscode/
+/.ccls-cache/
+
+# Qt Creator project files
+/CMakeLists.txt.user
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..e5d7d2b
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,36 @@
+matrix:
+ include:
+ - os: linux
+ dist: precise
+ compiler: gcc
+ - os: linux
+ dist: precise
+ compiler: clang
+ - os: linux
+ dist: trusty
+ compiler: gcc
+ - os: linux
+ dist: trusty
+ compiler: clang
+ - os: linux
+ dist: xenial
+ compiler: gcc
+ - os: linux
+ dist: xenial
+ compiler: clang
+ - os: osx
+ osx_image: xcode10
+ - os: osx
+ osx_image: xcode10.1
+sudo: false
+language: cpp
+before_install:
+ - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew install re2c ; fi
+ - if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then choco install re2c python ; fi
+script:
+ - ./misc/ci.py
+ - python3 configure.py --bootstrap
+ - ./ninja all
+ - ./ninja_test --gtest_filter=-SubprocessTest.SetWithLots
+ - ./misc/ninja_syntax_test.py
+ - ./misc/output_test.py
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..7f03c35
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,213 @@
+cmake_minimum_required(VERSION 3.15)
+
+include(CheckIncludeFileCXX)
+include(CheckIPOSupported)
+
+project(ninja)
+
+# --- optional link-time optimization
+check_ipo_supported(RESULT lto_supported OUTPUT error)
+
+if(lto_supported)
+ message(STATUS "IPO / LTO enabled")
+ set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE TRUE)
+else()
+ message(STATUS "IPO / LTO not supported: <${error}>")
+endif()
+
+# --- compiler flags
+if(MSVC)
+ set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>")
+ string(APPEND CMAKE_CXX_FLAGS " /W4 /GR- /Zc:__cplusplus")
+else()
+ include(CheckCXXCompilerFlag)
+ check_cxx_compiler_flag(-Wno-deprecated flag_no_deprecated)
+ if(flag_no_deprecated)
+ string(APPEND CMAKE_CXX_FLAGS " -Wno-deprecated")
+ endif()
+ check_cxx_compiler_flag(-fdiagnostics-color flag_color_diag)
+ if(flag_color_diag)
+ string(APPEND CMAKE_CXX_FLAGS " -fdiagnostics-color")
+ endif()
+endif()
+
+# --- optional re2c
+find_program(RE2C re2c)
+if(RE2C)
+ # the depfile parser and ninja lexers are generated using re2c.
+ function(re2c IN OUT)
+ add_custom_command(DEPENDS ${IN} OUTPUT ${OUT}
+ COMMAND ${RE2C} -b -i --no-generation-date -o ${OUT} ${IN}
+ )
+ endfunction()
+ re2c(${PROJECT_SOURCE_DIR}/src/depfile_parser.in.cc ${PROJECT_BINARY_DIR}/depfile_parser.cc)
+ re2c(${PROJECT_SOURCE_DIR}/src/lexer.in.cc ${PROJECT_BINARY_DIR}/lexer.cc)
+ add_library(libninja-re2c OBJECT ${PROJECT_BINARY_DIR}/depfile_parser.cc ${PROJECT_BINARY_DIR}/lexer.cc)
+else()
+ message(WARNING "re2c was not found; changes to src/*.in.cc will not affect your build.")
+ add_library(libninja-re2c OBJECT src/depfile_parser.cc src/lexer.cc)
+endif()
+target_include_directories(libninja-re2c PRIVATE src)
+
+# --- Check for 'browse' mode support
+function(check_platform_supports_browse_mode RESULT)
+ # Make sure the inline.sh script works on this platform.
+ # It uses the shell commands such as 'od', which may not be available.
+ execute_process(
+ COMMAND sh -c "echo 'TEST' | src/inline.sh var"
+ WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+ RESULT_VARIABLE inline_result
+ OUTPUT_QUIET
+ ERROR_QUIET
+ )
+ if(NOT inline_result EQUAL "0")
+ # The inline script failed, so browse mode is not supported.
+ set(${RESULT} "0" PARENT_SCOPE)
+ return()
+ endif()
+
+ # Now check availability of the unistd header
+ check_include_file_cxx(unistd.h PLATFORM_HAS_UNISTD_HEADER)
+ set(${RESULT} "${PLATFORM_HAS_UNISTD_HEADER}" PARENT_SCOPE)
+endfunction()
+
+check_platform_supports_browse_mode(platform_supports_ninja_browse)
+
+# Core source files all build into ninja library.
+add_library(libninja OBJECT
+ src/build_log.cc
+ src/build.cc
+ src/clean.cc
+ src/clparser.cc
+ src/dyndep.cc
+ src/dyndep_parser.cc
+ src/debug_flags.cc
+ src/deps_log.cc
+ src/disk_interface.cc
+ src/edit_distance.cc
+ src/eval_env.cc
+ src/graph.cc
+ src/graphviz.cc
+ src/line_printer.cc
+ src/manifest_parser.cc
+ src/metrics.cc
+ src/parser.cc
+ src/state.cc
+ src/string_piece_util.cc
+ src/util.cc
+ src/version.cc
+)
+if(WIN32)
+ target_sources(libninja PRIVATE
+ src/subprocess-win32.cc
+ src/includes_normalize-win32.cc
+ src/msvc_helper-win32.cc
+ src/msvc_helper_main-win32.cc
+ src/getopt.c
+ )
+ if(MSVC)
+ target_sources(libninja PRIVATE src/minidump-win32.cc)
+ endif()
+else()
+ target_sources(libninja PRIVATE src/subprocess-posix.cc)
+ if(CMAKE_SYSTEM_NAME STREQUAL "OS400" OR CMAKE_SYSTEM_NAME STREQUAL "AIX")
+ target_sources(libninja PRIVATE src/getopt.c)
+ endif()
+
+ # Needed for perfstat_cpu_total
+ if(CMAKE_SYSTEM_NAME STREQUAL "AIX")
+ target_link_libraries(libninja PUBLIC "-lperfstat")
+ endif()
+endif()
+
+#Fixes GetActiveProcessorCount on MinGW
+if(MINGW)
+target_compile_definitions(libninja PRIVATE _WIN32_WINNT=0x0601 __USE_MINGW_ANSI_STDIO=1)
+endif()
+
+# On IBM i (identified as "OS400" for compatibility reasons) and AIX, this fixes missing
+# PRId64 (and others) at compile time in C++ sources
+if(CMAKE_SYSTEM_NAME STREQUAL "OS400" OR CMAKE_SYSTEM_NAME STREQUAL "AIX")
+ string(APPEND CMAKE_CXX_FLAGS " -D__STDC_FORMAT_MACROS")
+endif()
+
+# Main executable is library plus main() function.
+add_executable(ninja src/ninja.cc)
+target_link_libraries(ninja PRIVATE libninja libninja-re2c)
+
+# Adds browse mode into the ninja binary if it's supported by the host platform.
+if(platform_supports_ninja_browse)
+ # Inlines src/browse.py into the browse_py.h header, so that it can be included
+ # by src/browse.cc
+ add_custom_command(
+ OUTPUT build/browse_py.h
+ MAIN_DEPENDENCY src/browse.py
+ DEPENDS src/inline.sh
+ COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_BINARY_DIR}/build
+ COMMAND src/inline.sh kBrowsePy
+ < src/browse.py
+ > ${CMAKE_BINARY_DIR}/build/browse_py.h
+ WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+ VERBATIM
+ )
+
+ target_compile_definitions(ninja PRIVATE NINJA_HAVE_BROWSE)
+ target_sources(ninja PRIVATE src/browse.cc)
+ set_source_files_properties(src/browse.cc
+ PROPERTIES
+ OBJECT_DEPENDS "${CMAKE_BINARY_DIR}/build/browse_py.h"
+ INCLUDE_DIRECTORIES "${CMAKE_BINARY_DIR}"
+ COMPILE_DEFINITIONS NINJA_PYTHON="python"
+ )
+endif()
+
+include(CTest)
+if(BUILD_TESTING)
+ # Tests all build into ninja_test executable.
+ add_executable(ninja_test
+ src/build_log_test.cc
+ src/build_test.cc
+ src/clean_test.cc
+ src/clparser_test.cc
+ src/depfile_parser_test.cc
+ src/deps_log_test.cc
+ src/disk_interface_test.cc
+ src/dyndep_parser_test.cc
+ src/edit_distance_test.cc
+ src/graph_test.cc
+ src/lexer_test.cc
+ src/manifest_parser_test.cc
+ src/ninja_test.cc
+ src/state_test.cc
+ src/string_piece_util_test.cc
+ src/subprocess_test.cc
+ src/test.cc
+ src/util_test.cc
+ )
+ if(WIN32)
+ target_sources(ninja_test PRIVATE src/includes_normalize_test.cc src/msvc_helper_test.cc)
+ endif()
+ target_link_libraries(ninja_test PRIVATE libninja libninja-re2c)
+
+ foreach(perftest
+ build_log_perftest
+ canon_perftest
+ clparser_perftest
+ depfile_parser_perftest
+ hash_collision_bench
+ manifest_parser_perftest
+ )
+ add_executable(${perftest} src/${perftest}.cc)
+ target_link_libraries(${perftest} PRIVATE libninja libninja-re2c)
+ endforeach()
+
+ if(CMAKE_SYSTEM_NAME STREQUAL "AIX" AND CMAKE_SIZEOF_VOID_P EQUAL 4)
+ # These tests require more memory than will fit in the standard AIX shared stack/heap (256M)
+ target_link_libraries(hash_collision_bench PRIVATE "-Wl,-bmaxdata:0x80000000")
+ target_link_libraries(manifest_parser_perftest PRIVATE "-Wl,-bmaxdata:0x80000000")
+ endif()
+
+ add_test(NinjaTest ninja_test)
+endif()
+
+install(TARGETS ninja DESTINATION bin)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..be1fc02
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,34 @@
+# How to successfully make changes to Ninja
+
+We're very wary of changes that increase the complexity of Ninja (in particular,
+new build file syntax or command-line flags) or increase the maintenance burden
+of Ninja. Ninja is already successfully used by hundreds of developers for large
+projects and it already achieves (most of) the goals we set out for it to do.
+It's probably best to discuss new feature ideas on the
+[mailing list](https://groups.google.com/forum/#!forum/ninja-build) or in an
+issue before creating a PR.
+
+## Coding guidelines
+
+Generally it's the
+[Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html) with
+a few additions:
+
+* Any code merged into the Ninja codebase which will be part of the main
+ executable must compile as C++03. You may use C++11 features in a test or an
+ unimportant tool if you guard your code with `#if __cplusplus >= 201103L`.
+* We have used `using namespace std;` a lot in the past. For new contributions,
+ please try to avoid relying on it and instead whenever possible use `std::`.
+ However, please do not change existing code simply to add `std::` unless your
+ contribution already needs to change that line of code anyway.
+* All source files should have the Google Inc. license header.
+* Use `///` for [Doxygen](http://www.doxygen.nl/) (use `\a` to refer to
+ arguments).
+* It's not necessary to document each argument, especially when they're
+ relatively self-evident (e.g. in
+ `CanonicalizePath(string* path, string* err)`, the arguments are hopefully
+ obvious).
+
+If you're unsure about code formatting, please use
+[clang-format](https://clang.llvm.org/docs/ClangFormat.html). However, please do
+not format code that is not otherwise part of your contribution.
diff --git a/COPYING b/COPYING
new file mode 100644
index 0000000..131cb1d
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,202 @@
+
+ Apache License
+                                 Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..d11fd33
--- /dev/null
+++ b/README.md
@@ -0,0 +1,51 @@
+# Ninja
+
+Ninja is a small build system with a focus on speed.
+https://ninja-build.org/
+
+See [the manual](https://ninja-build.org/manual.html) or
+`doc/manual.asciidoc` included in the distribution for background
+and more details.
+
+Binaries for Linux, Mac, and Windows are available at
+ [GitHub](https://github.com/ninja-build/ninja/releases).
+Run `./ninja -h` for Ninja help.
+
+Installation is not necessary because the only required file is the
+resulting ninja binary. However, to enable features like Bash
+completion and Emacs and Vim editing modes, some files in misc/ must be
+copied to appropriate locations.
+
+If you're interested in making changes to Ninja, read
+[CONTRIBUTING.md](CONTRIBUTING.md) first.
+
+## Building Ninja itself
+
+You can either build Ninja via the custom generator script written in Python or
+via CMake. For more details see
+[the wiki](https://github.com/ninja-build/ninja/wiki).
+
+### Python
+
+```
+./configure.py --bootstrap
+```
+
+This will generate the `ninja` binary and a `build.ninja` file you can now use
+to build Ninja with itself.
+
+### CMake
+
+```
+cmake -Bbuild-cmake -H.
+cmake --build build-cmake
+```
+
+The `ninja` binary will now be inside the `build-cmake` directory (you can
+choose any other name you like).
+
+To run the unit tests:
+
+```
+./build-cmake/ninja_test
+```
diff --git a/RELEASING b/RELEASING
new file mode 100644
index 0000000..0b03341
--- /dev/null
+++ b/RELEASING
@@ -0,0 +1,33 @@
+Notes to myself on all the steps to make for a Ninja release.
+
+Push new release branch:
+1. Run afl-fuzz for a day or so and run ninja_test
+2. Consider sending a heads-up to the ninja-build mailing list first
+3. Make sure branches 'master' and 'release' are synced up locally
+4. Update src/version.cc with new version (with ".git"), then
+ git commit -am 'mark this 1.5.0.git'
+5. git checkout release; git merge master
+6. Fix version number in src/version.cc (it will likely conflict in the above)
+7. Fix version in doc/manual.asciidoc (exists only on release branch)
+8. commit, tag, push (don't forget to push --tags)
+ git commit -am v1.5.0; git push origin release
+ git tag v1.5.0; git push --tags
+ # Push the 1.5.0.git change on master too:
+ git checkout master; git push origin master
+9. Construct release notes from prior notes
+ credits: git shortlog -s --no-merges REV..
+
+Release on github:
+1. https://github.com/blog/1547-release-your-software
+ Add binaries to https://github.com/ninja-build/ninja/releases
+
+Make announcement on mailing list:
+1. copy old mail
+
+Update website:
+1. Make sure your ninja checkout is on the v1.5.0 tag
+2. Clone https://github.com/ninja-build/ninja-build.github.io
+3. In that repo, `./update-docs.sh`
+4. Update index.html with newest version and link to release notes
+5. git commit -m 'run update-docs.sh, 1.5.0 release'
+6. git push origin master
diff --git a/appveyor.yml b/appveyor.yml
new file mode 100644
index 0000000..f0b92b8
--- /dev/null
+++ b/appveyor.yml
@@ -0,0 +1,61 @@
+version: 1.0.{build}
+image:
+ - Visual Studio 2017
+ - Ubuntu1804
+
+environment:
+ CLICOLOR_FORCE: 1
+ CHERE_INVOKING: 1 # Tell Bash to inherit the current working directory
+ matrix:
+ - MSYSTEM: MINGW64
+ - MSYSTEM: MSVC
+ - MSYSTEM: LINUX
+
+matrix:
+ exclude:
+ - image: Visual Studio 2017
+ MSYSTEM: LINUX
+ - image: Ubuntu1804
+ MSYSTEM: MINGW64
+ - image: Ubuntu1804
+ MSYSTEM: MSVC
+
+for:
+ -
+ matrix:
+ only:
+ - MSYSTEM: MINGW64
+ build_script:
+ ps: "C:\\msys64\\usr\\bin\\bash -lc @\"\n
+ pacman -S --quiet --noconfirm --needed re2c 2>&1\n
+ ./configure.py --bootstrap --platform mingw 2>&1\n
+ ./ninja all\n
+ ./ninja_test 2>&1\n
+ ./misc/ninja_syntax_test.py 2>&1\n\"@"
+ -
+ matrix:
+ only:
+ - MSYSTEM: MSVC
+ build_script:
+ - cmd: >-
+ call "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Auxiliary\Build\vcvars64.bat"
+
+ python configure.py --bootstrap
+
+ ninja.bootstrap.exe all
+
+ ninja_test
+
+ python misc/ninja_syntax_test.py
+
+ - matrix:
+ only:
+ - image: Ubuntu1804
+ build_script:
+ - ./configure.py --bootstrap
+ - ./ninja all
+ - ./ninja_test
+ - misc/ninja_syntax_test.py
+ - misc/output_test.py
+
+test: off
diff --git a/configure.py b/configure.py
new file mode 100755
index 0000000..cded265
--- /dev/null
+++ b/configure.py
@@ -0,0 +1,714 @@
+#!/usr/bin/env python
+#
+# Copyright 2001 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Script that generates the build.ninja for ninja itself.
+
+Projects that use ninja themselves should either write a similar script
+or use a meta-build system that supports Ninja output."""
+
+from __future__ import print_function
+
+from optparse import OptionParser
+import os
+import pipes
+import string
+import subprocess
+import sys
+
+sourcedir = os.path.dirname(os.path.realpath(__file__))
+sys.path.insert(0, os.path.join(sourcedir, 'misc'))
+import ninja_syntax
+
+
+class Platform(object):
+ """Represents a host/target platform and its specific build attributes."""
+ def __init__(self, platform):
+ self._platform = platform
+ if self._platform is not None:
+ return
+ self._platform = sys.platform
+ if self._platform.startswith('linux'):
+ self._platform = 'linux'
+ elif self._platform.startswith('freebsd'):
+ self._platform = 'freebsd'
+ elif self._platform.startswith('gnukfreebsd'):
+ self._platform = 'freebsd'
+ elif self._platform.startswith('openbsd'):
+ self._platform = 'openbsd'
+ elif self._platform.startswith('solaris') or self._platform == 'sunos5':
+ self._platform = 'solaris'
+ elif self._platform.startswith('mingw'):
+ self._platform = 'mingw'
+ elif self._platform.startswith('win'):
+ self._platform = 'msvc'
+ elif self._platform.startswith('bitrig'):
+ self._platform = 'bitrig'
+ elif self._platform.startswith('netbsd'):
+ self._platform = 'netbsd'
+ elif self._platform.startswith('aix'):
+ self._platform = 'aix'
+ elif self._platform.startswith('os400'):
+ self._platform = 'os400'
+ elif self._platform.startswith('dragonfly'):
+ self._platform = 'dragonfly'
+
+ @staticmethod
+ def known_platforms():
+ return ['linux', 'darwin', 'freebsd', 'openbsd', 'solaris', 'sunos5',
+ 'mingw', 'msvc', 'gnukfreebsd', 'bitrig', 'netbsd', 'aix',
+ 'dragonfly']
+
+ def platform(self):
+ return self._platform
+
+ def is_linux(self):
+ return self._platform == 'linux'
+
+ def is_mingw(self):
+ return self._platform == 'mingw'
+
+ def is_msvc(self):
+ return self._platform == 'msvc'
+
+ def msvc_needs_fs(self):
+ popen = subprocess.Popen(['cl', '/nologo', '/?'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ out, err = popen.communicate()
+ return b'/FS' in out
+
+ def is_windows(self):
+ return self.is_mingw() or self.is_msvc()
+
+ def is_solaris(self):
+ return self._platform == 'solaris'
+
+ def is_aix(self):
+ return self._platform == 'aix'
+
+ def is_os400_pase(self):
+ return self._platform == 'os400' or os.uname().sysname.startswith('OS400')
+
+ def uses_usr_local(self):
+ return self._platform in ('freebsd', 'openbsd', 'bitrig', 'dragonfly', 'netbsd')
+
+ def supports_ppoll(self):
+ return self._platform in ('freebsd', 'linux', 'openbsd', 'bitrig',
+ 'dragonfly')
+
+ def supports_ninja_browse(self):
+ return (not self.is_windows()
+ and not self.is_solaris()
+ and not self.is_aix())
+
+ def can_rebuild_in_place(self):
+ return not (self.is_windows() or self.is_aix())
+
+class Bootstrap:
+ """API shim for ninja_syntax.Writer that instead runs the commands.
+
+ Used to bootstrap Ninja from scratch. In --bootstrap mode this
+ class is used to execute all the commands to build an executable.
+ It also proxies all calls to an underlying ninja_syntax.Writer, to
+ behave like non-bootstrap mode.
+ """
+ def __init__(self, writer, verbose=False):
+ self.writer = writer
+ self.verbose = verbose
+ # Map of variable name => expanded variable value.
+ self.vars = {}
+ # Map of rule name => dict of rule attributes.
+ self.rules = {
+ 'phony': {}
+ }
+
+ def comment(self, text):
+ return self.writer.comment(text)
+
+ def newline(self):
+ return self.writer.newline()
+
+ def variable(self, key, val):
+ # In bootstrap mode, we have no ninja process to catch /showIncludes
+ # output.
+ self.vars[key] = self._expand(val).replace('/showIncludes', '')
+ return self.writer.variable(key, val)
+
+ def rule(self, name, **kwargs):
+ self.rules[name] = kwargs
+ return self.writer.rule(name, **kwargs)
+
+ def build(self, outputs, rule, inputs=None, **kwargs):
+ ruleattr = self.rules[rule]
+ cmd = ruleattr.get('command')
+ if cmd is None: # A phony rule, for example.
+ return
+
+ # Implement just enough of Ninja variable expansion etc. to
+ # make the bootstrap build work.
+ local_vars = {
+ 'in': self._expand_paths(inputs),
+ 'out': self._expand_paths(outputs)
+ }
+ for key, val in kwargs.get('variables', []):
+ local_vars[key] = ' '.join(ninja_syntax.as_list(val))
+
+ self._run_command(self._expand(cmd, local_vars))
+
+ return self.writer.build(outputs, rule, inputs, **kwargs)
+
+ def default(self, paths):
+ return self.writer.default(paths)
+
+ def _expand_paths(self, paths):
+ """Expand $vars in an array of paths, e.g. from a 'build' block."""
+ paths = ninja_syntax.as_list(paths)
+ return ' '.join(map(self._shell_escape, (map(self._expand, paths))))
+
+ def _expand(self, str, local_vars={}):
+ """Expand $vars in a string."""
+ return ninja_syntax.expand(str, self.vars, local_vars)
+
+ def _shell_escape(self, path):
+ """Quote paths containing spaces."""
+ return '"%s"' % path if ' ' in path else path
+
+ def _run_command(self, cmdline):
+ """Run a subcommand, quietly. Prints the full command on error."""
+ try:
+ if self.verbose:
+ print(cmdline)
+ subprocess.check_call(cmdline, shell=True)
+ except subprocess.CalledProcessError:
+ print('when running: ', cmdline)
+ raise
+
+
+parser = OptionParser()
+profilers = ['gmon', 'pprof']
+parser.add_option('--bootstrap', action='store_true',
+ help='bootstrap a ninja binary from nothing')
+parser.add_option('--verbose', action='store_true',
+ help='enable verbose build')
+parser.add_option('--platform',
+ help='target platform (' +
+ '/'.join(Platform.known_platforms()) + ')',
+ choices=Platform.known_platforms())
+parser.add_option('--host',
+ help='host platform (' +
+ '/'.join(Platform.known_platforms()) + ')',
+ choices=Platform.known_platforms())
+parser.add_option('--debug', action='store_true',
+ help='enable debugging extras',)
+parser.add_option('--profile', metavar='TYPE',
+ choices=profilers,
+ help='enable profiling (' + '/'.join(profilers) + ')',)
+parser.add_option('--with-gtest', metavar='PATH', help='ignored')
+parser.add_option('--with-python', metavar='EXE',
+ help='use EXE as the Python interpreter',
+ default=os.path.basename(sys.executable))
+parser.add_option('--force-pselect', action='store_true',
+ help='ppoll() is used by default where available, '
+ 'but some platforms may need to use pselect instead',)
+(options, args) = parser.parse_args()
+if args:
+ print('ERROR: extra unparsed command-line arguments:', args)
+ sys.exit(1)
+
+platform = Platform(options.platform)
+if options.host:
+ host = Platform(options.host)
+else:
+ host = platform
+
+BUILD_FILENAME = 'build.ninja'
+ninja_writer = ninja_syntax.Writer(open(BUILD_FILENAME, 'w'))
+n = ninja_writer
+
+if options.bootstrap:
+ # Make the build directory.
+ try:
+ os.mkdir('build')
+ except OSError:
+ pass
+ # Wrap ninja_writer with the Bootstrapper, which also executes the
+ # commands.
+ print('bootstrapping ninja...')
+ n = Bootstrap(n, verbose=options.verbose)
+
+n.comment('This file is used to build ninja itself.')
+n.comment('It is generated by ' + os.path.basename(__file__) + '.')
+n.newline()
+
+n.variable('ninja_required_version', '1.3')
+n.newline()
+
+n.comment('The arguments passed to configure.py, for rerunning it.')
+configure_args = sys.argv[1:]
+if '--bootstrap' in configure_args:
+ configure_args.remove('--bootstrap')
+n.variable('configure_args', ' '.join(configure_args))
+env_keys = set(['CXX', 'AR', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS'])
+configure_env = dict((k, os.environ[k]) for k in os.environ if k in env_keys)
+if configure_env:
+ config_str = ' '.join([k + '=' + pipes.quote(configure_env[k])
+ for k in configure_env])
+ n.variable('configure_env', config_str + '$ ')
+n.newline()
+
+CXX = configure_env.get('CXX', 'c++')
+objext = '.o'
+if platform.is_msvc():
+ CXX = 'cl'
+ objext = '.obj'
+
+def src(filename):
+ return os.path.join('$root', 'src', filename)
+def built(filename):
+ return os.path.join('$builddir', filename)
+def doc(filename):
+ return os.path.join('$root', 'doc', filename)
+def cc(name, **kwargs):
+ return n.build(built(name + objext), 'cxx', src(name + '.c'), **kwargs)
+def cxx(name, **kwargs):
+ return n.build(built(name + objext), 'cxx', src(name + '.cc'), **kwargs)
+def binary(name):
+ if platform.is_windows():
+ exe = name + '.exe'
+ n.build(name, 'phony', exe)
+ return exe
+ return name
+
+root = sourcedir
+if root == os.getcwd():
+ # In the common case where we're building directly in the source
+ # tree, simplify all the paths to just be cwd-relative.
+ root = '.'
+n.variable('root', root)
+n.variable('builddir', 'build')
+n.variable('cxx', CXX)
+if platform.is_msvc():
+ n.variable('ar', 'link')
+else:
+ n.variable('ar', configure_env.get('AR', 'ar'))
+
+if platform.is_msvc():
+ cflags = ['/showIncludes',
+ '/nologo', # Don't print startup banner.
+ '/Zi', # Create pdb with debug info.
+ '/W4', # Highest warning level.
+ '/WX', # Warnings as errors.
+ '/wd4530', '/wd4100', '/wd4706', '/wd4244',
+ '/wd4512', '/wd4800', '/wd4702', '/wd4819',
+ # Disable warnings about constant conditional expressions.
+ '/wd4127',
+ # Disable warnings about passing "this" during initialization.
+ '/wd4355',
+ # Disable warnings about ignored typedef in DbgHelp.h
+ '/wd4091',
+ '/GR-', # Disable RTTI.
+ # Disable size_t -> int truncation warning.
+ # We never have strings or arrays larger than 2**31.
+ '/wd4267',
+ '/DNOMINMAX', '/D_CRT_SECURE_NO_WARNINGS',
+ '/D_HAS_EXCEPTIONS=0',
+ '/DNINJA_PYTHON="%s"' % options.with_python]
+ if platform.msvc_needs_fs():
+ cflags.append('/FS')
+ ldflags = ['/DEBUG', '/libpath:$builddir']
+ if not options.debug:
+ cflags += ['/Ox', '/DNDEBUG', '/GL']
+ ldflags += ['/LTCG', '/OPT:REF', '/OPT:ICF']
+else:
+ cflags = ['-g', '-Wall', '-Wextra',
+ '-Wno-deprecated',
+ '-Wno-missing-field-initializers',
+ '-Wno-unused-parameter',
+ '-fno-rtti',
+ '-fno-exceptions',
+ '-fvisibility=hidden', '-pipe',
+ '-DNINJA_PYTHON="%s"' % options.with_python]
+ if options.debug:
+ cflags += ['-D_GLIBCXX_DEBUG', '-D_GLIBCXX_DEBUG_PEDANTIC']
+ cflags.remove('-fno-rtti') # Needed for above pedanticness.
+ else:
+ cflags += ['-O2', '-DNDEBUG']
+ try:
+ proc = subprocess.Popen(
+ [CXX, '-fdiagnostics-color', '-c', '-x', 'c++', '/dev/null',
+ '-o', '/dev/null'],
+ stdout=open(os.devnull, 'wb'), stderr=subprocess.STDOUT)
+ if proc.wait() == 0:
+ cflags += ['-fdiagnostics-color']
+ except:
+ pass
+ if platform.is_mingw():
+ cflags += ['-D_WIN32_WINNT=0x0601', '-D__USE_MINGW_ANSI_STDIO=1']
+ ldflags = ['-L$builddir']
+ if platform.uses_usr_local():
+ cflags.append('-I/usr/local/include')
+ ldflags.append('-L/usr/local/lib')
+ if platform.is_aix():
+ # printf formats for int64_t, uint64_t; large file support
+ cflags.append('-D__STDC_FORMAT_MACROS')
+ cflags.append('-D_LARGE_FILES')
+
+
+libs = []
+
+if platform.is_mingw():
+ cflags.remove('-fvisibility=hidden');
+ ldflags.append('-static')
+elif platform.is_solaris():
+ cflags.remove('-fvisibility=hidden')
+elif platform.is_aix():
+ cflags.remove('-fvisibility=hidden')
+elif platform.is_msvc():
+ pass
+else:
+ if options.profile == 'gmon':
+ cflags.append('-pg')
+ ldflags.append('-pg')
+ elif options.profile == 'pprof':
+ cflags.append('-fno-omit-frame-pointer')
+ libs.extend(['-Wl,--no-as-needed', '-lprofiler'])
+
+if platform.supports_ppoll() and not options.force_pselect:
+ cflags.append('-DUSE_PPOLL')
+if platform.supports_ninja_browse():
+ cflags.append('-DNINJA_HAVE_BROWSE')
+
+# Search for generated headers relative to build dir.
+cflags.append('-I.')
+
+def shell_escape(str):
+ """Escape str such that it's interpreted as a single argument by
+ the shell."""
+
+ # This isn't complete, but it's just enough to make NINJA_PYTHON work.
+ if platform.is_windows():
+ return str
+ if '"' in str:
+ return "'%s'" % str.replace("'", "\\'")
+ return str
+
+if 'CFLAGS' in configure_env:
+ cflags.append(configure_env['CFLAGS'])
+ ldflags.append(configure_env['CFLAGS'])
+if 'CXXFLAGS' in configure_env:
+ cflags.append(configure_env['CXXFLAGS'])
+ ldflags.append(configure_env['CXXFLAGS'])
+n.variable('cflags', ' '.join(shell_escape(flag) for flag in cflags))
+if 'LDFLAGS' in configure_env:
+ ldflags.append(configure_env['LDFLAGS'])
+n.variable('ldflags', ' '.join(shell_escape(flag) for flag in ldflags))
+n.newline()
+
+if platform.is_msvc():
+ n.rule('cxx',
+ command='$cxx $cflags -c $in /Fo$out /Fd' + built('$pdb'),
+ description='CXX $out',
+ deps='msvc' # /showIncludes is included in $cflags.
+ )
+else:
+ n.rule('cxx',
+ command='$cxx -MMD -MT $out -MF $out.d $cflags -c $in -o $out',
+ depfile='$out.d',
+ deps='gcc',
+ description='CXX $out')
+n.newline()
+
+if host.is_msvc():
+ n.rule('ar',
+ command='lib /nologo /ltcg /out:$out $in',
+ description='LIB $out')
+elif host.is_mingw():
+ n.rule('ar',
+ command='$ar crs $out $in',
+ description='AR $out')
+else:
+ n.rule('ar',
+ command='rm -f $out && $ar crs $out $in',
+ description='AR $out')
+n.newline()
+
+if platform.is_msvc():
+ n.rule('link',
+ command='$cxx $in $libs /nologo /link $ldflags /out:$out',
+ description='LINK $out')
+else:
+ n.rule('link',
+ command='$cxx $ldflags -o $out $in $libs',
+ description='LINK $out')
+n.newline()
+
+objs = []
+
+if platform.supports_ninja_browse():
+ n.comment('browse_py.h is used to inline browse.py.')
+ n.rule('inline',
+ command='"%s"' % src('inline.sh') + ' $varname < $in > $out',
+ description='INLINE $out')
+ n.build(built('browse_py.h'), 'inline', src('browse.py'),
+ implicit=src('inline.sh'),
+ variables=[('varname', 'kBrowsePy')])
+ n.newline()
+
+ objs += cxx('browse', order_only=built('browse_py.h'))
+ n.newline()
+
+n.comment('the depfile parser and ninja lexers are generated using re2c.')
+def has_re2c():
+ try:
+ proc = subprocess.Popen(['re2c', '-V'], stdout=subprocess.PIPE)
+ return int(proc.communicate()[0], 10) >= 1103
+ except OSError:
+ return False
+if has_re2c():
+ n.rule('re2c',
+ command='re2c -b -i --no-generation-date -o $out $in',
+ description='RE2C $out')
+ # Generate the .cc files in the source directory so we can check them in.
+ n.build(src('depfile_parser.cc'), 're2c', src('depfile_parser.in.cc'))
+ n.build(src('lexer.cc'), 're2c', src('lexer.in.cc'))
+else:
+ print("warning: A compatible version of re2c (>= 0.11.3) was not found; "
+ "changes to src/*.in.cc will not affect your build.")
+n.newline()
+
+n.comment('Core source files all build into ninja library.')
+cxxvariables = []
+if platform.is_msvc():
+ cxxvariables = [('pdb', 'ninja.pdb')]
+for name in ['build',
+ 'build_log',
+ 'clean',
+ 'clparser',
+ 'debug_flags',
+ 'depfile_parser',
+ 'deps_log',
+ 'disk_interface',
+ 'dyndep',
+ 'dyndep_parser',
+ 'edit_distance',
+ 'eval_env',
+ 'graph',
+ 'graphviz',
+ 'lexer',
+ 'line_printer',
+ 'manifest_parser',
+ 'metrics',
+ 'parser',
+ 'state',
+ 'string_piece_util',
+ 'util',
+ 'version']:
+ objs += cxx(name, variables=cxxvariables)
+if platform.is_windows():
+ for name in ['subprocess-win32',
+ 'includes_normalize-win32',
+ 'msvc_helper-win32',
+ 'msvc_helper_main-win32']:
+ objs += cxx(name, variables=cxxvariables)
+ if platform.is_msvc():
+ objs += cxx('minidump-win32', variables=cxxvariables)
+ objs += cc('getopt')
+else:
+ objs += cxx('subprocess-posix')
+if platform.is_aix():
+ objs += cc('getopt')
+if platform.is_msvc():
+ ninja_lib = n.build(built('ninja.lib'), 'ar', objs)
+else:
+ ninja_lib = n.build(built('libninja.a'), 'ar', objs)
+n.newline()
+
+if platform.is_msvc():
+ libs.append('ninja.lib')
+else:
+ libs.append('-lninja')
+
+if platform.is_aix() and not platform.is_os400_pase():
+ libs.append('-lperfstat')
+
+all_targets = []
+
+n.comment('Main executable is library plus main() function.')
+objs = cxx('ninja', variables=cxxvariables)
+ninja = n.build(binary('ninja'), 'link', objs, implicit=ninja_lib,
+ variables=[('libs', libs)])
+n.newline()
+all_targets += ninja
+
+if options.bootstrap:
+ # We've built the ninja binary. Don't run any more commands
+ # through the bootstrap executor, but continue writing the
+ # build.ninja file.
+ n = ninja_writer
+
+n.comment('Tests all build into ninja_test executable.')
+
+objs = []
+if platform.is_msvc():
+ cxxvariables = [('pdb', 'ninja_test.pdb')]
+
+for name in ['build_log_test',
+ 'build_test',
+ 'clean_test',
+ 'clparser_test',
+ 'depfile_parser_test',
+ 'deps_log_test',
+ 'dyndep_parser_test',
+ 'disk_interface_test',
+ 'edit_distance_test',
+ 'graph_test',
+ 'lexer_test',
+ 'manifest_parser_test',
+ 'ninja_test',
+ 'state_test',
+ 'string_piece_util_test',
+ 'subprocess_test',
+ 'test',
+ 'util_test']:
+ objs += cxx(name, variables=cxxvariables)
+if platform.is_windows():
+ for name in ['includes_normalize_test', 'msvc_helper_test']:
+ objs += cxx(name, variables=cxxvariables)
+
+ninja_test = n.build(binary('ninja_test'), 'link', objs, implicit=ninja_lib,
+ variables=[('libs', libs)])
+n.newline()
+all_targets += ninja_test
+
+
+n.comment('Ancillary executables.')
+
+if platform.is_aix() and '-maix64' not in ldflags:
+ # Both hash_collision_bench and manifest_parser_perftest require more
+ # memory than will fit in the standard 32-bit AIX shared stack/heap (256M)
+ libs.append('-Wl,-bmaxdata:0x80000000')
+
+for name in ['build_log_perftest',
+ 'canon_perftest',
+ 'depfile_parser_perftest',
+ 'hash_collision_bench',
+ 'manifest_parser_perftest',
+ 'clparser_perftest']:
+ if platform.is_msvc():
+ cxxvariables = [('pdb', name + '.pdb')]
+ objs = cxx(name, variables=cxxvariables)
+ all_targets += n.build(binary(name), 'link', objs,
+ implicit=ninja_lib, variables=[('libs', libs)])
+
+n.newline()
+
+n.comment('Generate a graph using the "graph" tool.')
+n.rule('gendot',
+ command='./ninja -t graph all > $out')
+n.rule('gengraph',
+ command='dot -Tpng $in > $out')
+dot = n.build(built('graph.dot'), 'gendot', ['ninja', 'build.ninja'])
+n.build('graph.png', 'gengraph', dot)
+n.newline()
+
+n.comment('Generate the manual using asciidoc.')
+n.rule('asciidoc',
+ command='asciidoc -b docbook -d book -o $out $in',
+ description='ASCIIDOC $out')
+n.rule('xsltproc',
+ command='xsltproc --nonet doc/docbook.xsl $in > $out',
+ description='XSLTPROC $out')
+docbookxml = n.build(built('manual.xml'), 'asciidoc', doc('manual.asciidoc'))
+manual = n.build(doc('manual.html'), 'xsltproc', docbookxml,
+ implicit=[doc('style.css'), doc('docbook.xsl')])
+n.build('manual', 'phony',
+ order_only=manual)
+n.newline()
+
+n.rule('dblatex',
+ command='dblatex -q -o $out -p doc/dblatex.xsl $in',
+ description='DBLATEX $out')
+n.build(doc('manual.pdf'), 'dblatex', docbookxml,
+ implicit=[doc('dblatex.xsl')])
+
+n.comment('Generate Doxygen.')
+n.rule('doxygen',
+ command='doxygen $in',
+ description='DOXYGEN $in')
+n.variable('doxygen_mainpage_generator',
+ src('gen_doxygen_mainpage.sh'))
+n.rule('doxygen_mainpage',
+ command='$doxygen_mainpage_generator $in > $out',
+ description='DOXYGEN_MAINPAGE $out')
+mainpage = n.build(built('doxygen_mainpage'), 'doxygen_mainpage',
+ ['README.md', 'COPYING'],
+ implicit=['$doxygen_mainpage_generator'])
+n.build('doxygen', 'doxygen', doc('doxygen.config'),
+ implicit=mainpage)
+n.newline()
+
+if not host.is_mingw():
+ n.comment('Regenerate build files if build script changes.')
+ n.rule('configure',
+ command='${configure_env}%s $root/configure.py $configure_args' %
+ options.with_python,
+ generator=True)
+ n.build('build.ninja', 'configure',
+ implicit=['$root/configure.py',
+ os.path.normpath('$root/misc/ninja_syntax.py')])
+ n.newline()
+
+n.default(ninja)
+n.newline()
+
+if host.is_linux():
+ n.comment('Packaging')
+ n.rule('rpmbuild',
+ command="misc/packaging/rpmbuild.sh",
+ description='Building rpms..')
+ n.build('rpm', 'rpmbuild')
+ n.newline()
+
+n.build('all', 'phony', all_targets)
+
+n.close()
+print('wrote %s.' % BUILD_FILENAME)
+
+if options.bootstrap:
+ print('bootstrap complete. rebuilding...')
+
+ rebuild_args = []
+
+ if platform.can_rebuild_in_place():
+ rebuild_args.append('./ninja')
+ else:
+ if platform.is_windows():
+ bootstrap_exe = 'ninja.bootstrap.exe'
+ final_exe = 'ninja.exe'
+ else:
+ bootstrap_exe = './ninja.bootstrap'
+ final_exe = './ninja'
+
+ if os.path.exists(bootstrap_exe):
+ os.unlink(bootstrap_exe)
+ os.rename(final_exe, bootstrap_exe)
+
+ rebuild_args.append(bootstrap_exe)
+
+ if options.verbose:
+ rebuild_args.append('-v')
+
+ subprocess.check_call(rebuild_args)
diff --git a/doc/README.md b/doc/README.md
new file mode 100644
index 0000000..6afe5d4
--- /dev/null
+++ b/doc/README.md
@@ -0,0 +1,11 @@
+This directory contains the Ninja manual and support files used in
+building it. Here's a brief overview of how it works.
+
+The source text, `manual.asciidoc`, is written in the AsciiDoc format.
+AsciiDoc can generate HTML but it doesn't look great; instead, we use
+AsciiDoc to generate the Docbook XML format and then provide our own
+Docbook XSL tweaks to produce HTML from that.
+
+In theory using AsciiDoc and DocBook allows us to produce nice PDF
+documentation etc. In reality it's not clear anyone wants that, but the
+build rules are in place to generate it if you install dblatex.
diff --git a/doc/dblatex.xsl b/doc/dblatex.xsl
new file mode 100644
index 0000000..c0da212
--- /dev/null
+++ b/doc/dblatex.xsl
@@ -0,0 +1,7 @@
+<!-- This custom XSL tweaks the dblatex XML settings. -->
+<xsl:stylesheet xmlns:xsl='http://www.w3.org/1999/XSL/Transform' version='1.0'>
+ <!-- These parameters disable the list of collaborators and revisions.
+ Together remove a useless page from the front matter. -->
+ <xsl:param name='doc.collab.show'>0</xsl:param>
+ <xsl:param name='latex.output.revhistory'>0</xsl:param>
+</xsl:stylesheet>
diff --git a/doc/docbook.xsl b/doc/docbook.xsl
new file mode 100644
index 0000000..2235be2
--- /dev/null
+++ b/doc/docbook.xsl
@@ -0,0 +1,34 @@
+<!-- This custom XSL tweaks the DocBook XML -> HTML settings to produce
+ an OK-looking manual. -->
+<!DOCTYPE xsl:stylesheet [
+<!ENTITY css SYSTEM "style.css">
+]>
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version='1.0'>
+ <xsl:import href="http://docbook.sourceforge.net/release/xsl/current/html/docbook.xsl"/>
+
+ <!-- Embed our stylesheet as the user-provided <head> content. -->
+ <xsl:template name="user.head.content"><style>&css;</style></xsl:template>
+
+ <!-- Remove the body.attributes block, which specifies a bunch of
+ useless bgcolor etc. attrs on the <body> tag. -->
+ <xsl:template name="body.attributes"></xsl:template>
+
+ <!-- Specify that in "book" form (which we're using), we only want a
+ single table of contents at the beginning of the document. -->
+ <xsl:param name="generate.toc">book toc</xsl:param>
+
+ <!-- Don't put the "Chapter 1." prefix on the "chapters". -->
+ <xsl:param name="chapter.autolabel">0</xsl:param>
+
+ <!-- Make builds reproducible by generating the same IDs from the same inputs -->
+ <xsl:param name="generate.consistent.ids">1</xsl:param>
+
+ <!-- Use <ul> for the table of contents. By default DocBook uses a
+ <dl>, which makes no semantic sense. I imagine they just did
+ it because it looks nice? -->
+ <xsl:param name="toc.list.type">ul</xsl:param>
+
+ <xsl:output method="html" encoding="utf-8" indent="no"
+ doctype-public=""/>
+</xsl:stylesheet>
diff --git a/doc/doxygen.config b/doc/doxygen.config
new file mode 100644
index 0000000..d933021
--- /dev/null
+++ b/doc/doxygen.config
@@ -0,0 +1,1250 @@
+# Doxyfile 1.4.5
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME = "Ninja"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+# PROJECT_NUMBER = "0"
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = "doc/doxygen/"
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish,
+# Dutch, Finnish, French, German, Greek, Hungarian, Italian, Japanese,
+# Japanese-en (Japanese with English messages), Korean, Korean-en, Norwegian,
+# Polish, Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish,
+# Swedish, and Ukrainian.
+
+OUTPUT_LANGUAGE = English
+
+# This tag can be used to specify the encoding used in the generated output.
+# The encoding is not always determined by the language that is chosen,
+# but also whether or not the output is meant for Windows or non-Windows users.
+# In case there is a difference, setting the USE_WINDOWS_ENCODING tag to YES
+# forces the Windows encoding (this is the default for the Windows binary),
+# whereas setting the tag to NO uses a Unix-style encoding (the default for
+# all platforms other than Windows).
+
+# Obsolete option.
+#USE_WINDOWS_ENCODING = YES
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+
+STRIP_FROM_PATH = src
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH = src/
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like the Qt-style comments (thus requiring an
+# explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF = YES
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the DETAILS_AT_TOP tag is set to YES then Doxygen
+# will output the detailed description near the top, like JavaDoc.
+# If set to NO, the detailed description appears after the member
+# documentation.
+
+# Has become obsolete.
+#DETAILS_AT_TOP = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 2
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for Java.
+# For instance, namespaces will be presented as packages, qualified scopes
+# will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want to
+# include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); vs.
+# func(std::string) {}). This also make the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+# BUILTIN_STL_SUPPORT = NO
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = YES
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = YES
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or define consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and defines in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
+# in the documentation. The default is YES.
+
+SHOW_DIRECTORIES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from the
+# version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$file:$line: $text "
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = src \
+ build/doxygen_mainpage
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py
+
+FILE_PATTERNS = *.cc \
+ *.h
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix filesystem feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH = src
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS = *.cpp \
+ *.cc \
+ *.h \
+ *.hh \
+ INSTALL DEPENDENCIES CHANGELOG LICENSE LGPL
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = YES
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH = src
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output. If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER
+# is applied to all files.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = YES
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = NO
+
+# If the REFERENCED_BY_RELATION tag is set to YES (the default)
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES (the default)
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = YES
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 2
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+HTML_HEADER =
+
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet. Note that doxygen will try to copy
+# the style sheet file to the HTML output directory, so don't put your own
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET =
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compressed HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = YES
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX = NO
+
+# This tag can be used to set the number of enum values (range [1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be
+# generated containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+,
+# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are
+# probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW = YES
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME =
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME =
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, a4wide, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = a4
+
+# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = YES
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader. This is useful
+# if you want to understand what is going on. On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = YES
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = YES
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all function-like macros that are alone
+# on a line, have an all uppercase name, and do not end with a semicolon. Such
+# function macros are typically used for boiler-plate code, and will confuse
+# the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE = doc/doxygen/html/Ninja.TAGFILE
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = YES
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option is superseded by the HAVE_DOT option below. This is only a
+# fallback. It is recommended to install and use dot, since it yields more
+# powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = YES
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = NO
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct groups dependencies
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+# UML_LOOK = YES
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will
+# generate a call dependency graph for every global function or class method.
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will show a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, or gif
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than
+# this value, doxygen will try to truncate the graph, so that it fits within
+# the specified constraint. Beware that most browsers cannot cope with very
+# large images.
+
+# Obsolete option.
+#MAX_DOT_GRAPH_WIDTH = 1280
+
+# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allowed height
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than
+# this value, doxygen will try to truncate the graph, so that it fits within
+# the specified constraint. Beware that most browsers cannot cope with very
+# large images.
+
+# Obsolete option.
+#MAX_DOT_GRAPH_HEIGHT = 1024
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lay further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that a graph may be further truncated if the graph's
+# image dimensions are not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH
+# and MAX_DOT_GRAPH_HEIGHT). If 0 is used for the depth value (the default),
+# the graph is not depth-constrained.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, which results in a white background.
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+# JW
+# DOT_MULTI_TARGETS = NO
+DOT_MULTI_TARGETS = YES
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine
+#---------------------------------------------------------------------------
+
+# The SEARCHENGINE tag specifies whether or not a search engine should be
+# used. If set to NO the values of all tags below this one will be ignored.
+
+# JW SEARCHENGINE = NO
+SEARCHENGINE = YES
diff --git a/doc/manual.asciidoc b/doc/manual.asciidoc
new file mode 100644
index 0000000..388410f
--- /dev/null
+++ b/doc/manual.asciidoc
@@ -0,0 +1,1173 @@
+The Ninja build system
+======================
+v1.10.2, Nov 2020
+
+
+Introduction
+------------
+
+Ninja is yet another build system. It takes as input the
+interdependencies of files (typically source code and output
+executables) and orchestrates building them, _quickly_.
+
+Ninja joins a sea of other build systems. Its distinguishing goal is
+to be fast. It is born from
+http://neugierig.org/software/chromium/notes/2011/02/ninja.html[my
+work on the Chromium browser project], which has over 30,000 source
+files and whose other build systems (including one built from custom
+non-recursive Makefiles) would take ten seconds to start building
+after changing one file. Ninja is under a second.
+
+Philosophical overview
+~~~~~~~~~~~~~~~~~~~~~~
+
+Where other build systems are high-level languages, Ninja aims to be
+an assembler.
+
+Build systems get slow when they need to make decisions. When you are
+in an edit-compile cycle you want it to be as fast as possible -- you
+want the build system to do the minimum work necessary to figure out
+what needs to be built immediately.
+
+Ninja contains the barest functionality necessary to describe
+arbitrary dependency graphs. Its lack of syntax makes it impossible
+to express complex decisions.
+
+Instead, Ninja is intended to be used with a separate program
+generating its input files. The generator program (like the
+`./configure` found in autotools projects) can analyze system
+dependencies and make as many decisions as possible up front so that
+incremental builds stay fast. Going beyond autotools, even build-time
+decisions like "which compiler flags should I use?" or "should I
+build a debug or release-mode binary?" belong in the `.ninja` file
+generator.
+
+Design goals
+~~~~~~~~~~~~
+
+Here are the design goals of Ninja:
+
+* very fast (i.e., instant) incremental builds, even for very large
+ projects.
+
+* very little policy about how code is built. Different projects and
+ higher-level build systems have different opinions about how code
+ should be built; for example, should built objects live alongside
+ the sources or should all build output go into a separate directory?
+ Is there a "package" rule that builds a distributable package of
+ the project? Sidestep these decisions by trying to allow either to
+ be implemented, rather than choosing, even if that results in
+ more verbosity.
+
+* get dependencies correct, and in particular situations that are
+ difficult to get right with Makefiles (e.g. outputs need an implicit
+ dependency on the command line used to generate them; to build C
+ source code you need to use gcc's `-M` flags for header
+ dependencies).
+
+* when convenience and speed are in conflict, prefer speed.
+
+Some explicit _non-goals_:
+
+* convenient syntax for writing build files by hand. _You should
+ generate your ninja files using another program_. This is how we
+ can sidestep many policy decisions.
+
+* built-in rules. _Out of the box, Ninja has no rules for
+ e.g. compiling C code._
+
+* build-time customization of the build. _Options belong in
+ the program that generates the ninja files_.
+
+* build-time decision-making ability such as conditionals or search
+ paths. _Making decisions is slow._
+
+To restate, Ninja is faster than other build systems because it is
+painfully simple. You must tell Ninja exactly what to do when you
+create your project's `.ninja` files.
+
+Comparison to Make
+~~~~~~~~~~~~~~~~~~
+
+Ninja is closest in spirit and functionality to Make, relying on
+simple dependencies between file timestamps.
+
+But fundamentally, make has a lot of _features_: suffix rules,
+functions, built-in rules that e.g. search for RCS files when building
+source. Make's language was designed to be written by humans. Many
+projects find make alone adequate for their build problems.
+
+In contrast, Ninja has almost no features; just those necessary to get
+builds correct while punting most complexity to generation of the
+ninja input files. Ninja by itself is unlikely to be useful for most
+projects.
+
+Here are some of the features Ninja adds to Make. (These sorts of
+features can often be implemented using more complicated Makefiles,
+but they are not part of make itself.)
+
+* Ninja has special support for discovering extra dependencies at build
+ time, making it easy to get <<ref_headers,header dependencies>>
+ correct for C/C++ code.
+
+* A build edge may have multiple outputs.
+
+* Outputs implicitly depend on the command line that was used to generate
+ them, which means that changing e.g. compilation flags will cause
+ the outputs to rebuild.
+
+* Output directories are always implicitly created before running the
+ command that relies on them.
+
+* Rules can provide shorter descriptions of the command being run, so
+ you can print e.g. `CC foo.o` instead of a long command line while
+ building.
+
+* Builds are always run in parallel, based by default on the number of
+ CPUs your system has. Underspecified build dependencies will result
+ in incorrect builds.
+
+* Command output is always buffered. This means commands running in
+ parallel don't interleave their output, and when a command fails we
+ can print its failure output next to the full command line that
+ produced the failure.
+
+
+Using Ninja for your project
+----------------------------
+
+Ninja currently works on Unix-like systems and Windows. It's seen the
+most testing on Linux (and has the best performance there) but it runs
+fine on Mac OS X and FreeBSD.
+
+If your project is small, Ninja's speed impact is likely unnoticeable.
+(However, even for small projects it sometimes turns out that Ninja's
+limited syntax forces simpler build rules that result in faster
+builds.) Another way to say this is that if you're happy with the
+edit-compile cycle time of your project already then Ninja won't help.
+
+There are many other build systems that are more user-friendly or
+featureful than Ninja itself. For some recommendations: the Ninja
+author found http://gittup.org/tup/[the tup build system] influential
+in Ninja's design, and thinks https://github.com/apenwarr/redo[redo]'s
+design is quite clever.
+
+Ninja's benefit comes from using it in conjunction with a smarter
+meta-build system.
+
+https://gn.googlesource.com/gn/[gn]:: The meta-build system used to
+generate build files for Google Chrome and related projects (v8,
+node.js), as well as Google Fuchsia. gn can generate Ninja files for
+all platforms supported by Chrome.
+
+https://cmake.org/[CMake]:: A widely used meta-build system that
+can generate Ninja files on Linux as of CMake version 2.8.8. Newer versions
+of CMake support generating Ninja files on Windows and Mac OS X too.
+
+https://github.com/ninja-build/ninja/wiki/List-of-generators-producing-ninja-build-files[others]:: Ninja ought to fit perfectly into other meta-build software
+like https://premake.github.io/[premake]. If you do this work,
+please let us know!
+
+Running Ninja
+~~~~~~~~~~~~~
+
+Run `ninja`. By default, it looks for a file named `build.ninja` in
+the current directory and builds all out-of-date targets. You can
+specify which targets (files) to build as command line arguments.
+
+There is also a special syntax `target^` for specifying a target
+as the first output of some rule containing the source you put in
+the command line, if one exists. For example, if you specify target as
+`foo.c^` then `foo.o` will get built (assuming you have those targets
+in your build files).
+
+`ninja -h` prints help output. Many of Ninja's flags intentionally
+match those of Make; e.g. `ninja -C build -j 20` changes into the
+`build` directory and runs 20 build commands in parallel. (Note that
+Ninja defaults to running commands in parallel anyway, so typically
+you don't need to pass `-j`.)
+
+
+Environment variables
+~~~~~~~~~~~~~~~~~~~~~
+
+Ninja supports one environment variable to control its behavior:
+`NINJA_STATUS`, the progress status printed before the rule being run.
+
+Several placeholders are available:
+
+`%s`:: The number of started edges.
+`%t`:: The total number of edges that must be run to complete the build.
+`%p`:: The percentage of started edges.
+`%r`:: The number of currently running edges.
+`%u`:: The number of remaining edges to start.
+`%f`:: The number of finished edges.
+`%o`:: Overall rate of finished edges per second
+`%c`:: Current rate of finished edges per second (average over builds
+specified by `-j` or its default)
+`%e`:: Elapsed time in seconds. _(Available since Ninja 1.2.)_
+`%%`:: A plain `%` character.
+
+The default progress status is `"[%f/%t] "` (note the trailing space
+to separate from the build rule). Another example of possible progress status
+could be `"[%u/%r/%f] "`.
+
+Extra tools
+~~~~~~~~~~~
+
+The `-t` flag on the Ninja command line runs some tools that we have
+found useful during Ninja's development. The current tools are:
+
+[horizontal]
+`query`:: dump the inputs and outputs of a given target.
+
+`browse`:: browse the dependency graph in a web browser. Clicking a
+file focuses the view on that file, showing inputs and outputs. This
+feature requires a Python installation. By default port 8000 is used
+and a web browser will be opened. This can be changed as follows:
++
+----
+ninja -t browse --port=8000 --no-browser mytarget
+----
++
+`graph`:: output a file in the syntax used by `graphviz`, an automatic
+graph layout tool. Use it like:
++
+----
+ninja -t graph mytarget | dot -Tpng -ograph.png
+----
++
+In the Ninja source tree, `ninja graph.png`
+generates an image for Ninja itself. If no target is given generate a
+graph for all root targets.
+
+`targets`:: output a list of targets either by rule or by depth. If used
+like +ninja -t targets rule _name_+ it prints the list of targets
+using the given rule to be built. If no rule is given, it prints the source
+files (the leaves of the graph). If used like
++ninja -t targets depth _digit_+ it
+prints the list of targets in a depth-first manner starting by the root
+targets (the ones with no outputs). Indentation is used to mark dependencies.
+If the depth is zero it prints all targets. If no arguments are provided
++ninja -t targets depth 1+ is assumed. In this mode targets may be listed
+several times. If used like this +ninja -t targets all+ it
+prints all the targets available without indentation and it is faster
+than the _depth_ mode.
+
+`commands`:: given a list of targets, print a list of commands which, if
+executed in order, may be used to rebuild those targets, assuming that all
+output files are out of date.
+
+`clean`:: remove built files. By default it removes all built files
+except for those created by the generator. Adding the `-g` flag also
+removes built files created by the generator (see <<ref_rule,the rule
+reference for the +generator+ attribute>>). Additional arguments are
+targets, which removes the given targets and recursively all files
+built for them.
++
+If used like +ninja -t clean -r _rules_+ it removes all files built using
+the given rules.
++
+Files created but not referenced in the graph are not removed. This
+tool takes into account the +-v+ and the +-n+ options (note that +-n+
+implies +-v+).
+
+`cleandead`:: remove files produced by previous builds that are no longer in the
+build file. _Available since Ninja 1.10._
+
+`compdb`:: given a list of rules, each of which is expected to be a
+C family language compiler rule whose first input is the name of the
+source file, prints on standard output a compilation database in the
+http://clang.llvm.org/docs/JSONCompilationDatabase.html[JSON format] expected
+by the Clang tooling interface.
+_Available since Ninja 1.2._
+
+`deps`:: show all dependencies stored in the `.ninja_deps` file. When given a
+target, show just the target's dependencies. _Available since Ninja 1.4._
+
+`recompact`:: recompact the `.ninja_deps` file. _Available since Ninja 1.4._
+
+`restat`:: updates all recorded file modification timestamps in the `.ninja_log`
+file. _Available since Ninja 1.10._
+
+`rules`:: output the list of all rules (eventually with their description
+if they have one). It can be used to know which rule name to pass to
++ninja -t targets rule _name_+ or +ninja -t compdb+.
+
+Writing your own Ninja files
+----------------------------
+
+The remainder of this manual is only useful if you are constructing
+Ninja files yourself: for example, if you're writing a meta-build
+system or supporting a new language.
+
+Conceptual overview
+~~~~~~~~~~~~~~~~~~~
+
+Ninja evaluates a graph of dependencies between files, and runs
+whichever commands are necessary to make your build target up to date
+as determined by file modification times. If you are familiar with
+Make, Ninja is very similar.
+
+A build file (default name: `build.ninja`) provides a list of _rules_
+-- short names for longer commands, like how to run the compiler --
+along with a list of _build_ statements saying how to build files
+using the rules -- which rule to apply to which inputs to produce
+which outputs.
+
+Conceptually, `build` statements describe the dependency graph of your
+project, while `rule` statements describe how to generate the files
+along a given edge of the graph.
+
+Syntax example
+~~~~~~~~~~~~~~
+
+Here's a basic `.ninja` file that demonstrates most of the syntax.
+It will be used as an example for the following sections.
+
+---------------------------------
+cflags = -Wall
+
+rule cc
+ command = gcc $cflags -c $in -o $out
+
+build foo.o: cc foo.c
+---------------------------------
+
+Variables
+~~~~~~~~~
+Despite the non-goal of being convenient to write by hand, to keep
+build files readable (debuggable), Ninja supports declaring shorter
+reusable names for strings. A declaration like the following
+
+----------------
+cflags = -g
+----------------
+
+can be used on the right side of an equals sign, dereferencing it with
+a dollar sign, like this:
+
+----------------
+rule cc
+ command = gcc $cflags -c $in -o $out
+----------------
+
+Variables can also be referenced using curly braces like `${in}`.
+
+Variables might better be called "bindings", in that a given variable
+cannot be changed, only shadowed. There is more on how shadowing works
+later in this document.
+
+Rules
+~~~~~
+
+Rules declare a short name for a command line. They begin with a line
+consisting of the `rule` keyword and a name for the rule. Then
+follows an indented set of `variable = value` lines.
+
+The basic example above declares a new rule named `cc`, along with the
+command to run. In the context of a rule, the `command` variable
+defines the command to run, `$in` expands to the list of
+input files (`foo.c`), and `$out` to the output files (`foo.o`) for the
+command. A full list of special variables is provided in
+<<ref_rule,the reference>>.
+
+Build statements
+~~~~~~~~~~~~~~~~
+
+Build statements declare a relationship between input and output
+files. They begin with the `build` keyword, and have the format
++build _outputs_: _rulename_ _inputs_+. Such a declaration says that
+all of the output files are derived from the input files. When the
+output files are missing or when the inputs change, Ninja will run the
+rule to regenerate the outputs.
+
+The basic example above describes how to build `foo.o`, using the `cc`
+rule.
+
+In the scope of a `build` block (including in the evaluation of its
+associated `rule`), the variable `$in` is the list of inputs and the
+variable `$out` is the list of outputs.
+
+A build statement may be followed by an indented set of `key = value`
+pairs, much like a rule. These variables will shadow any variables
+when evaluating the variables in the command. For example:
+
+----------------
+cflags = -Wall -Werror
+rule cc
+ command = gcc $cflags -c $in -o $out
+
+# If left unspecified, builds get the outer $cflags.
+build foo.o: cc foo.c
+
+# But you can shadow variables like cflags for a particular build.
+build special.o: cc special.c
+ cflags = -Wall
+
+# The variable was only shadowed for the scope of special.o;
+# Subsequent build lines get the outer (original) cflags.
+build bar.o: cc bar.c
+
+----------------
+
+For more discussion of how scoping works, consult <<ref_scope,the
+reference>>.
+
+If you need more complicated information passed from the build
+statement to the rule (for example, if the rule needs "the file
+extension of the first input"), pass that through as an extra
+variable, like how `cflags` is passed above.
+
+If the top-level Ninja file is specified as an output of any build
+statement and it is out of date, Ninja will rebuild and reload it
+before building the targets requested by the user.
+
+Generating Ninja files from code
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+`misc/ninja_syntax.py` in the Ninja distribution is a tiny Python
+module to facilitate generating Ninja files. It allows you to make
+Python calls like `ninja.rule(name='foo', command='bar',
+depfile='$out.d')` and it will generate the appropriate syntax. Feel
+free to just inline it into your project's build system if it's
+useful.
+
+
+More details
+------------
+
+The `phony` rule
+~~~~~~~~~~~~~~~~
+
+The special rule name `phony` can be used to create aliases for other
+targets. For example:
+
+----------------
+build foo: phony some/file/in/a/faraway/subdir/foo
+----------------
+
+This makes `ninja foo` build the longer path. Semantically, the
+`phony` rule is equivalent to a plain rule where the `command` does
+nothing, but phony rules are handled specially in that they aren't
+printed when run, logged (see below), nor do they contribute to the
+command count printed as part of the build process.
+
+`phony` can also be used to create dummy targets for files which
+may not exist at build time. If a phony build statement is written
+without any dependencies, the target will be considered out of date if
+it does not exist. Without a phony build statement, Ninja will report
+an error if the file does not exist and is required by the build.
+
+To create a rule that never rebuilds, use a build rule without any input:
+----------------
+rule touch
+ command = touch $out
+build file_that_always_exists.dummy: touch
+build dummy_target_to_follow_a_pattern: phony file_that_always_exists.dummy
+----------------
+
+
+Default target statements
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+By default, if no targets are specified on the command line, Ninja
+will build every output that is not named as an input elsewhere.
+You can override this behavior using a default target statement.
+A default target statement causes Ninja to build only a given subset
+of output files if none are specified on the command line.
+
+Default target statements begin with the `default` keyword, and have
+the format +default _targets_+. A default target statement must appear
+after the build statement that declares the target as an output file.
+They are cumulative, so multiple statements may be used to extend
+the list of default targets. For example:
+
+----------------
+default foo bar
+default baz
+----------------
+
+This causes Ninja to build the `foo`, `bar` and `baz` targets by
+default.
+
+
+[[ref_log]]
+The Ninja log
+~~~~~~~~~~~~~
+
+For each built file, Ninja keeps a log of the command used to build
+it. Using this log Ninja can know when an existing output was built
+with a different command line than the build files specify (i.e., the
+command line changed) and knows to rebuild the file.
+
+The log file is kept in the build root in a file called `.ninja_log`.
+If you provide a variable named `builddir` in the outermost scope,
+`.ninja_log` will be kept in that directory instead.
+
+
+[[ref_versioning]]
+Version compatibility
+~~~~~~~~~~~~~~~~~~~~~
+
+_Available since Ninja 1.2._
+
+Ninja version labels follow the standard major.minor.patch format,
+where the major version is increased on backwards-incompatible
+syntax/behavioral changes and the minor version is increased on new
+behaviors. Your `build.ninja` may declare a variable named
+`ninja_required_version` that asserts the minimum Ninja version
+required to use the generated file. For example,
+
+-----
+ninja_required_version = 1.1
+-----
+
+declares that the build file relies on some feature that was
+introduced in Ninja 1.1 (perhaps the `pool` syntax), and that
+Ninja 1.1 or greater must be used to build. Unlike other Ninja
+variables, this version requirement is checked immediately when
+the variable is encountered in parsing, so it's best to put it
+at the top of the build file.
+
+Ninja always warns if the major versions of Ninja and the
+`ninja_required_version` don't match; a major version change hasn't
+come up yet so it's difficult to predict what behavior might be
+required.
+
+[[ref_headers]]
+C/C++ header dependencies
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To get C/C++ header dependencies (or any other build dependency that
+works in a similar way) correct Ninja has some extra functionality.
+
+The problem with headers is that the full list of files that a given
+source file depends on can only be discovered by the compiler:
+different preprocessor defines and include paths cause different files
+to be used. Some compilers can emit this information while building,
+and Ninja can use that to get its dependencies perfect.
+
+Consider: if the file has never been compiled, it must be built anyway,
+generating the header dependencies as a side effect. If any file is
+later modified (even in a way that changes which headers it depends
+on) the modification will cause a rebuild as well, keeping the
+dependencies up to date.
+
+When loading these special dependencies, Ninja implicitly adds extra
+build edges such that it is not an error if the listed dependency is
+missing. This allows you to delete a header file and rebuild without
+the build aborting due to a missing input.
+
+depfile
+^^^^^^^
+
+`gcc` (and other compilers like `clang`) support emitting dependency
+information in the syntax of a Makefile. (Any command that can write
+dependencies in this form can be used, not just `gcc`.)
+
+To bring this information into Ninja requires cooperation. On the
+Ninja side, the `depfile` attribute on the `build` must point to a
+path where this data is written. (Ninja only supports the limited
+subset of the Makefile syntax emitted by compilers.) Then the command
+must know to write dependencies into the `depfile` path.
+Use it like in the following example:
+
+----
+rule cc
+ depfile = $out.d
+ command = gcc -MD -MF $out.d [other gcc flags here]
+----
+
+The `-MD` flag to `gcc` tells it to output header dependencies, and
+the `-MF` flag tells it where to write them.
+
+deps
+^^^^
+
+_(Available since Ninja 1.3.)_
+
+It turns out that for large projects (and particularly on Windows,
+where the file system is slow) loading these dependency files on
+startup is slow.
+
+Ninja 1.3 can instead process dependencies just after they're generated
+and save a compacted form of the same information in a Ninja-internal
+database.
+
+Ninja supports this processing in two forms.
+
+1. `deps = gcc` specifies that the tool outputs `gcc`-style dependencies
+ in the form of Makefiles. Adding this to the above example will
+ cause Ninja to process the `depfile` immediately after the
+ compilation finishes, then delete the `.d` file (which is only used
+ as a temporary).
+
+2. `deps = msvc` specifies that the tool outputs header dependencies
+ in the form produced by Visual Studio's compiler's
+ http://msdn.microsoft.com/en-us/library/hdkef6tk(v=vs.90).aspx[`/showIncludes`
+ flag]. Briefly, this means the tool outputs specially-formatted lines
+ to its stdout. Ninja then filters these lines from the displayed
+   output. No `depfile` attribute is necessary, but the localized string
+   in front of the header file path must be specified. For instance,
+   `msvc_deps_prefix = Note: including file:`
+   for an English Visual Studio (the default). It should be globally defined.
++
+----
+msvc_deps_prefix = Note: including file:
+rule cc
+ deps = msvc
+ command = cl /showIncludes -c $in /Fo$out
+----
+
+If the include directory directives are using absolute paths, your depfile
+may result in a mixture of relative and absolute paths. Paths used by other
+build rules need to match exactly. Therefore, it is recommended to use
+relative paths in these cases.
+
+[[ref_pool]]
+Pools
+~~~~~
+
+_Available since Ninja 1.1._
+
+Pools allow you to allocate one or more rules or edges a finite number
+of concurrent jobs which is more tightly restricted than the default
+parallelism.
+
+This can be useful, for example, to restrict a particular expensive rule
+(like link steps for huge executables), or to restrict particular build
+statements which you know perform poorly when run concurrently.
+
+Each pool has a `depth` variable which is specified in the build file.
+The pool is then referred to with the `pool` variable on either a rule
+or a build statement.
+
+No matter what pools you specify, ninja will never run more concurrent jobs
+than the default parallelism, or the number of jobs specified on the command
+line (with `-j`).
+
+----------------
+# No more than 4 links at a time.
+pool link_pool
+ depth = 4
+
+# No more than 1 heavy object at a time.
+pool heavy_object_pool
+ depth = 1
+
+rule link
+ ...
+ pool = link_pool
+
+rule cc
+ ...
+
+# The link_pool is used here. Only 4 links will run concurrently.
+build foo.exe: link input.obj
+
+# A build statement can be exempted from its rule's pool by setting an
+# empty pool. This effectively puts the build statement back into the default
+# pool, which has infinite depth.
+build other.exe: link input.obj
+ pool =
+
+# A build statement can specify a pool directly.
+# Only one of these builds will run at a time.
+build heavy_object1.obj: cc heavy_obj1.cc
+ pool = heavy_object_pool
+build heavy_object2.obj: cc heavy_obj2.cc
+ pool = heavy_object_pool
+
+----------------
+
+The `console` pool
+^^^^^^^^^^^^^^^^^^
+
+_Available since Ninja 1.5._
+
+There exists a pre-defined pool named `console` with a depth of 1. It has
+the special property that any task in the pool has direct access to the
+standard input, output and error streams provided to Ninja, which are
+normally connected to the user's console (hence the name) but could be
+redirected. This can be useful for interactive tasks or long-running tasks
+which produce status updates on the console (such as test suites).
+
+While a task in the `console` pool is running, Ninja's regular output (such
+as progress status and output from concurrent tasks) is buffered until
+it completes.
+
+[[ref_ninja_file]]
+Ninja file reference
+--------------------
+
+A file is a series of declarations. A declaration can be one of:
+
+1. A rule declaration, which begins with +rule _rulename_+, and
+ then has a series of indented lines defining variables.
+
+2. A build edge, which looks like +build _output1_ _output2_:
+ _rulename_ _input1_ _input2_+. +
+ Implicit dependencies may be tacked on the end with +|
+ _dependency1_ _dependency2_+. +
+ Order-only dependencies may be tacked on the end with +||
+ _dependency1_ _dependency2_+. (See <<ref_dependencies,the reference on
+ dependency types>>.)
++
+Implicit outputs _(available since Ninja 1.7)_ may be added before
+the `:` with +| _output1_ _output2_+ and do not appear in `$out`.
+(See <<ref_outputs,the reference on output types>>.)
+
+3. Variable declarations, which look like +_variable_ = _value_+.
+
+4. Default target statements, which look like +default _target1_ _target2_+.
+
+5. References to more files, which look like +subninja _path_+ or
+ +include _path_+. The difference between these is explained below
+ <<ref_scope,in the discussion about scoping>>.
+
+6. A pool declaration, which looks like +pool _poolname_+. Pools are explained
+ <<ref_pool, in the section on pools>>.
+
+[[ref_lexer]]
+Lexical syntax
+~~~~~~~~~~~~~~
+
+Ninja is mostly encoding agnostic, as long as the bytes Ninja cares
+about (like slashes in paths) are ASCII. This means e.g. UTF-8 or
+ISO-8859-1 input files ought to work.
+
+Comments begin with `#` and extend to the end of the line.
+
+Newlines are significant. Statements like `build foo bar` are a set
+of space-separated tokens that end at the newline. Newlines and
+spaces within a token must be escaped.
+
+There is only one escape character, `$`, and it has the following
+behaviors:
+
+`$` followed by a newline:: escape the newline (continue the current line
+across a line break).
+
+`$` followed by text:: a variable reference.
+
+`${varname}`:: alternate syntax for `$varname`.
+
+`$` followed by space:: a space. (This is only necessary in lists of
+paths, where a space would otherwise separate filenames. See below.)
+
+`$:` :: a colon. (This is only necessary in `build` lines, where a colon
+would otherwise terminate the list of outputs.)
+
+`$$`:: a literal `$`.
+
+A `build` or `default` statement is first parsed as a space-separated
+list of filenames and then each name is expanded. This means that
+spaces within a variable will result in spaces in the expanded
+filename.
+
+----
+spaced = foo bar
+build $spaced/baz other$ file: ...
+# The above build line has two outputs: "foo bar/baz" and "other file".
+----
+
+In a `name = value` statement, whitespace at the beginning of a value
+is always stripped. Whitespace at the beginning of a line after a
+line continuation is also stripped.
+
+----
+two_words_with_one_space = foo $
+ bar
+one_word_with_no_space = foo$
+ bar
+----
+
+Other whitespace is only significant if it's at the beginning of a
+line. If a line is indented more than the previous one, it's
+considered part of its parent's scope; if it is indented less than the
+previous one, it closes the previous scope.
+
+[[ref_toplevel]]
+Top-level variables
+~~~~~~~~~~~~~~~~~~~
+
+Two variables are significant when declared in the outermost file scope.
+
+`builddir`:: a directory for some Ninja output files. See <<ref_log,the
+ discussion of the build log>>. (You can also store other build output
+ in this directory.)
+
+`ninja_required_version`:: the minimum version of Ninja required to process
+ the build correctly. See <<ref_versioning,the discussion of versioning>>.
+
+
+[[ref_rule]]
+Rule variables
+~~~~~~~~~~~~~~
+
+A `rule` block contains a list of `key = value` declarations that
+affect the processing of the rule. Here is a full list of special
+keys.
+
+`command` (_required_):: the command line to run. Each `rule` may
+ have only one `command` declaration. See <<ref_rule_command,the next
+ section>> for more details on quoting and executing multiple commands.
+
+`depfile`:: path to an optional `Makefile` that contains extra
+ _implicit dependencies_ (see <<ref_dependencies,the reference on
+ dependency types>>). This is explicitly to support C/C++ header
+ dependencies; see <<ref_headers,the full discussion>>.
+
+`deps`:: _(Available since Ninja 1.3.)_ if present, must be one of
+ `gcc` or `msvc` to specify special dependency processing. See
+ <<ref_headers,the full discussion>>. The generated database is
+ stored as `.ninja_deps` in the `builddir`, see <<ref_toplevel,the
+ discussion of `builddir`>>.
+
+`msvc_deps_prefix`:: _(Available since Ninja 1.5.)_ defines the string
+ which should be stripped from msvc's /showIncludes output. Only
+ needed when `deps = msvc` and no English Visual Studio version is used.
+
+`description`:: a short description of the command, used to pretty-print
+ the command as it's running. The `-v` flag controls whether to print
+ the full command or its description; if a command fails, the full command
+ line will always be printed before the command's output.
+
+`dyndep`:: _(Available since Ninja 1.10.)_ Used only on build statements.
+ If present, must name one of the build statement inputs. Dynamically
+ discovered dependency information will be loaded from the file.
+ See the <<ref_dyndep,dynamic dependencies>> section for details.
+
+`generator`:: if present, specifies that this rule is used to
+ re-invoke the generator program. Files built using `generator`
+ rules are treated specially in two ways: firstly, they will not be
+ rebuilt if the command line changes; and secondly, they are not
+ cleaned by default.
+
+`in`:: the space-separated list of files provided as inputs to the build line
+ referencing this `rule`, shell-quoted if it appears in commands. (`$in` is
+ provided solely for convenience; if you need some subset or variant of this
+ list of files, just construct a new variable with that list and use
+ that instead.)
+
+`in_newline`:: the same as `$in` except that multiple inputs are
+ separated by newlines rather than spaces. (For use with
+ `$rspfile_content`; this works around a bug in the MSVC linker where
+ it uses a fixed-size buffer for processing input.)
+
+`out`:: the space-separated list of files provided as outputs to the build line
+ referencing this `rule`, shell-quoted if it appears in commands.
+
+`restat`:: if present, causes Ninja to re-stat the command's outputs
+ after execution of the command. Each output whose modification time
+ the command did not change will be treated as though it had never
+ needed to be built. This may cause the output's reverse
+ dependencies to be removed from the list of pending build actions.
+
+`rspfile`, `rspfile_content`:: if present (both), Ninja will use a
+ response file for the given command, i.e. write the selected string
+ (`rspfile_content`) to the given file (`rspfile`) before calling the
+ command and delete the file after successful execution of the
+ command.
++
+This is particularly useful on Windows OS, where the maximal length of
+a command line is limited and response files must be used instead.
++
+Use it like in the following example:
++
+----
+rule link
+ command = link.exe /OUT$out [usual link flags here] @$out.rsp
+ rspfile = $out.rsp
+ rspfile_content = $in
+
+build myapp.exe: link a.obj b.obj [possibly many other .obj files]
+----
+
+[[ref_rule_command]]
+Interpretation of the `command` variable
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Fundamentally, command lines behave differently on Unixes and Windows.
+
+On Unixes, commands are arrays of arguments. The Ninja `command`
+variable is passed directly to `sh -c`, which is then responsible for
+interpreting that string into an argv array. Therefore the quoting
+rules are those of the shell, and you can use all the normal shell
+operators, like `&&` to chain multiple commands, or `VAR=value cmd` to
+set environment variables.
+
+On Windows, commands are strings, so Ninja passes the `command` string
+directly to `CreateProcess`. (In the common case of simply executing
+a compiler this means there is less overhead.) Consequently the
+quoting rules are determined by the called program, which on Windows
+are usually provided by the C library. If you need shell
+interpretation of the command (such as the use of `&&` to chain
+multiple commands), make the command execute the Windows shell by
+prefixing the command with `cmd /c`. Ninja may error with "invalid parameter"
+which usually indicates that the command line length has been exceeded.
+
+[[ref_outputs]]
+Build outputs
+~~~~~~~~~~~~~
+
+There are two types of build outputs which are subtly different.
+
+1. _Explicit outputs_, as listed in a build line. These are
+ available as the `$out` variable in the rule.
++
+This is the standard form of output to be used for e.g. the
+object file of a compile command.
+
+2. _Implicit outputs_, as listed in a build line with the syntax +|
+ _out1_ _out2_+ + before the `:` of a build line _(available since
+ Ninja 1.7)_. The semantics are identical to explicit outputs,
+ the only difference is that implicit outputs don't show up in the
+ `$out` variable.
++
+This is for expressing outputs that don't show up on the
+command line of the command.
+
+[[ref_dependencies]]
+Build dependencies
+~~~~~~~~~~~~~~~~~~
+
+There are three types of build dependencies which are subtly different.
+
+1. _Explicit dependencies_, as listed in a build line. These are
+ available as the `$in` variable in the rule. Changes in these files
+ cause the output to be rebuilt; if these files are missing and
+ Ninja doesn't know how to build them, the build is aborted.
++
+This is the standard form of dependency to be used e.g. for the
+source file of a compile command.
+
+2. _Implicit dependencies_, either as picked up from
+ a `depfile` attribute on a rule or from the syntax +| _dep1_
+ _dep2_+ on the end of a build line. The semantics are identical to
+ explicit dependencies, the only difference is that implicit dependencies
+ don't show up in the `$in` variable.
++
+This is for expressing dependencies that don't show up on the
+command line of the command; for example, for a rule that runs a
+script, the script itself should be an implicit dependency, as
+changes to the script should cause the output to rebuild.
++
+Note that dependencies as loaded through depfiles have slightly different
+semantics, as described in the <<ref_rule,rule reference>>.
+
+3. _Order-only dependencies_, expressed with the syntax +|| _dep1_
+ _dep2_+ on the end of a build line. When these are out of date, the
+ output is not rebuilt until they are built, but changes in order-only
+ dependencies alone do not cause the output to be rebuilt.
++
+Order-only dependencies can be useful for bootstrapping dependencies
+that are only discovered during build time: for example, to generate a
+header file before starting a subsequent compilation step. (Once the
+header is used in compilation, a generated dependency file will then
+express the implicit dependency.)
+
+File paths are compared as is, which means that an absolute path and a
+relative path, pointing to the same file, are considered different by Ninja.
+
+Variable expansion
+~~~~~~~~~~~~~~~~~~
+
+Variables are expanded in paths (in a `build` or `default` statement)
+and on the right side of a `name = value` statement.
+
+When a `name = value` statement is evaluated, its right-hand side is
+expanded immediately (according to the below scoping rules), and
+from then on `$name` expands to the static string as the result of the
+expansion. It is never the case that you'll need to "double-escape" a
+value to prevent it from getting expanded twice.
+
+All variables are expanded immediately as they're encountered in parsing,
+with one important exception: variables in `rule` blocks are expanded
+when the rule is _used_, not when it is declared. In the following
+example, the `demo` rule prints "this is a demo of bar".
+
+----
+rule demo
+ command = echo "this is a demo of $foo"
+
+build out: demo
+ foo = bar
+----
+
+[[ref_scope]]
+Evaluation and scoping
+~~~~~~~~~~~~~~~~~~~~~~
+
+Top-level variable declarations are scoped to the file they occur in.
+
+Rule declarations are also scoped to the file they occur in.
+_(Available since Ninja 1.6)_
+
+The `subninja` keyword, used to include another `.ninja` file,
+introduces a new scope. The included `subninja` file may use the
+variables and rules from the parent file, and shadow their values for the file's
+scope, but it won't affect values of the variables in the parent.
+
+To include another `.ninja` file in the current scope, much like a C
+`#include` statement, use `include` instead of `subninja`.
+
+Variable declarations indented in a `build` block are scoped to the
+`build` block. The full lookup order for a variable expanded in a
+`build` block (or the `rule` it uses) is:
+
+1. Special built-in variables (`$in`, `$out`).
+
+2. Build-level variables from the `build` block.
+
+3. Rule-level variables from the `rule` block (i.e. `$command`).
+ (Note from the above discussion on expansion that these are
+ expanded "late", and may make use of in-scope bindings like `$in`.)
+
+4. File-level variables from the file that the `build` line was in.
+
+5. Variables from the file that included that file using the
+ `subninja` keyword.
+
+[[ref_dyndep]]
+Dynamic Dependencies
+--------------------
+
+_Available since Ninja 1.10._
+
+Some use cases require implicit dependency information to be dynamically
+discovered from source file content _during the build_ in order to build
+correctly on the first run (e.g. Fortran module dependencies). This is
+unlike <<ref_headers,header dependencies>> which are only needed on the
+second run and later to rebuild correctly. A build statement may have a
+`dyndep` binding naming one of its inputs to specify that dynamic
+dependency information must be loaded from the file. For example:
+
+----
+build out: ... || foo
+ dyndep = foo
+build foo: ...
+----
+
+This specifies that file `foo` is a dyndep file. Since it is an input,
+the build statement for `out` can never be executed before `foo` is built.
+As soon as `foo` is finished Ninja will read it to load dynamically
+discovered dependency information for `out`. This may include additional
+implicit inputs and/or outputs. Ninja will update the build graph
+accordingly and the build will proceed as if the information was known
+originally.
+
+Dyndep file reference
+~~~~~~~~~~~~~~~~~~~~~
+
+Files specified by `dyndep` bindings use the same <<ref_lexer,lexical syntax>>
+as <<ref_ninja_file,ninja build files>> and have the following layout.
+
+1. A version number in the form `<major>[.<minor>][<suffix>]`:
++
+----
+ninja_dyndep_version = 1
+----
++
+Currently the version number must always be `1` or `1.0` but may have
+an arbitrary suffix.
+
+2. One or more build statements of the form:
++
+----
+build out | imp-outs... : dyndep | imp-ins...
+----
++
+Every statement must specify exactly one explicit output and must use
+the rule name `dyndep`. The `| imp-outs...` and `| imp-ins...` portions
+are optional.
+
+3. An optional `restat` <<ref_rule,variable binding>> on each build statement.
+
+The build statements in a dyndep file must have a one-to-one correspondence
+to build statements in the <<ref_ninja_file,ninja build file>> that name the
+dyndep file in a `dyndep` binding. No dyndep build statement may be omitted
+and no extra build statements may be specified.
+
+Dyndep Examples
+~~~~~~~~~~~~~~~
+
+Fortran Modules
+^^^^^^^^^^^^^^^
+
+Consider a Fortran source file `foo.f90` that provides a module
+`foo.mod` (an implicit output of compilation) and another source file
+`bar.f90` that uses the module (an implicit input of compilation). This
+implicit dependency must be discovered before we compile either source
+in order to ensure that `bar.f90` never compiles before `foo.f90`, and
+that `bar.f90` recompiles when `foo.mod` changes. We can achieve this
+as follows:
+
+----
+rule f95
+ command = f95 -o $out -c $in
+rule fscan
+ command = fscan -o $out $in
+
+build foobar.dd: fscan foo.f90 bar.f90
+
+build foo.o: f95 foo.f90 || foobar.dd
+ dyndep = foobar.dd
+build bar.o: f95 bar.f90 || foobar.dd
+ dyndep = foobar.dd
+----
+
+In this example the order-only dependencies ensure that `foobar.dd` is
+generated before either source compiles. The hypothetical `fscan` tool
+scans the source files, assumes each will be compiled to a `.o` of the
+same name, and writes `foobar.dd` with content such as:
+
+----
+ninja_dyndep_version = 1
+build foo.o | foo.mod: dyndep
+build bar.o: dyndep | foo.mod
+----
+
+Ninja will load this file to add `foo.mod` as an implicit output of
+`foo.o` and implicit input of `bar.o`. This ensures that the Fortran
+sources are always compiled in the proper order and recompiled when
+needed.
+
+Tarball Extraction
+^^^^^^^^^^^^^^^^^^
+
+Consider a tarball `foo.tar` that we want to extract. The extraction time
+can be recorded with a `foo.tar.stamp` file so that extraction repeats if
+the tarball changes, but we also would like to re-extract if any of the
+outputs is missing. However, the list of outputs depends on the content
+of the tarball and cannot be spelled out explicitly in the ninja build file.
+We can achieve this as follows:
+
+----
+rule untar
+ command = tar xf $in && touch $out
+rule scantar
+ command = scantar --stamp=$stamp --dd=$out $in
+build foo.tar.dd: scantar foo.tar
+ stamp = foo.tar.stamp
+build foo.tar.stamp: untar foo.tar || foo.tar.dd
+ dyndep = foo.tar.dd
+----
+
+In this example the order-only dependency ensures that `foo.tar.dd` is
+built before the tarball extracts. The hypothetical `scantar` tool
+will read the tarball (e.g. via `tar tf`) and write `foo.tar.dd` with
+content such as:
+
+----
+ninja_dyndep_version = 1
+build foo.tar.stamp | file1.txt file2.txt : dyndep
+ restat = 1
+----
+
+Ninja will load this file to add `file1.txt` and `file2.txt` as implicit
+outputs of `foo.tar.stamp`, and to mark the build statement for `restat`.
+On future builds, if any implicit output is missing the tarball will be
+extracted again. The `restat` binding tells Ninja to tolerate the fact
+that the implicit outputs may not have modification times newer than
+the tarball itself (avoiding re-extraction on every build).
diff --git a/doc/style.css b/doc/style.css
new file mode 100644
index 0000000..9976c03
--- /dev/null
+++ b/doc/style.css
@@ -0,0 +1,29 @@
+body {
+ margin: 5ex 10ex;
+ max-width: 80ex;
+ line-height: 1.5;
+ font-family: sans-serif;
+}
+h1, h2, h3 {
+ font-weight: normal;
+}
+pre, code {
+ font-family: x, monospace;
+}
+pre {
+ padding: 1ex;
+ background: #eee;
+ border: solid 1px #ddd;
+ min-width: 0;
+ font-size: 90%;
+}
+code {
+ color: #007;
+}
+div.chapter {
+ margin-top: 4em;
+ border-top: solid 2px black;
+}
+p {
+ margin-top: 0;
+}
diff --git a/misc/afl-fuzz-tokens/kw_build b/misc/afl-fuzz-tokens/kw_build
new file mode 100644
index 0000000..c795b05
--- /dev/null
+++ b/misc/afl-fuzz-tokens/kw_build
@@ -0,0 +1 @@
+build \ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/kw_default b/misc/afl-fuzz-tokens/kw_default
new file mode 100644
index 0000000..331d858
--- /dev/null
+++ b/misc/afl-fuzz-tokens/kw_default
@@ -0,0 +1 @@
+default \ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/kw_include b/misc/afl-fuzz-tokens/kw_include
new file mode 100644
index 0000000..2996fba
--- /dev/null
+++ b/misc/afl-fuzz-tokens/kw_include
@@ -0,0 +1 @@
+include \ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/kw_pool b/misc/afl-fuzz-tokens/kw_pool
new file mode 100644
index 0000000..e783591
--- /dev/null
+++ b/misc/afl-fuzz-tokens/kw_pool
@@ -0,0 +1 @@
+pool \ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/kw_rule b/misc/afl-fuzz-tokens/kw_rule
new file mode 100644
index 0000000..841e840
--- /dev/null
+++ b/misc/afl-fuzz-tokens/kw_rule
@@ -0,0 +1 @@
+rule \ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/kw_subninja b/misc/afl-fuzz-tokens/kw_subninja
new file mode 100644
index 0000000..c4fe0c7
--- /dev/null
+++ b/misc/afl-fuzz-tokens/kw_subninja
@@ -0,0 +1 @@
+subninja \ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/misc_a b/misc/afl-fuzz-tokens/misc_a
new file mode 100644
index 0000000..2e65efe
--- /dev/null
+++ b/misc/afl-fuzz-tokens/misc_a
@@ -0,0 +1 @@
+a \ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/misc_b b/misc/afl-fuzz-tokens/misc_b
new file mode 100644
index 0000000..63d8dbd
--- /dev/null
+++ b/misc/afl-fuzz-tokens/misc_b
@@ -0,0 +1 @@
+b \ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/misc_colon b/misc/afl-fuzz-tokens/misc_colon
new file mode 100644
index 0000000..22ded55
--- /dev/null
+++ b/misc/afl-fuzz-tokens/misc_colon
@@ -0,0 +1 @@
+: \ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/misc_cont b/misc/afl-fuzz-tokens/misc_cont
new file mode 100644
index 0000000..857f13a
--- /dev/null
+++ b/misc/afl-fuzz-tokens/misc_cont
@@ -0,0 +1 @@
+$
diff --git a/misc/afl-fuzz-tokens/misc_dollar b/misc/afl-fuzz-tokens/misc_dollar
new file mode 100644
index 0000000..6f4f765
--- /dev/null
+++ b/misc/afl-fuzz-tokens/misc_dollar
@@ -0,0 +1 @@
+$ \ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/misc_eq b/misc/afl-fuzz-tokens/misc_eq
new file mode 100644
index 0000000..851c75c
--- /dev/null
+++ b/misc/afl-fuzz-tokens/misc_eq
@@ -0,0 +1 @@
+= \ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/misc_indent b/misc/afl-fuzz-tokens/misc_indent
new file mode 100644
index 0000000..136d063
--- /dev/null
+++ b/misc/afl-fuzz-tokens/misc_indent
@@ -0,0 +1 @@
+ \ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/misc_pipe b/misc/afl-fuzz-tokens/misc_pipe
new file mode 100644
index 0000000..a3871d4
--- /dev/null
+++ b/misc/afl-fuzz-tokens/misc_pipe
@@ -0,0 +1 @@
+| \ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/misc_pipepipe b/misc/afl-fuzz-tokens/misc_pipepipe
new file mode 100644
index 0000000..27cc728
--- /dev/null
+++ b/misc/afl-fuzz-tokens/misc_pipepipe
@@ -0,0 +1 @@
+|| \ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/misc_space b/misc/afl-fuzz-tokens/misc_space
new file mode 100644
index 0000000..0519ecb
--- /dev/null
+++ b/misc/afl-fuzz-tokens/misc_space
@@ -0,0 +1 @@
+ \ No newline at end of file
diff --git a/misc/afl-fuzz/build.ninja b/misc/afl-fuzz/build.ninja
new file mode 100644
index 0000000..52cd2f1
--- /dev/null
+++ b/misc/afl-fuzz/build.ninja
@@ -0,0 +1,5 @@
+rule b
+ command = clang -MMD -MF $out.d -o $out -c $in
+ description = building $out
+
+build a.o: b a.c
diff --git a/misc/bash-completion b/misc/bash-completion
new file mode 100644
index 0000000..e604cd4
--- /dev/null
+++ b/misc/bash-completion
@@ -0,0 +1,57 @@
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Add the following to your .bashrc to tab-complete ninja targets
+# . path/to/ninja/misc/bash-completion
+
+_ninja_target() {
+ local cur prev targets dir line targets_command OPTIND
+
+ # When available, use bash_completion to:
+ # 1) Complete words when the cursor is in the middle of the word
+ # 2) Complete paths with files or directories, as appropriate
+ if _get_comp_words_by_ref cur prev &>/dev/null ; then
+ case $prev in
+ -f)
+ _filedir
+ return 0
+ ;;
+ -C)
+ _filedir -d
+ return 0
+ ;;
+ esac
+ else
+ cur="${COMP_WORDS[COMP_CWORD]}"
+ fi
+
+ if [[ "$cur" == "--"* ]]; then
+ # there is currently only one argument that takes --
+ COMPREPLY=($(compgen -P '--' -W 'version' -- "${cur:2}"))
+ else
+ dir="."
+ line=$(echo ${COMP_LINE} | cut -d" " -f 2-)
+    # filter out all non-relevant arguments but keep C for dirs
+ while getopts :C:f:j:l:k:nvd:t: opt $line; do
+ case $opt in
+ # eval for tilde expansion
+ C) eval dir="$OPTARG" ;;
+ esac
+ done;
+ targets_command="eval ninja -C \"${dir}\" -t targets all 2>/dev/null | cut -d: -f1"
+ COMPREPLY=($(compgen -W '`${targets_command}`' -- "$cur"))
+ fi
+ return
+}
+complete -F _ninja_target ninja
diff --git a/misc/ci.py b/misc/ci.py
new file mode 100755
index 0000000..17cbf14
--- /dev/null
+++ b/misc/ci.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python3
+
+import os
+
+ignores = [
+ '.git/',
+ 'misc/afl-fuzz-tokens/',
+ 'ninja_deps',
+ 'src/depfile_parser.cc',
+ 'src/lexer.cc',
+]
+
+error_count = 0
+
+def error(path, msg):
+ global error_count
+ error_count += 1
+ print('\x1b[1;31m{}\x1b[0;31m{}\x1b[0m'.format(path, msg))
+
+for root, directory, filenames in os.walk('.'):
+ for filename in filenames:
+ path = os.path.join(root, filename)[2:]
+ if any([path.startswith(x) for x in ignores]):
+ continue
+ with open(path, 'rb') as file:
+ line_nr = 1
+ try:
+ for line in [x.decode() for x in file.readlines()]:
+ if len(line) == 0 or line[-1] != '\n':
+ error(path, ' missing newline at end of file.')
+ if len(line) > 1:
+ if line[-2] == '\r':
+ error(path, ' has Windows line endings.')
+ break
+ if line[-2] == ' ' or line[-2] == '\t':
+ error(path, ':{} has trailing whitespace.'.format(line_nr))
+ line_nr += 1
+ except UnicodeError:
+ pass # binary file
+
+exit(error_count)
diff --git a/misc/inherited-fds.ninja b/misc/inherited-fds.ninja
new file mode 100644
index 0000000..671155e
--- /dev/null
+++ b/misc/inherited-fds.ninja
@@ -0,0 +1,23 @@
+# This build file prints out a list of open file descriptors in
+# Ninja subprocesses, to help verify we don't accidentally leak
+# any.
+
+# Because one fd leak was in the code managing multiple subprocesses,
+# this test brings up multiple subprocesses and then dumps the fd
+# table of the last one.
+
+# Use like: ./ninja -f misc/inherited-fds.ninja
+
+rule sleep
+ command = sleep 10000
+
+rule dump
+ command = sleep 1; ls -l /proc/self/fd; exit 1
+
+build all: phony a b c d e
+
+build a: sleep
+build b: sleep
+build c: sleep
+build d: sleep
+build e: dump
diff --git a/misc/long-slow-build.ninja b/misc/long-slow-build.ninja
new file mode 100644
index 0000000..46af6ba
--- /dev/null
+++ b/misc/long-slow-build.ninja
@@ -0,0 +1,38 @@
+# An input file for running a "slow" build.
+# Use like: ninja -f misc/long-slow-build.ninja all
+
+rule sleep
+ command = sleep 1
+ description = SLEEP $out
+
+build 0: sleep README
+build 1: sleep README
+build 2: sleep README
+build 3: sleep README
+build 4: sleep README
+build 5: sleep README
+build 6: sleep README
+build 7: sleep README
+build 8: sleep README
+build 9: sleep README
+build 10: sleep 0
+build 11: sleep 1
+build 12: sleep 2
+build 13: sleep 3
+build 14: sleep 4
+build 15: sleep 5
+build 16: sleep 6
+build 17: sleep 7
+build 18: sleep 8
+build 19: sleep 9
+build 20: sleep 10
+build 21: sleep 11
+build 22: sleep 12
+build 23: sleep 13
+build 24: sleep 14
+build 25: sleep 15
+build 26: sleep 16
+build 27: sleep 17
+build 28: sleep 18
+build 29: sleep 19
+build all: phony 20 21 22 23 24 25 26 27 28 29
diff --git a/misc/measure.py b/misc/measure.py
new file mode 100755
index 0000000..8ce95e6
--- /dev/null
+++ b/misc/measure.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""measure the runtime of a command by repeatedly running it.
+"""
+
+from __future__ import print_function
+
+import time
+import subprocess
+import sys
+
+devnull = open('/dev/null', 'w')
+
+def run(cmd, repeat=10):
+ print('sampling:', end=' ')
+ sys.stdout.flush()
+
+ samples = []
+ for _ in range(repeat):
+ start = time.time()
+ subprocess.call(cmd, stdout=devnull, stderr=devnull)
+ end = time.time()
+ dt = (end - start) * 1000
+ print('%dms' % int(dt), end=' ')
+ sys.stdout.flush()
+ samples.append(dt)
+ print()
+
+ # We're interested in the 'pure' runtime of the code, which is
+ # conceptually the smallest time we'd see if we ran it enough times
+ # such that it got the perfect time slices / disk cache hits.
+ best = min(samples)
+ # Also print how varied the outputs were in an attempt to make it
+ # more obvious if something has gone terribly wrong.
+ err = sum(s - best for s in samples) / float(len(samples))
+ print('estimate: %dms (mean err %.1fms)' % (best, err))
+
+if __name__ == '__main__':
+ if len(sys.argv) < 2:
+ print('usage: measure.py command args...')
+ sys.exit(1)
+ run(cmd=sys.argv[1:])
diff --git a/misc/ninja-mode.el b/misc/ninja-mode.el
new file mode 100644
index 0000000..8b975d5
--- /dev/null
+++ b/misc/ninja-mode.el
@@ -0,0 +1,85 @@
+;;; ninja-mode.el --- Major mode for editing .ninja files -*- lexical-binding: t -*-
+
+;; Package-Requires: ((emacs "24"))
+
+;; Copyright 2011 Google Inc. All Rights Reserved.
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+
+;;; Commentary:
+
+;; Simple emacs mode for editing .ninja files.
+;; Just some syntax highlighting for now.
+
+;;; Code:
+
+(defvar ninja-keywords
+ `((,(concat "^" (regexp-opt '("rule" "build" "subninja" "include"
+ "pool" "default")
+ 'words))
+ . font-lock-keyword-face)
+ ("\\([[:alnum:]_]+\\) =" 1 font-lock-variable-name-face)
+ ;; Variable expansion.
+ ("$[[:alnum:]_]+" . font-lock-variable-name-face)
+ ("${[[:alnum:]._]+}" . font-lock-variable-name-face)
+ ;; Rule names
+ ("rule +\\([[:alnum:]_.-]+\\)" 1 font-lock-function-name-face)
+ ;; Build Statement - highlight the rule used,
+ ;; allow for escaped $,: in outputs.
+ ("build +\\(?:[^:$\n]\\|$[:$]\\)+ *: *\\([[:alnum:]_.-]+\\)"
+ 1 font-lock-function-name-face)))
+
+(defvar ninja-mode-syntax-table
+ (let ((table (make-syntax-table)))
+ (modify-syntax-entry ?\" "." table)
+ table)
+ "Syntax table used in `ninja-mode'.")
+
+(defun ninja-syntax-propertize (start end)
+ (save-match-data
+ (goto-char start)
+ (while (search-forward "#" end t)
+ (let ((match-pos (match-beginning 0)))
+ (when (and
+ ;; Is it the first non-white character on the line?
+ (eq match-pos (save-excursion (back-to-indentation) (point)))
+ (save-excursion
+ (goto-char (line-end-position 0))
+ (or
+ ;; If we're continuing the previous line, it's not a
+ ;; comment.
+ (not (eq ?$ (char-before)))
+ ;; Except if the previous line is a comment as well, as the
+ ;; continuation dollar is ignored then.
+ (nth 4 (syntax-ppss)))))
+ (put-text-property match-pos (1+ match-pos) 'syntax-table '(11))
+ (let ((line-end (line-end-position)))
+ ;; Avoid putting properties past the end of the buffer.
+ ;; Otherwise we get an `args-out-of-range' error.
+ (unless (= line-end (1+ (buffer-size)))
+ (put-text-property line-end (1+ line-end) 'syntax-table '(12)))))))))
+
+;;;###autoload
+(define-derived-mode ninja-mode prog-mode "ninja"
+ (set (make-local-variable 'comment-start) "#")
+ (set (make-local-variable 'parse-sexp-lookup-properties) t)
+ (set (make-local-variable 'syntax-propertize-function) #'ninja-syntax-propertize)
+ (setq font-lock-defaults '(ninja-keywords)))
+
+;; Run ninja-mode for files ending in .ninja.
+;;;###autoload
+(add-to-list 'auto-mode-alist '("\\.ninja$" . ninja-mode))
+
+(provide 'ninja-mode)
+
+;;; ninja-mode.el ends here
diff --git a/misc/ninja.vim b/misc/ninja.vim
new file mode 100644
index 0000000..c1ffd50
--- /dev/null
+++ b/misc/ninja.vim
@@ -0,0 +1,87 @@
+" ninja build file syntax.
+" Language: ninja build file as described at
+" http://ninja-build.org/manual.html
+" Version: 1.5
+" Last Change: 2018/04/05
+" Maintainer: Nicolas Weber <nicolasweber@gmx.de>
+" Version 1.4 of this script is in the upstream vim repository and will be
+" included in the next vim release. If you change this, please send your change
+" upstream.
+
+" ninja lexer and parser are at
+" https://github.com/ninja-build/ninja/blob/master/src/lexer.in.cc
+" https://github.com/ninja-build/ninja/blob/master/src/manifest_parser.cc
+
+if exists("b:current_syntax")
+ finish
+endif
+
+let s:cpo_save = &cpo
+set cpo&vim
+
+syn case match
+
+" Comments are only matched when the # is at the beginning of the line (with
+" optional whitespace), as long as the prior line didn't end with a $
+" continuation.
+syn match ninjaComment /\(\$\n\)\@<!\_^\s*#.*$/ contains=@Spell
+
+" Toplevel statements are the ones listed here and
+" toplevel variable assignments (ident '=' value).
+" lexer.in.cc, ReadToken() and manifest_parser.cc, Parse()
+syn match ninjaKeyword "^build\>"
+syn match ninjaKeyword "^rule\>"
+syn match ninjaKeyword "^pool\>"
+syn match ninjaKeyword "^default\>"
+syn match ninjaKeyword "^include\>"
+syn match ninjaKeyword "^subninja\>"
+
+" Both 'build' and 'rule' begin a variable scope that ends
+" on the first line without indent. 'rule' allows only a
+" limited set of magic variables, 'build' allows general
+" let assignments.
+" manifest_parser.cc, ParseRule()
+syn region ninjaRule start="^rule" end="^\ze\S" contains=TOP transparent
+syn keyword ninjaRuleCommand contained containedin=ninjaRule command
+ \ deps depfile description generator
+ \ pool restat rspfile rspfile_content
+
+syn region ninjaPool start="^pool" end="^\ze\S" contains=TOP transparent
+syn keyword ninjaPoolCommand contained containedin=ninjaPool depth
+
+" Strings are parsed as follows:
+" lexer.in.cc, ReadEvalString()
+" simple_varname = [a-zA-Z0-9_-]+;
+" varname = [a-zA-Z0-9_.-]+;
+" $$ -> $
+" $\n -> line continuation
+" '$ ' -> escaped space
+" $simple_varname -> variable
+" ${varname} -> variable
+
+syn match ninjaDollar "\$\$"
+syn match ninjaWrapLineOperator "\$$"
+syn match ninjaSimpleVar "\$[a-zA-Z0-9_-]\+"
+syn match ninjaVar "\${[a-zA-Z0-9_.-]\+}"
+
+" operators are:
+" variable assignment =
+" rule definition :
+" implicit dependency |
+" order-only dependency ||
+syn match ninjaOperator "\(=\|:\||\|||\)\ze\s"
+
+hi def link ninjaComment Comment
+hi def link ninjaKeyword Keyword
+hi def link ninjaRuleCommand Statement
+hi def link ninjaPoolCommand Statement
+hi def link ninjaDollar ninjaOperator
+hi def link ninjaWrapLineOperator ninjaOperator
+hi def link ninjaOperator Operator
+hi def link ninjaSimpleVar ninjaVar
+hi def link ninjaVar Identifier
+
+let b:current_syntax = "ninja"
+
+let &cpo = s:cpo_save
+unlet s:cpo_save
diff --git a/misc/ninja_syntax.py b/misc/ninja_syntax.py
new file mode 100644
index 0000000..ab5c0d4
--- /dev/null
+++ b/misc/ninja_syntax.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Python module for generating .ninja files.
+
+Note that this is emphatically not a required piece of Ninja; it's
+just a helpful utility for build-file-generation systems that already
+use Python.
+"""
+
+import re
+import textwrap
+
+def escape_path(word):
+ return word.replace('$ ', '$$ ').replace(' ', '$ ').replace(':', '$:')
+
+class Writer(object):
+ def __init__(self, output, width=78):
+ self.output = output
+ self.width = width
+
+ def newline(self):
+ self.output.write('\n')
+
+ def comment(self, text):
+ for line in textwrap.wrap(text, self.width - 2, break_long_words=False,
+ break_on_hyphens=False):
+ self.output.write('# ' + line + '\n')
+
+ def variable(self, key, value, indent=0):
+ if value is None:
+ return
+ if isinstance(value, list):
+ value = ' '.join(filter(None, value)) # Filter out empty strings.
+ self._line('%s = %s' % (key, value), indent)
+
+ def pool(self, name, depth):
+ self._line('pool %s' % name)
+ self.variable('depth', depth, indent=1)
+
+ def rule(self, name, command, description=None, depfile=None,
+ generator=False, pool=None, restat=False, rspfile=None,
+ rspfile_content=None, deps=None):
+ self._line('rule %s' % name)
+ self.variable('command', command, indent=1)
+ if description:
+ self.variable('description', description, indent=1)
+ if depfile:
+ self.variable('depfile', depfile, indent=1)
+ if generator:
+ self.variable('generator', '1', indent=1)
+ if pool:
+ self.variable('pool', pool, indent=1)
+ if restat:
+ self.variable('restat', '1', indent=1)
+ if rspfile:
+ self.variable('rspfile', rspfile, indent=1)
+ if rspfile_content:
+ self.variable('rspfile_content', rspfile_content, indent=1)
+ if deps:
+ self.variable('deps', deps, indent=1)
+
+ def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
+ variables=None, implicit_outputs=None, pool=None):
+ outputs = as_list(outputs)
+ out_outputs = [escape_path(x) for x in outputs]
+ all_inputs = [escape_path(x) for x in as_list(inputs)]
+
+ if implicit:
+ implicit = [escape_path(x) for x in as_list(implicit)]
+ all_inputs.append('|')
+ all_inputs.extend(implicit)
+ if order_only:
+ order_only = [escape_path(x) for x in as_list(order_only)]
+ all_inputs.append('||')
+ all_inputs.extend(order_only)
+ if implicit_outputs:
+ implicit_outputs = [escape_path(x)
+ for x in as_list(implicit_outputs)]
+ out_outputs.append('|')
+ out_outputs.extend(implicit_outputs)
+
+ self._line('build %s: %s' % (' '.join(out_outputs),
+ ' '.join([rule] + all_inputs)))
+ if pool is not None:
+ self._line(' pool = %s' % pool)
+
+ if variables:
+ if isinstance(variables, dict):
+ iterator = iter(variables.items())
+ else:
+ iterator = iter(variables)
+
+ for key, val in iterator:
+ self.variable(key, val, indent=1)
+
+ return outputs
+
+ def include(self, path):
+ self._line('include %s' % path)
+
+ def subninja(self, path):
+ self._line('subninja %s' % path)
+
+ def default(self, paths):
+ self._line('default %s' % ' '.join(as_list(paths)))
+
+ def _count_dollars_before_index(self, s, i):
+ """Returns the number of '$' characters right in front of s[i]."""
+ dollar_count = 0
+ dollar_index = i - 1
+ while dollar_index > 0 and s[dollar_index] == '$':
+ dollar_count += 1
+ dollar_index -= 1
+ return dollar_count
+
+ def _line(self, text, indent=0):
+ """Write 'text' word-wrapped at self.width characters."""
+ leading_space = ' ' * indent
+ while len(leading_space) + len(text) > self.width:
+ # The text is too wide; wrap if possible.
+
+ # Find the rightmost space that would obey our width constraint and
+ # that's not an escaped space.
+ available_space = self.width - len(leading_space) - len(' $')
+ space = available_space
+ while True:
+ space = text.rfind(' ', 0, space)
+ if (space < 0 or
+ self._count_dollars_before_index(text, space) % 2 == 0):
+ break
+
+ if space < 0:
+ # No such space; just use the first unescaped space we can find.
+ space = available_space - 1
+ while True:
+ space = text.find(' ', space + 1)
+ if (space < 0 or
+ self._count_dollars_before_index(text, space) % 2 == 0):
+ break
+ if space < 0:
+ # Give up on breaking.
+ break
+
+ self.output.write(leading_space + text[0:space] + ' $\n')
+ text = text[space+1:]
+
+ # Subsequent lines are continuations, so indent them.
+ leading_space = ' ' * (indent+2)
+
+ self.output.write(leading_space + text + '\n')
+
+ def close(self):
+ self.output.close()
+
+
+def as_list(input):
+ if input is None:
+ return []
+ if isinstance(input, list):
+ return input
+ return [input]
+
+
+def escape(string):
+ """Escape a string such that it can be embedded into a Ninja file without
+ further interpretation."""
+ assert '\n' not in string, 'Ninja syntax does not allow newlines'
+ # We only have one special metacharacter: '$'.
+ return string.replace('$', '$$')
+
+
+def expand(string, vars, local_vars={}):
+ """Expand a string containing $vars as Ninja would.
+
+ Note: doesn't handle the full Ninja variable syntax, but it's enough
+ to make configure.py's use of it work.
+ """
+ def exp(m):
+ var = m.group(1)
+ if var == '$':
+ return '$'
+ return local_vars.get(var, vars.get(var, ''))
+ return re.sub(r'\$(\$|\w*)', exp, string)
diff --git a/misc/ninja_syntax_test.py b/misc/ninja_syntax_test.py
new file mode 100755
index 0000000..90ff9c6
--- /dev/null
+++ b/misc/ninja_syntax_test.py
@@ -0,0 +1,191 @@
+#!/usr/bin/env python
+
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+try:
+ from StringIO import StringIO
+except ImportError:
+ from io import StringIO
+
+import ninja_syntax
+
+LONGWORD = 'a' * 10
+LONGWORDWITHSPACES = 'a'*5 + '$ ' + 'a'*5
+INDENT = ' '
+
+class TestLineWordWrap(unittest.TestCase):
+ def setUp(self):
+ self.out = StringIO()
+ self.n = ninja_syntax.Writer(self.out, width=8)
+
+ def test_single_long_word(self):
+ # We shouldn't wrap a single long word.
+ self.n._line(LONGWORD)
+ self.assertEqual(LONGWORD + '\n', self.out.getvalue())
+
+ def test_few_long_words(self):
+ # We should wrap a line where the second word is overlong.
+ self.n._line(' '.join(['x', LONGWORD, 'y']))
+ self.assertEqual(' $\n'.join(['x',
+ INDENT + LONGWORD,
+ INDENT + 'y']) + '\n',
+ self.out.getvalue())
+
+ def test_comment_wrap(self):
+ # Filenames should not be wrapped
+ self.n.comment('Hello /usr/local/build-tools/bin')
+ self.assertEqual('# Hello\n# /usr/local/build-tools/bin\n',
+ self.out.getvalue())
+
+ def test_short_words_indented(self):
+        # Test that indent is taken into account when breaking subsequent lines.
+ # The second line should not be ' to tree', as that's longer than the
+ # test layout width of 8.
+ self.n._line('line_one to tree')
+ self.assertEqual('''\
+line_one $
+ to $
+ tree
+''',
+ self.out.getvalue())
+
+ def test_few_long_words_indented(self):
+ # Check wrapping in the presence of indenting.
+ self.n._line(' '.join(['x', LONGWORD, 'y']), indent=1)
+ self.assertEqual(' $\n'.join([' ' + 'x',
+ ' ' + INDENT + LONGWORD,
+ ' ' + INDENT + 'y']) + '\n',
+ self.out.getvalue())
+
+ def test_escaped_spaces(self):
+ self.n._line(' '.join(['x', LONGWORDWITHSPACES, 'y']))
+ self.assertEqual(' $\n'.join(['x',
+ INDENT + LONGWORDWITHSPACES,
+ INDENT + 'y']) + '\n',
+ self.out.getvalue())
+
+ def test_fit_many_words(self):
+ self.n = ninja_syntax.Writer(self.out, width=78)
+ self.n._line('command = cd ../../chrome; python ../tools/grit/grit/format/repack.py ../out/Debug/obj/chrome/chrome_dll.gen/repack/theme_resources_large.pak ../out/Debug/gen/chrome/theme_resources_large.pak', 1)
+ self.assertEqual('''\
+ command = cd ../../chrome; python ../tools/grit/grit/format/repack.py $
+ ../out/Debug/obj/chrome/chrome_dll.gen/repack/theme_resources_large.pak $
+ ../out/Debug/gen/chrome/theme_resources_large.pak
+''',
+ self.out.getvalue())
+
+ def test_leading_space(self):
+ self.n = ninja_syntax.Writer(self.out, width=14) # force wrapping
+ self.n.variable('foo', ['', '-bar', '-somethinglong'], 0)
+ self.assertEqual('''\
+foo = -bar $
+ -somethinglong
+''',
+ self.out.getvalue())
+
+ def test_embedded_dollar_dollar(self):
+ self.n = ninja_syntax.Writer(self.out, width=15) # force wrapping
+ self.n.variable('foo', ['a$$b', '-somethinglong'], 0)
+ self.assertEqual('''\
+foo = a$$b $
+ -somethinglong
+''',
+ self.out.getvalue())
+
+ def test_two_embedded_dollar_dollars(self):
+ self.n = ninja_syntax.Writer(self.out, width=17) # force wrapping
+ self.n.variable('foo', ['a$$b', '-somethinglong'], 0)
+ self.assertEqual('''\
+foo = a$$b $
+ -somethinglong
+''',
+ self.out.getvalue())
+
+ def test_leading_dollar_dollar(self):
+ self.n = ninja_syntax.Writer(self.out, width=14) # force wrapping
+ self.n.variable('foo', ['$$b', '-somethinglong'], 0)
+ self.assertEqual('''\
+foo = $$b $
+ -somethinglong
+''',
+ self.out.getvalue())
+
+ def test_trailing_dollar_dollar(self):
+ self.n = ninja_syntax.Writer(self.out, width=14) # force wrapping
+ self.n.variable('foo', ['a$$', '-somethinglong'], 0)
+ self.assertEqual('''\
+foo = a$$ $
+ -somethinglong
+''',
+ self.out.getvalue())
+
+class TestBuild(unittest.TestCase):
+ def setUp(self):
+ self.out = StringIO()
+ self.n = ninja_syntax.Writer(self.out)
+
+ def test_variables_dict(self):
+ self.n.build('out', 'cc', 'in', variables={'name': 'value'})
+ self.assertEqual('''\
+build out: cc in
+ name = value
+''',
+ self.out.getvalue())
+
+ def test_variables_list(self):
+ self.n.build('out', 'cc', 'in', variables=[('name', 'value')])
+ self.assertEqual('''\
+build out: cc in
+ name = value
+''',
+ self.out.getvalue())
+
+ def test_implicit_outputs(self):
+ self.n.build('o', 'cc', 'i', implicit_outputs='io')
+ self.assertEqual('''\
+build o | io: cc i
+''',
+ self.out.getvalue())
+
+class TestExpand(unittest.TestCase):
+ def test_basic(self):
+ vars = {'x': 'X'}
+ self.assertEqual('foo', ninja_syntax.expand('foo', vars))
+
+ def test_var(self):
+ vars = {'xyz': 'XYZ'}
+ self.assertEqual('fooXYZ', ninja_syntax.expand('foo$xyz', vars))
+
+ def test_vars(self):
+ vars = {'x': 'X', 'y': 'YYY'}
+ self.assertEqual('XYYY', ninja_syntax.expand('$x$y', vars))
+
+ def test_space(self):
+ vars = {}
+ self.assertEqual('x y z', ninja_syntax.expand('x$ y$ z', vars))
+
+ def test_locals(self):
+ vars = {'x': 'a'}
+ local_vars = {'x': 'b'}
+ self.assertEqual('a', ninja_syntax.expand('$x', vars))
+ self.assertEqual('b', ninja_syntax.expand('$x', vars, local_vars))
+
+ def test_double(self):
+ self.assertEqual('a b$c', ninja_syntax.expand('a$ b$$c', {}))
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/misc/output_test.py b/misc/output_test.py
new file mode 100755
index 0000000..b63520f
--- /dev/null
+++ b/misc/output_test.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python3
+
+"""Runs ./ninja and checks if the output is correct.
+
+In order to simulate a smart terminal it uses the 'script' command.
+"""
+
+import os
+import platform
+import subprocess
+import sys
+import tempfile
+import unittest
+
+default_env = dict(os.environ)
+if 'NINJA_STATUS' in default_env:
+ del default_env['NINJA_STATUS']
+if 'CLICOLOR_FORCE' in default_env:
+ del default_env['CLICOLOR_FORCE']
+default_env['TERM'] = ''
+NINJA_PATH = os.path.abspath('./ninja')
+
+def run(build_ninja, flags='', pipe=False, env=default_env):
+ with tempfile.TemporaryDirectory() as d:
+ os.chdir(d)
+ with open('build.ninja', 'w') as f:
+ f.write(build_ninja)
+ f.flush()
+ ninja_cmd = '{} {}'.format(NINJA_PATH, flags)
+ try:
+ if pipe:
+ output = subprocess.check_output([ninja_cmd], shell=True, env=env)
+ elif platform.system() == 'Darwin':
+ output = subprocess.check_output(['script', '-q', '/dev/null', 'bash', '-c', ninja_cmd],
+ env=env)
+ else:
+ output = subprocess.check_output(['script', '-qfec', ninja_cmd, '/dev/null'],
+ env=env)
+ except subprocess.CalledProcessError as err:
+ sys.stdout.buffer.write(err.output)
+ raise err
+ final_output = ''
+ for line in output.decode('utf-8').splitlines(True):
+ if len(line) > 0 and line[-1] == '\r':
+ continue
+ final_output += line.replace('\r', '')
+ return final_output
+
+@unittest.skipIf(platform.system() == 'Windows', 'These test methods do not work on Windows')
+class Output(unittest.TestCase):
+ def test_issue_1418(self):
+ self.assertEqual(run(
+'''rule echo
+ command = sleep $delay && echo $out
+ description = echo $out
+
+build a: echo
+ delay = 3
+build b: echo
+ delay = 2
+build c: echo
+ delay = 1
+''', '-j3'),
+'''[1/3] echo c\x1b[K
+c
+[2/3] echo b\x1b[K
+b
+[3/3] echo a\x1b[K
+a
+''')
+
+ def test_issue_1214(self):
+ print_red = '''rule echo
+ command = printf '\x1b[31mred\x1b[0m'
+ description = echo $out
+
+build a: echo
+'''
+ # Only strip color when ninja's output is piped.
+ self.assertEqual(run(print_red),
+'''[1/1] echo a\x1b[K
+\x1b[31mred\x1b[0m
+''')
+ self.assertEqual(run(print_red, pipe=True),
+'''[1/1] echo a
+red
+''')
+ # Even in verbose mode, colors should still only be stripped when piped.
+ self.assertEqual(run(print_red, flags='-v'),
+'''[1/1] printf '\x1b[31mred\x1b[0m'
+\x1b[31mred\x1b[0m
+''')
+ self.assertEqual(run(print_red, flags='-v', pipe=True),
+'''[1/1] printf '\x1b[31mred\x1b[0m'
+red
+''')
+
+ # CLICOLOR_FORCE=1 can be used to disable escape code stripping.
+ env = default_env.copy()
+ env['CLICOLOR_FORCE'] = '1'
+ self.assertEqual(run(print_red, pipe=True, env=env),
+'''[1/1] echo a
+\x1b[31mred\x1b[0m
+''')
+
+ def test_pr_1685(self):
+ # Running those tools without .ninja_deps and .ninja_log shouldn't fail.
+ self.assertEqual(run('', flags='-t recompact'), '')
+ self.assertEqual(run('', flags='-t restat'), '')
+
+ def test_status(self):
+ self.assertEqual(run(''), 'ninja: no work to do.\n')
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/misc/packaging/ninja.spec b/misc/packaging/ninja.spec
new file mode 100644
index 0000000..36e5181
--- /dev/null
+++ b/misc/packaging/ninja.spec
@@ -0,0 +1,42 @@
+Summary: Ninja is a small build system with a focus on speed.
+Name: ninja
+Version: %{ver}
+Release: %{rel}%{?dist}
+Group: Development/Tools
+License: Apache 2.0
+URL: https://github.com/ninja-build/ninja
+Source0: %{name}-%{version}-%{rel}.tar.gz
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{rel}
+
+BuildRequires: asciidoc
+
+%description
+Ninja is yet another build system. It takes as input the interdependencies of files (typically source code and output executables) and
+orchestrates building them, quickly.
+
+Ninja joins a sea of other build systems. Its distinguishing goal is to be fast. It is born from my work on the Chromium browser project,
+which has over 30,000 source files and whose other build systems (including one built from custom non-recursive Makefiles) can take ten
+seconds to start building after changing one file. Ninja is under a second.
+
+%prep
+%setup -q -n %{name}-%{version}-%{rel}
+
+%build
+echo Building..
+./configure.py --bootstrap
+./ninja manual
+
+%install
+mkdir -p %{buildroot}%{_bindir} %{buildroot}%{_docdir}
+cp -p ninja %{buildroot}%{_bindir}/
+
+%files
+%defattr(-, root, root)
+%doc COPYING README.md doc/manual.html
+%{_bindir}/*
+
+%clean
+rm -rf %{buildroot}
+
+#The changelog is built automatically from Git history
+%changelog
diff --git a/misc/packaging/rpmbuild.sh b/misc/packaging/rpmbuild.sh
new file mode 100755
index 0000000..9b74c65
--- /dev/null
+++ b/misc/packaging/rpmbuild.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+echo Building ninja RPMs..
+GITROOT=$(git rev-parse --show-toplevel)
+cd $GITROOT
+
+VER=1.0
+REL=$(git rev-parse --short HEAD)git
+RPMTOPDIR=$GITROOT/rpm-build
+echo "Ver: $VER, Release: $REL"
+
+# Create tarball
+mkdir -p $RPMTOPDIR/{SOURCES,SPECS}
+git archive --format=tar --prefix=ninja-${VER}-${REL}/ HEAD | gzip -c > $RPMTOPDIR/SOURCES/ninja-${VER}-${REL}.tar.gz
+
+# Convert git log to RPM's ChangeLog format (shown with rpm -qp --changelog <rpm file>)
+sed -e "s/%{ver}/$VER/" -e "s/%{rel}/$REL/" misc/packaging/ninja.spec > $RPMTOPDIR/SPECS/ninja.spec
+git log --format="* %cd %aN%n- (%h) %s%d%n" --date=local | sed -r 's/[0-9]+:[0-9]+:[0-9]+ //' >> $RPMTOPDIR/SPECS/ninja.spec
+
+# Build SRC and binary RPMs
+rpmbuild --quiet \
+ --define "_topdir $RPMTOPDIR" \
+ --define "_rpmdir $PWD" \
+ --define "_srcrpmdir $PWD" \
+ --define '_rpmfilename %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm' \
+ -ba $RPMTOPDIR/SPECS/ninja.spec &&
+
+rm -rf $RPMTOPDIR &&
+echo Done
diff --git a/misc/write_fake_manifests.py b/misc/write_fake_manifests.py
new file mode 100644
index 0000000..b3594de
--- /dev/null
+++ b/misc/write_fake_manifests.py
@@ -0,0 +1,272 @@
+#!/usr/bin/env python
+
+"""Writes large manifest files, for manifest parser performance testing.
+
+The generated manifest files are (eerily) similar in appearance and size to the
+ones used in the Chromium project.
+
+Usage:
+ python misc/write_fake_manifests.py outdir # Will run for about 5s.
+
+The program contains a hardcoded random seed, so it will generate the same
+output every time it runs. By changing the seed, it's easy to generate many
+different sets of manifest files.
+"""
+
+import argparse
+import contextlib
+import os
+import random
+import sys
+
+import ninja_syntax
+
+
+def paretoint(avg, alpha):
+ """Returns a random integer that's avg on average, following a power law.
+ alpha determines the shape of the power curve. alpha has to be larger
+ than 1. The closer alpha is to 1, the higher the variation of the returned
+ numbers."""
+ return int(random.paretovariate(alpha) * avg / (alpha / (alpha - 1)))
+
+
+# Based on http://neugierig.org/software/chromium/class-name-generator.html
+def moar(avg_options, p_suffix):
+ kStart = ['render', 'web', 'browser', 'tab', 'content', 'extension', 'url',
+ 'file', 'sync', 'content', 'http', 'profile']
+ kOption = ['view', 'host', 'holder', 'container', 'impl', 'ref',
+ 'delegate', 'widget', 'proxy', 'stub', 'context',
+ 'manager', 'master', 'watcher', 'service', 'file', 'data',
+ 'resource', 'device', 'info', 'provider', 'internals', 'tracker',
+ 'api', 'layer']
+ kOS = ['win', 'mac', 'aura', 'linux', 'android', 'unittest', 'browsertest']
+ num_options = min(paretoint(avg_options, alpha=4), 5)
+ # The original allows kOption to repeat as long as no consecutive options
+ # repeat. This version doesn't allow any option repetition.
+ name = [random.choice(kStart)] + random.sample(kOption, num_options)
+ if random.random() < p_suffix:
+ name.append(random.choice(kOS))
+ return '_'.join(name)
+
+
+class GenRandom(object):
+ def __init__(self, src_dir):
+ self.seen_names = set([None])
+ self.seen_defines = set([None])
+ self.src_dir = src_dir
+
+ def _unique_string(self, seen, avg_options=1.3, p_suffix=0.1):
+ s = None
+ while s in seen:
+ s = moar(avg_options, p_suffix)
+ seen.add(s)
+ return s
+
+ def _n_unique_strings(self, n):
+ seen = set([None])
+ return [self._unique_string(seen, avg_options=3, p_suffix=0.4)
+ for _ in xrange(n)]
+
+ def target_name(self):
+ return self._unique_string(p_suffix=0, seen=self.seen_names)
+
+ def path(self):
+ return os.path.sep.join([
+ self._unique_string(self.seen_names, avg_options=1, p_suffix=0)
+ for _ in xrange(1 + paretoint(0.6, alpha=4))])
+
+ def src_obj_pairs(self, path, name):
+ num_sources = paretoint(55, alpha=2) + 1
+ return [(os.path.join(self.src_dir, path, s + '.cc'),
+ os.path.join('obj', path, '%s.%s.o' % (name, s)))
+ for s in self._n_unique_strings(num_sources)]
+
+ def defines(self):
+ return [
+ '-DENABLE_' + self._unique_string(self.seen_defines).upper()
+ for _ in xrange(paretoint(20, alpha=3))]
+
+
+LIB, EXE = 0, 1
+class Target(object):
+ def __init__(self, gen, kind):
+ self.name = gen.target_name()
+ self.dir_path = gen.path()
+ self.ninja_file_path = os.path.join(
+ 'obj', self.dir_path, self.name + '.ninja')
+ self.src_obj_pairs = gen.src_obj_pairs(self.dir_path, self.name)
+ if kind == LIB:
+ self.output = os.path.join('lib' + self.name + '.a')
+ elif kind == EXE:
+ self.output = os.path.join(self.name)
+ self.defines = gen.defines()
+ self.deps = []
+ self.kind = kind
+ self.has_compile_depends = random.random() < 0.4
+
+
+def write_target_ninja(ninja, target, src_dir):
+ compile_depends = None
+ if target.has_compile_depends:
+ compile_depends = os.path.join(
+ 'obj', target.dir_path, target.name + '.stamp')
+ ninja.build(compile_depends, 'stamp', target.src_obj_pairs[0][0])
+ ninja.newline()
+
+ ninja.variable('defines', target.defines)
+ ninja.variable('includes', '-I' + src_dir)
+ ninja.variable('cflags', ['-Wall', '-fno-rtti', '-fno-exceptions'])
+ ninja.newline()
+
+ for src, obj in target.src_obj_pairs:
+ ninja.build(obj, 'cxx', src, implicit=compile_depends)
+ ninja.newline()
+
+ deps = [dep.output for dep in target.deps]
+ libs = [dep.output for dep in target.deps if dep.kind == LIB]
+ if target.kind == EXE:
+ ninja.variable('libs', libs)
+ if sys.platform == "darwin":
+ ninja.variable('ldflags', '-Wl,-pie')
+ link = { LIB: 'alink', EXE: 'link'}[target.kind]
+ ninja.build(target.output, link, [obj for _, obj in target.src_obj_pairs],
+ implicit=deps)
+
+
+def write_sources(target, root_dir):
+ need_main = target.kind == EXE
+
+ includes = []
+
+ # Include siblings.
+ for cc_filename, _ in target.src_obj_pairs:
+ h_filename = os.path.basename(os.path.splitext(cc_filename)[0] + '.h')
+ includes.append(h_filename)
+
+ # Include deps.
+ for dep in target.deps:
+ for cc_filename, _ in dep.src_obj_pairs:
+ h_filename = os.path.basename(
+ os.path.splitext(cc_filename)[0] + '.h')
+ includes.append("%s/%s" % (dep.dir_path, h_filename))
+
+ for cc_filename, _ in target.src_obj_pairs:
+ cc_path = os.path.join(root_dir, cc_filename)
+ h_path = os.path.splitext(cc_path)[0] + '.h'
+ namespace = os.path.basename(target.dir_path)
+ class_ = os.path.splitext(os.path.basename(cc_filename))[0]
+ try:
+ os.makedirs(os.path.dirname(cc_path))
+ except OSError:
+ pass
+
+ with open(h_path, 'w') as f:
+ f.write('namespace %s { struct %s { %s(); }; }' % (namespace,
+ class_, class_))
+ with open(cc_path, 'w') as f:
+ for include in includes:
+ f.write('#include "%s"\n' % include)
+ f.write('\n')
+ f.write('namespace %s { %s::%s() {} }' % (namespace,
+ class_, class_))
+
+ if need_main:
+ f.write('int main(int argc, char **argv) {}\n')
+ need_main = False
+
+def write_master_ninja(master_ninja, targets):
+ """Writes master build.ninja file, referencing all given subninjas."""
+ master_ninja.variable('cxx', 'c++')
+ master_ninja.variable('ld', '$cxx')
+ if sys.platform == 'darwin':
+ master_ninja.variable('alink', 'libtool -static')
+ else:
+ master_ninja.variable('alink', 'ar rcs')
+ master_ninja.newline()
+
+ master_ninja.pool('link_pool', depth=4)
+ master_ninja.newline()
+
+ master_ninja.rule('cxx', description='CXX $out',
+ command='$cxx -MMD -MF $out.d $defines $includes $cflags -c $in -o $out',
+ depfile='$out.d', deps='gcc')
+ master_ninja.rule('alink', description='ARCHIVE $out',
+ command='rm -f $out && $alink -o $out $in')
+ master_ninja.rule('link', description='LINK $out', pool='link_pool',
+ command='$ld $ldflags -o $out $in $libs')
+ master_ninja.rule('stamp', description='STAMP $out', command='touch $out')
+ master_ninja.newline()
+
+ for target in targets:
+ master_ninja.subninja(target.ninja_file_path)
+ master_ninja.newline()
+
+ master_ninja.comment('Short names for targets.')
+ for target in targets:
+ if target.name != target.output:
+ master_ninja.build(target.name, 'phony', target.output)
+ master_ninja.newline()
+
+ master_ninja.build('all', 'phony', [target.output for target in targets])
+ master_ninja.default('all')
+
+
+@contextlib.contextmanager
+def FileWriter(path):
+ """Context manager for a ninja_syntax object writing to a file."""
+ try:
+ os.makedirs(os.path.dirname(path))
+ except OSError:
+ pass
+ f = open(path, 'w')
+ yield ninja_syntax.Writer(f)
+ f.close()
+
+
+def random_targets(num_targets, src_dir):
+ gen = GenRandom(src_dir)
+
+ # N-1 static libraries, and 1 executable depending on all of them.
+ targets = [Target(gen, LIB) for i in xrange(num_targets - 1)]
+ for i in range(len(targets)):
+ targets[i].deps = [t for t in targets[0:i] if random.random() < 0.05]
+
+ last_target = Target(gen, EXE)
+ last_target.deps = targets[:]
+ last_target.src_obj_pairs = last_target.src_obj_pairs[0:10] # Trim.
+ targets.append(last_target)
+ return targets
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-s', '--sources', nargs="?", const="src",
+ help='write sources to directory (relative to output directory)')
+ parser.add_argument('-t', '--targets', type=int, default=1500,
+ help='number of targets (default: 1500)')
+ parser.add_argument('-S', '--seed', type=int, help='random seed',
+ default=12345)
+ parser.add_argument('outdir', help='output directory')
+ args = parser.parse_args()
+ root_dir = args.outdir
+
+ random.seed(args.seed)
+
+ do_write_sources = args.sources is not None
+ src_dir = args.sources if do_write_sources else "src"
+
+ targets = random_targets(args.targets, src_dir)
+ for target in targets:
+ with FileWriter(os.path.join(root_dir, target.ninja_file_path)) as n:
+ write_target_ninja(n, target, src_dir)
+
+ if do_write_sources:
+ write_sources(target, root_dir)
+
+ with FileWriter(os.path.join(root_dir, 'build.ninja')) as master_ninja:
+ master_ninja.width = 120
+ write_master_ninja(master_ninja, targets)
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/misc/zsh-completion b/misc/zsh-completion
new file mode 100644
index 0000000..4cee3b8
--- /dev/null
+++ b/misc/zsh-completion
@@ -0,0 +1,72 @@
+#compdef ninja
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Add the following to your .zshrc to tab-complete ninja targets
+# fpath=(path/to/ninja/misc/zsh-completion $fpath)
+
+__get_targets() {
+ dir="."
+ if [ -n "${opt_args[-C]}" ];
+ then
+ eval dir="${opt_args[-C]}"
+ fi
+ file="build.ninja"
+ if [ -n "${opt_args[-f]}" ];
+ then
+ eval file="${opt_args[-f]}"
+ fi
+ targets_command="ninja -f \"${file}\" -C \"${dir}\" -t targets all"
+ eval ${targets_command} 2>/dev/null | cut -d: -f1
+}
+
+__get_tools() {
+ ninja -t list 2>/dev/null | while read -r a b; do echo $a; done | tail -n +2
+}
+
+__get_modes() {
+ ninja -d list 2>/dev/null | while read -r a b; do echo $a; done | tail -n +2 | sed '$d'
+}
+
+__modes() {
+ local -a modes
+ modes=(${(fo)"$(__get_modes)"})
+ _describe 'modes' modes
+}
+
+__tools() {
+ local -a tools
+ tools=(${(fo)"$(__get_tools)"})
+ _describe 'tools' tools
+}
+
+__targets() {
+ local -a targets
+ targets=(${(fo)"$(__get_targets)"})
+ _describe 'targets' targets
+}
+
+_arguments \
+ {-h,--help}'[Show help]' \
+ '--version[Print ninja version]' \
+ '-C+[Change to directory before doing anything else]:directories:_directories' \
+ '-f+[Specify input build file (default=build.ninja)]:files:_files' \
+ '-j+[Run N jobs in parallel (default=number of CPUs available)]:number of jobs' \
+ '-l+[Do not start new jobs if the load average is greater than N]:number of jobs' \
+ '-k+[Keep going until N jobs fail (default=1)]:number of jobs' \
+ '-n[Dry run (do not run commands but act like they succeeded)]' \
+ '-v[Show all command lines while building]' \
+ '-d+[Enable debugging (use -d list to list modes)]:modes:__modes' \
+ '-t+[Run a subtool (use -t list to list subtools)]:tools:__tools' \
+ '*::targets:__targets'
diff --git a/src/browse.cc b/src/browse.cc
new file mode 100644
index 0000000..76bee07
--- /dev/null
+++ b/src/browse.cc
@@ -0,0 +1,80 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "browse.h"
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <vector>
+
+#include "build/browse_py.h"
+
+using namespace std;
+
+void RunBrowsePython(State* state, const char* ninja_command,
+ const char* input_file, int argc, char* argv[]) {
+ // Fork off a Python process and have it run our code via its stdin.
+ // (Actually the Python process becomes the parent.)
+ int pipefd[2];
+ if (pipe(pipefd) < 0) {
+ perror("ninja: pipe");
+ return;
+ }
+
+ pid_t pid = fork();
+ if (pid < 0) {
+ perror("ninja: fork");
+ return;
+ }
+
+ if (pid > 0) { // Parent.
+ close(pipefd[1]);
+ do {
+ if (dup2(pipefd[0], 0) < 0) {
+ perror("ninja: dup2");
+ break;
+ }
+
+ std::vector<const char *> command;
+ command.push_back(NINJA_PYTHON);
+ command.push_back("-");
+ command.push_back("--ninja-command");
+ command.push_back(ninja_command);
+ command.push_back("-f");
+ command.push_back(input_file);
+ for (int i = 0; i < argc; i++) {
+ command.push_back(argv[i]);
+ }
+ command.push_back(NULL);
+ execvp(command[0], (char**)&command[0]);
+ if (errno == ENOENT) {
+ printf("ninja: %s is required for the browse tool\n", NINJA_PYTHON);
+ } else {
+ perror("ninja: execvp");
+ }
+ } while (false);
+ _exit(1);
+ } else { // Child.
+ close(pipefd[0]);
+
+ // Write the script file into the stdin of the Python process.
+ ssize_t len = write(pipefd[1], kBrowsePy, sizeof(kBrowsePy));
+ if (len < (ssize_t)sizeof(kBrowsePy))
+ perror("ninja: write");
+ close(pipefd[1]);
+ exit(0);
+ }
+}
diff --git a/src/browse.h b/src/browse.h
new file mode 100644
index 0000000..8d6d285
--- /dev/null
+++ b/src/browse.h
@@ -0,0 +1,28 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_BROWSE_H_
+#define NINJA_BROWSE_H_
+
+struct State;
+
+/// Run in "browse" mode, which execs a Python webserver.
+/// \a ninja_command is the command used to invoke ninja.
+/// \a args are the number of arguments to be passed to the Python script.
+/// \a argv are arguments to be passed to the Python script.
+/// This function does not return if it runs successfully.
+void RunBrowsePython(State* state, const char* ninja_command,
+ const char* input_file, int argc, char* argv[]);
+
+#endif // NINJA_BROWSE_H_
diff --git a/src/browse.py b/src/browse.py
new file mode 100755
index 0000000..653cbe9
--- /dev/null
+++ b/src/browse.py
@@ -0,0 +1,233 @@
+#!/usr/bin/env python
+#
+# Copyright 2001 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Simple web server for browsing dependency graph data.
+
+This script is inlined into the final executable and spawned by
+it when needed.
+"""
+
+from __future__ import print_function
+
+try:
+ import http.server as httpserver
+ import socketserver
+except ImportError:
+ import BaseHTTPServer as httpserver
+ import SocketServer as socketserver
+import argparse
+import os
+import socket
+import subprocess
+import sys
+import webbrowser
+if sys.version_info >= (3, 2):
+ from html import escape
+else:
+ from cgi import escape
+try:
+ from urllib.request import unquote
+except ImportError:
+ from urllib2 import unquote
+from collections import namedtuple
+
+Node = namedtuple('Node', ['inputs', 'rule', 'target', 'outputs'])
+
+# Ideally we'd allow you to navigate to a build edge or a build node,
+# with appropriate views for each. But there's no way to *name* a build
+# edge so we can only display nodes.
+#
+# For a given node, it has at most one input edge, which has n
+# different inputs. This becomes node.inputs. (We leave out the
+# outputs of the input edge due to what follows.) The node can have
+# multiple dependent output edges. Rather than attempting to display
+# those, they are summarized by taking the union of all their outputs.
+#
+# This means there's no single view that shows you all inputs and outputs
+# of an edge. But I think it's less confusing than alternatives.
+
+def match_strip(line, prefix):
+ if not line.startswith(prefix):
+ return (False, line)
+ return (True, line[len(prefix):])
+
+def html_escape(text):
+ return escape(text, quote=True)
+
+def parse(text):
+ lines = iter(text.split('\n'))
+
+ target = None
+ rule = None
+ inputs = []
+ outputs = []
+
+ try:
+ target = next(lines)[:-1] # strip trailing colon
+
+ line = next(lines)
+ (match, rule) = match_strip(line, ' input: ')
+ if match:
+ (match, line) = match_strip(next(lines), ' ')
+ while match:
+ type = None
+ (match, line) = match_strip(line, '| ')
+ if match:
+ type = 'implicit'
+ (match, line) = match_strip(line, '|| ')
+ if match:
+ type = 'order-only'
+ inputs.append((line, type))
+ (match, line) = match_strip(next(lines), ' ')
+
+ match, _ = match_strip(line, ' outputs:')
+ if match:
+ (match, line) = match_strip(next(lines), ' ')
+ while match:
+ outputs.append(line)
+ (match, line) = match_strip(next(lines), ' ')
+ except StopIteration:
+ pass
+
+ return Node(inputs, rule, target, outputs)
+
+def create_page(body):
+ return '''<!DOCTYPE html>
+<style>
+body {
+ font-family: sans;
+ font-size: 0.8em;
+ margin: 4ex;
+}
+h1 {
+ font-weight: normal;
+ font-size: 140%;
+ text-align: center;
+ margin: 0;
+}
+h2 {
+ font-weight: normal;
+ font-size: 120%;
+}
+tt {
+ font-family: WebKitHack, monospace;
+ white-space: nowrap;
+}
+.filelist {
+ -webkit-columns: auto 2;
+}
+</style>
+''' + body
+
+def generate_html(node):
+ document = ['<h1><tt>%s</tt></h1>' % html_escape(node.target)]
+
+ if node.inputs:
+ document.append('<h2>target is built using rule <tt>%s</tt> of</h2>' %
+ html_escape(node.rule))
+ if len(node.inputs) > 0:
+ document.append('<div class=filelist>')
+ for input, type in sorted(node.inputs):
+ extra = ''
+ if type:
+ extra = ' (%s)' % html_escape(type)
+ document.append('<tt><a href="?%s">%s</a>%s</tt><br>' %
+ (html_escape(input), html_escape(input), extra))
+ document.append('</div>')
+
+ if node.outputs:
+ document.append('<h2>dependent edges build:</h2>')
+ document.append('<div class=filelist>')
+ for output in sorted(node.outputs):
+ document.append('<tt><a href="?%s">%s</a></tt><br>' %
+ (html_escape(output), html_escape(output)))
+ document.append('</div>')
+
+ return '\n'.join(document)
+
+def ninja_dump(target):
+ cmd = [args.ninja_command, '-f', args.f, '-t', 'query', target]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ universal_newlines=True)
+ return proc.communicate() + (proc.returncode,)
+
+class RequestHandler(httpserver.BaseHTTPRequestHandler):
+ def do_GET(self):
+ assert self.path[0] == '/'
+ target = unquote(self.path[1:])
+
+ if target == '':
+ self.send_response(302)
+ self.send_header('Location', '?' + args.initial_target)
+ self.end_headers()
+ return
+
+ if not target.startswith('?'):
+ self.send_response(404)
+ self.end_headers()
+ return
+ target = target[1:]
+
+ ninja_output, ninja_error, exit_code = ninja_dump(target)
+ if exit_code == 0:
+ page_body = generate_html(parse(ninja_output.strip()))
+ else:
+ # Relay ninja's error message.
+ page_body = '<h1><tt>%s</tt></h1>' % html_escape(ninja_error)
+
+ self.send_response(200)
+ self.end_headers()
+ self.wfile.write(create_page(page_body).encode('utf-8'))
+
+ def log_message(self, format, *args):
+ pass # Swallow console spam.
+
+parser = argparse.ArgumentParser(prog='ninja -t browse')
+parser.add_argument('--port', '-p', default=8000, type=int,
+ help='Port number to use (default %(default)d)')
+parser.add_argument('--hostname', '-a', default='localhost', type=str,
+ help='Hostname to bind to (default %(default)s)')
+parser.add_argument('--no-browser', action='store_true',
+ help='Do not open a webbrowser on startup.')
+
+parser.add_argument('--ninja-command', default='ninja',
+ help='Path to ninja binary (default %(default)s)')
+parser.add_argument('-f', default='build.ninja',
+ help='Path to build.ninja file (default %(default)s)')
+parser.add_argument('initial_target', default='all', nargs='?',
+ help='Initial target to show (default %(default)s)')
+
+class HTTPServer(socketserver.ThreadingMixIn, httpserver.HTTPServer):
+ # terminate server immediately when Python exits.
+ daemon_threads = True
+
+args = parser.parse_args()
+port = args.port
+hostname = args.hostname
+httpd = HTTPServer((hostname,port), RequestHandler)
+try:
+ if hostname == "":
+ hostname = socket.gethostname()
+ print('Web server running on %s:%d, ctl-C to abort...' % (hostname,port) )
+ print('Web server pid %d' % os.getpid(), file=sys.stderr )
+ if not args.no_browser:
+ webbrowser.open_new('http://%s:%s' % (hostname, port) )
+ httpd.serve_forever()
+except KeyboardInterrupt:
+ print()
+ pass # Swallow console spam.
+
+
diff --git a/src/build.cc b/src/build.cc
new file mode 100644
index 0000000..2fb2aa4
--- /dev/null
+++ b/src/build.cc
@@ -0,0 +1,1138 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "build.h"
+
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <functional>
+
+#ifdef _WIN32
+#include <fcntl.h>
+#include <io.h>
+#endif
+
+#if defined(__SVR4) && defined(__sun)
+#include <sys/termios.h>
+#endif
+
+#include "build_log.h"
+#include "clparser.h"
+#include "debug_flags.h"
+#include "depfile_parser.h"
+#include "deps_log.h"
+#include "disk_interface.h"
+#include "graph.h"
+#include "state.h"
+#include "subprocess.h"
+#include "util.h"
+
+using namespace std;
+
+namespace {
+
+/// A CommandRunner that doesn't actually run the commands.
+struct DryRunCommandRunner : public CommandRunner {
+ virtual ~DryRunCommandRunner() {}
+
+ // Overridden from CommandRunner:
+ virtual bool CanRunMore() const;
+ virtual bool StartCommand(Edge* edge);
+ virtual bool WaitForCommand(Result* result);
+
+ private:
+ queue<Edge*> finished_;
+};
+
+bool DryRunCommandRunner::CanRunMore() const {
+ return true;
+}
+
+bool DryRunCommandRunner::StartCommand(Edge* edge) {
+ finished_.push(edge);
+ return true;
+}
+
+bool DryRunCommandRunner::WaitForCommand(Result* result) {
+ if (finished_.empty())
+ return false;
+
+ result->status = ExitSuccess;
+ result->edge = finished_.front();
+ finished_.pop();
+ return true;
+}
+
+} // namespace
+
+BuildStatus::BuildStatus(const BuildConfig& config)
+ : config_(config), start_time_millis_(GetTimeMillis()), started_edges_(0),
+ finished_edges_(0), total_edges_(0), progress_status_format_(NULL),
+ current_rate_(config.parallelism) {
+ // Don't do anything fancy in verbose mode.
+ if (config_.verbosity != BuildConfig::NORMAL)
+ printer_.set_smart_terminal(false);
+
+ progress_status_format_ = getenv("NINJA_STATUS");
+ if (!progress_status_format_)
+ progress_status_format_ = "[%f/%t] ";
+}
+
+void BuildStatus::PlanHasTotalEdges(int total) {
+ total_edges_ = total;
+}
+
+void BuildStatus::BuildEdgeStarted(const Edge* edge) {
+ assert(running_edges_.find(edge) == running_edges_.end());
+ int start_time = (int)(GetTimeMillis() - start_time_millis_);
+ running_edges_.insert(make_pair(edge, start_time));
+ ++started_edges_;
+
+ if (edge->use_console() || printer_.is_smart_terminal())
+ PrintStatus(edge, kEdgeStarted);
+
+ if (edge->use_console())
+ printer_.SetConsoleLocked(true);
+}
+
+void BuildStatus::BuildEdgeFinished(Edge* edge,
+ bool success,
+ const string& output,
+ int* start_time,
+ int* end_time) {
+ int64_t now = GetTimeMillis();
+
+ ++finished_edges_;
+
+ RunningEdgeMap::iterator i = running_edges_.find(edge);
+ *start_time = i->second;
+ *end_time = (int)(now - start_time_millis_);
+ running_edges_.erase(i);
+
+ if (edge->use_console())
+ printer_.SetConsoleLocked(false);
+
+ if (config_.verbosity == BuildConfig::QUIET)
+ return;
+
+ if (!edge->use_console())
+ PrintStatus(edge, kEdgeFinished);
+
+ // Print the command that is spewing before printing its output.
+ if (!success) {
+ string outputs;
+ for (vector<Node*>::const_iterator o = edge->outputs_.begin();
+ o != edge->outputs_.end(); ++o)
+ outputs += (*o)->path() + " ";
+
+ if (printer_.supports_color()) {
+ printer_.PrintOnNewLine("\x1B[31m" "FAILED: " "\x1B[0m" + outputs + "\n");
+ } else {
+ printer_.PrintOnNewLine("FAILED: " + outputs + "\n");
+ }
+ printer_.PrintOnNewLine(edge->EvaluateCommand() + "\n");
+ }
+
+ if (!output.empty()) {
+ // ninja sets stdout and stderr of subprocesses to a pipe, to be able to
+ // check if the output is empty. Some compilers, e.g. clang, check
+ // isatty(stderr) to decide if they should print colored output.
+ // To make it possible to use colored output with ninja, subprocesses should
+ // be run with a flag that forces them to always print color escape codes.
+ // To make sure these escape codes don't show up in a file if ninja's output
+ // is piped to a file, ninja strips ansi escape codes again if it's not
+ // writing to a |smart_terminal_|.
+ // (Launching subprocesses in pseudo ttys doesn't work because there are
+ // only a few hundred available on some systems, and ninja can launch
+ // thousands of parallel compile commands.)
+ string final_output;
+ if (!printer_.supports_color())
+ final_output = StripAnsiEscapeCodes(output);
+ else
+ final_output = output;
+
+#ifdef _WIN32
+ // Fix extra CR being added on Windows, writing out CR CR LF (#773)
+ _setmode(_fileno(stdout), _O_BINARY); // Begin Windows extra CR fix
+#endif
+
+ printer_.PrintOnNewLine(final_output);
+
+#ifdef _WIN32
+ _setmode(_fileno(stdout), _O_TEXT); // End Windows extra CR fix
+#endif
+ }
+}
+
+void BuildStatus::BuildLoadDyndeps() {
+ // The DependencyScan calls EXPLAIN() to print lines explaining why
+ // it considers a portion of the graph to be out of date. Normally
+ // this is done before the build starts, but our caller is about to
+ // load a dyndep file during the build. Doing so may generate more
+ // explanation lines (via fprintf directly to stderr), but in an
+ // interactive console the cursor is currently at the end of a status
+ // line. Start a new line so that the first explanation does not
+ // append to the status line. After the explanations are done a
+ // new build status line will appear.
+ if (g_explaining)
+ printer_.PrintOnNewLine("");
+}
+
+void BuildStatus::BuildStarted() {
+ overall_rate_.Restart();
+ current_rate_.Restart();
+}
+
+void BuildStatus::BuildFinished() {
+ printer_.SetConsoleLocked(false);
+ printer_.PrintOnNewLine("");
+}
+
+// Expand the NINJA_STATUS-style placeholders in |progress_status_format|
+// into a progress prefix string. |status| is the state of the edge the
+// status line is being printed for (a just-finished edge still counts as
+// running for '%r'). Unknown placeholders are a fatal error.
+string BuildStatus::FormatProgressStatus(
+    const char* progress_status_format, EdgeStatus status) const {
+  string out;
+  char buf[32];
+  int percent;
+  for (const char* s = progress_status_format; *s != '\0'; ++s) {
+    if (*s == '%') {
+      ++s;
+      switch (*s) {
+      case '%':
+        out.push_back('%');
+        break;
+
+        // Started edges.
+      case 's':
+        snprintf(buf, sizeof(buf), "%d", started_edges_);
+        out += buf;
+        break;
+
+        // Total edges.
+      case 't':
+        snprintf(buf, sizeof(buf), "%d", total_edges_);
+        out += buf;
+        break;
+
+        // Running edges.
+      case 'r': {
+        int running_edges = started_edges_ - finished_edges_;
+        // count the edge that just finished as a running edge
+        if (status == kEdgeFinished)
+          running_edges++;
+        snprintf(buf, sizeof(buf), "%d", running_edges);
+        out += buf;
+        break;
+      }
+
+        // Unstarted edges.
+      case 'u':
+        snprintf(buf, sizeof(buf), "%d", total_edges_ - started_edges_);
+        out += buf;
+        break;
+
+        // Finished edges.
+      case 'f':
+        snprintf(buf, sizeof(buf), "%d", finished_edges_);
+        out += buf;
+        break;
+
+        // Overall finished edges per second.
+      case 'o':
+        overall_rate_.UpdateRate(finished_edges_);
+        SnprintfRate(overall_rate_.rate(), buf, "%.1f");
+        out += buf;
+        break;
+
+        // Current rate, average over the last '-j' jobs.
+      case 'c':
+        current_rate_.UpdateRate(finished_edges_);
+        SnprintfRate(current_rate_.rate(), buf, "%.1f");
+        out += buf;
+        break;
+
+        // Percentage of finished edges. Guard against a plan with no
+        // edges (total_edges_ == 0) to avoid division by zero.
+      case 'p':
+        percent = total_edges_ ? (100 * finished_edges_) / total_edges_ : 0;
+        snprintf(buf, sizeof(buf), "%3i%%", percent);
+        out += buf;
+        break;
+
+        // Elapsed time in seconds since the build started.
+      case 'e': {
+        double elapsed = overall_rate_.Elapsed();
+        snprintf(buf, sizeof(buf), "%.3f", elapsed);
+        out += buf;
+        break;
+      }
+
+      default:
+        Fatal("unknown placeholder '%%%c' in $NINJA_STATUS", *s);
+        return "";  // Not reached; Fatal() does not return.
+      }
+    } else {
+      out.push_back(*s);
+    }
+  }
+
+  return out;
+}
+
+// Print the status line for |edge|: progress prefix followed by the
+// edge's description (or full command when verbose / no description).
+void BuildStatus::PrintStatus(const Edge* edge, EdgeStatus status) {
+  // Nothing is ever printed in quiet mode.
+  if (config_.verbosity == BuildConfig::QUIET)
+    return;
+
+  const bool verbose = config_.verbosity == BuildConfig::VERBOSE;
+
+  string text = edge->GetBinding("description");
+  if (verbose || text.empty())
+    text = edge->GetBinding("command");
+
+  string line = FormatProgressStatus(progress_status_format_, status);
+  line += text;
+
+  printer_.Print(line, verbose ? LinePrinter::FULL : LinePrinter::ELIDE);
+}
+
+// Construct an empty plan. |builder| may be NULL (e.g. in tests); it is
+// only needed when dyndep files must be loaded during the build.
+Plan::Plan(Builder* builder)
+    : builder_(builder), command_edges_(0), wanted_edges_(0) {}
+
+// Discard all scheduling state, returning the plan to empty.
+void Plan::Reset() {
+  want_.clear();
+  ready_.clear();
+  command_edges_ = 0;
+  wanted_edges_ = 0;
+}
+
+// Public entry point: walk the graph from |node| with no dependent node
+// and no dyndep walk set.
+bool Plan::AddTarget(const Node* node, string* err) {
+  return AddSubTarget(node, NULL, err, NULL);
+}
+
+// Add |node| and (recursively) the edges producing its inputs to the plan.
+// |dependent| is used only to improve the error message for a missing leaf.
+// When |dyndep_walk| is non-NULL we are re-walking edges discovered via a
+// dyndep file: visited edges are collected there instead of being scheduled
+// immediately.
+// NOTE: returns false both on error and when there is simply nothing to do;
+// callers distinguish the two by checking err->empty().
+bool Plan::AddSubTarget(const Node* node, const Node* dependent, string* err,
+                        set<Edge*>* dyndep_walk) {
+  Edge* edge = node->in_edge();
+  if (!edge) {  // Leaf node.
+    if (node->dirty()) {
+      string referenced;
+      if (dependent)
+        referenced = ", needed by '" + dependent->path() + "',";
+      *err = "'" + node->path() + "'" + referenced + " missing "
+             "and no known rule to make it";
+    }
+    return false;
+  }
+
+  if (edge->outputs_ready())
+    return false;  // Don't need to do anything.
+
+  // If an entry in want_ does not already exist for edge, create an entry
+  // which maps to kWantNothing, indicating that we do not want to build this
+  // entry itself.
+  pair<map<Edge*, Want>::iterator, bool> want_ins =
+      want_.insert(make_pair(edge, kWantNothing));
+  Want& want = want_ins.first->second;
+
+  if (dyndep_walk && want == kWantToFinish)
+    return false;  // Don't need to do anything with already-scheduled edge.
+
+  // If we do need to build edge and we haven't already marked it as wanted,
+  // mark it now.
+  if (node->dirty() && want == kWantNothing) {
+    want = kWantToStart;
+    EdgeWanted(edge);
+    // During a dyndep walk, scheduling is deferred to DyndepsLoaded.
+    if (!dyndep_walk && edge->AllInputsReady())
+      ScheduleWork(want_ins.first);
+  }
+
+  if (dyndep_walk)
+    dyndep_walk->insert(edge);
+
+  if (!want_ins.second)
+    return true;  // We've already processed the inputs.
+
+  // Recurse into the edge's inputs; only a non-empty err is a real error.
+  for (vector<Node*>::iterator i = edge->inputs_.begin();
+       i != edge->inputs_.end(); ++i) {
+    if (!AddSubTarget(*i, node, err, dyndep_walk) && !err->empty())
+      return false;
+  }
+
+  return true;
+}
+
+// Account for one newly-wanted edge; phony edges carry no command so
+// they do not count toward command_edges_.
+void Plan::EdgeWanted(const Edge* edge) {
+  ++wanted_edges_;
+  if (!edge->is_phony())
+    ++command_edges_;
+}
+
+// Pop an arbitrary edge from the ready set, or return NULL when there
+// is currently no runnable work.
+Edge* Plan::FindWork() {
+  if (ready_.empty())
+    return NULL;
+  set<Edge*>::iterator first = ready_.begin();
+  Edge* next = *first;
+  ready_.erase(first);
+  return next;
+}
+
+// Submit the edge referenced by |want_e| for execution, marking it
+// kWantToFinish. The edge may be delayed instead of made ready if its
+// pool is currently full.
+void Plan::ScheduleWork(map<Edge*, Want>::iterator want_e) {
+  if (want_e->second == kWantToFinish) {
+    // This edge has already been scheduled. We can get here again if an edge
+    // and one of its dependencies share an order-only input, or if a node
+    // duplicates an out edge (see https://github.com/ninja-build/ninja/pull/519).
+    // Avoid scheduling the work again.
+    return;
+  }
+  assert(want_e->second == kWantToStart);
+  want_e->second = kWantToFinish;
+
+  Edge* edge = want_e->first;
+  Pool* pool = edge->pool();
+  if (pool->ShouldDelayEdge()) {
+    // Pool is full: queue the edge in the pool and pick up whatever the
+    // pool says is ready instead.
+    pool->DelayEdge(edge);
+    pool->RetrieveReadyEdges(&ready_);
+  } else {
+    pool->EdgeScheduled(*edge);
+    ready_.insert(edge);
+  }
+}
+
+// Record that |edge| finished with |result|. Frees the edge's pool slot,
+// and on success marks outputs ready and propagates readiness to
+// dependent edges (which may load dyndep files; hence the err out-param).
+bool Plan::EdgeFinished(Edge* edge, EdgeResult result, string* err) {
+  map<Edge*, Want>::iterator e = want_.find(edge);
+  assert(e != want_.end());
+  bool directly_wanted = e->second != kWantNothing;
+
+  // See if this job frees up any delayed jobs.
+  if (directly_wanted)
+    edge->pool()->EdgeFinished(*edge);
+  edge->pool()->RetrieveReadyEdges(&ready_);
+
+  // The rest of this function only applies to successful commands.
+  if (result != kEdgeSucceeded)
+    return true;
+
+  if (directly_wanted)
+    --wanted_edges_;
+  want_.erase(e);
+  edge->outputs_ready_ = true;
+
+  // Check off any nodes we were waiting for with this edge.
+  for (vector<Node*>::iterator o = edge->outputs_.begin();
+       o != edge->outputs_.end(); ++o) {
+    if (!NodeFinished(*o, err))
+      return false;
+  }
+  return true;
+}
+
+// Update the plan with the knowledge that |node| is now up to date, and
+// check whether any edge consuming it has become ready.
+bool Plan::NodeFinished(Node* node, string* err) {
+  // If this node provides dyndep info, load it now.
+  if (node->dyndep_pending()) {
+    assert(builder_ && "dyndep requires Plan to have a Builder");
+    // Load the now-clean dyndep file. This will also update the
+    // build plan and schedule any new work that is ready.
+    return builder_->LoadDyndeps(node, err);
+  }
+
+  // See if we want any edges from this node.
+  for (vector<Edge*>::const_iterator oe = node->out_edges().begin();
+       oe != node->out_edges().end(); ++oe) {
+    map<Edge*, Want>::iterator want_e = want_.find(*oe);
+    if (want_e == want_.end())
+      continue;
+
+    // See if the edge is now ready.
+    if (!EdgeMaybeReady(want_e, err))
+      return false;
+  }
+  return true;
+}
+
+// If all of the edge's inputs are ready, either schedule it (when wanted)
+// or treat it as trivially finished so its dependents can proceed.
+bool Plan::EdgeMaybeReady(map<Edge*, Want>::iterator want_e, string* err) {
+  Edge* edge = want_e->first;
+  if (edge->AllInputsReady()) {
+    if (want_e->second != kWantNothing) {
+      ScheduleWork(want_e);
+    } else {
+      // We do not need to build this edge, but we might need to build one of
+      // its dependents.
+      if (!EdgeFinished(edge, kEdgeSucceeded, err))
+        return false;
+    }
+  }
+  return true;
+}
+
+// Mark |node| clean (used after a "restat" rule left its output
+// unchanged) and propagate cleanliness: out-edges whose outputs turn out
+// not dirty are dropped from the wanted set, recursively.
+bool Plan::CleanNode(DependencyScan* scan, Node* node, string* err) {
+  node->set_dirty(false);
+
+  for (vector<Edge*>::const_iterator oe = node->out_edges().begin();
+       oe != node->out_edges().end(); ++oe) {
+    // Don't process edges that we don't actually want.
+    map<Edge*, Want>::iterator want_e = want_.find(*oe);
+    if (want_e == want_.end() || want_e->second == kWantNothing)
+      continue;
+
+    // Don't attempt to clean an edge if it failed to load deps.
+    if ((*oe)->deps_missing_)
+      continue;
+
+    // If all non-order-only inputs for this edge are now clean,
+    // we might have changed the dirty state of the outputs.
+    vector<Node*>::iterator
+        begin = (*oe)->inputs_.begin(),
+        end = (*oe)->inputs_.end() - (*oe)->order_only_deps_;
+// Pick the member-function adaptor available for the build's C++ mode.
+#if __cplusplus < 201703L
+#define MEM_FN mem_fun
+#else
+#define MEM_FN mem_fn  // mem_fun was removed in C++17.
+#endif
+    if (find_if(begin, end, MEM_FN(&Node::dirty)) == end) {
+      // Recompute most_recent_input.
+      Node* most_recent_input = NULL;
+      for (vector<Node*>::iterator i = begin; i != end; ++i) {
+        if (!most_recent_input || (*i)->mtime() > most_recent_input->mtime())
+          most_recent_input = *i;
+      }
+
+      // Now, this edge is dirty if any of the outputs are dirty.
+      // If the edge isn't dirty, clean the outputs and mark the edge as not
+      // wanted.
+      bool outputs_dirty = false;
+      if (!scan->RecomputeOutputsDirty(*oe, most_recent_input,
+                                       &outputs_dirty, err)) {
+        return false;
+      }
+      if (!outputs_dirty) {
+        for (vector<Node*>::iterator o = (*oe)->outputs_.begin();
+             o != (*oe)->outputs_.end(); ++o) {
+          if (!CleanNode(scan, *o, err))
+            return false;
+        }
+
+        // The edge no longer needs to run; undo EdgeWanted's accounting.
+        want_e->second = kWantNothing;
+        --wanted_edges_;
+        if (!(*oe)->is_phony())
+          --command_edges_;
+      }
+    }
+  }
+  return true;
+}
+
+// Integrate freshly-loaded dyndep information (|ddf|, keyed by edge) from
+// |node| into the build plan: refresh dirty state of dependents, walk the
+// newly-reachable portion of the graph, and re-check edge readiness.
+bool Plan::DyndepsLoaded(DependencyScan* scan, const Node* node,
+                         const DyndepFile& ddf, string* err) {
+  // Recompute the dirty state of all our direct and indirect dependents now
+  // that our dyndep information has been loaded.
+  if (!RefreshDyndepDependents(scan, node, err))
+    return false;
+
+  // We loaded dyndep information for those out_edges of the dyndep node that
+  // specify the node in a dyndep binding, but they may not be in the plan.
+  // Starting with those already in the plan, walk newly-reachable portion
+  // of the graph through the dyndep-discovered dependencies.
+
+  // Find edges in the build plan for which we have new dyndep info.
+  std::vector<DyndepFile::const_iterator> dyndep_roots;
+  for (DyndepFile::const_iterator oe = ddf.begin(); oe != ddf.end(); ++oe) {
+    Edge* edge = oe->first;
+
+    // If the edge outputs are ready we do not need to consider it here.
+    if (edge->outputs_ready())
+      continue;
+
+    map<Edge*, Want>::iterator want_e = want_.find(edge);
+
+    // If the edge has not been encountered before then nothing already in the
+    // plan depends on it so we do not need to consider the edge yet either.
+    if (want_e == want_.end())
+      continue;
+
+    // This edge is already in the plan so queue it for the walk.
+    dyndep_roots.push_back(oe);
+  }
+
+  // Walk dyndep-discovered portion of the graph to add it to the build plan.
+  std::set<Edge*> dyndep_walk;
+  for (std::vector<DyndepFile::const_iterator>::iterator
+       oei = dyndep_roots.begin(); oei != dyndep_roots.end(); ++oei) {
+    DyndepFile::const_iterator oe = *oei;
+    for (vector<Node*>::const_iterator i = oe->second.implicit_inputs_.begin();
+         i != oe->second.implicit_inputs_.end(); ++i) {
+      if (!AddSubTarget(*i, oe->first->outputs_[0], err, &dyndep_walk) &&
+          !err->empty())
+        return false;
+    }
+  }
+
+  // Add out edges from this node that are in the plan (just as
+  // Plan::NodeFinished would have without taking the dyndep code path).
+  for (vector<Edge*>::const_iterator oe = node->out_edges().begin();
+       oe != node->out_edges().end(); ++oe) {
+    map<Edge*, Want>::iterator want_e = want_.find(*oe);
+    if (want_e == want_.end())
+      continue;
+    dyndep_walk.insert(want_e->first);
+  }
+
+  // See if any encountered edges are now ready.
+  for (set<Edge*>::iterator wi = dyndep_walk.begin();
+       wi != dyndep_walk.end(); ++wi) {
+    map<Edge*, Want>::iterator want_e = want_.find(*wi);
+    if (want_e == want_.end())
+      continue;
+    if (!EdgeMaybeReady(want_e, err))
+      return false;
+  }
+
+  return true;
+}
+
+// Recompute the dirty state of every transitive dependent of |node| after
+// new dyndep info arrived, promoting newly-dirty edges to kWantToStart.
+bool Plan::RefreshDyndepDependents(DependencyScan* scan, const Node* node,
+                                   string* err) {
+  // Collect the transitive closure of dependents and mark their edges
+  // as not yet visited by RecomputeDirty.
+  set<Node*> dependents;
+  UnmarkDependents(node, &dependents);
+
+  // Update the dirty state of all dependents and check if their edges
+  // have become wanted.
+  for (set<Node*>::iterator i = dependents.begin();
+       i != dependents.end(); ++i) {
+    Node* n = *i;
+
+    // Check if this dependent node is now dirty. Also checks for new cycles.
+    if (!scan->RecomputeDirty(n, err))
+      return false;
+    if (!n->dirty())
+      continue;
+
+    // This edge was encountered before. However, we may not have wanted to
+    // build it if the outputs were not known to be dirty. With dyndep
+    // information an output is now known to be dirty, so we want the edge.
+    Edge* edge = n->in_edge();
+    assert(edge && !edge->outputs_ready());
+    map<Edge*, Want>::iterator want_e = want_.find(edge);
+    assert(want_e != want_.end());
+    if (want_e->second == kWantNothing) {
+      want_e->second = kWantToStart;
+      EdgeWanted(edge);
+    }
+  }
+  return true;
+}
+
+// Collect the transitive closure of |node|'s dependents (restricted to
+// edges tracked in want_) into |dependents|, clearing each edge's visit
+// mark so RecomputeDirty will revisit it.
+void Plan::UnmarkDependents(const Node* node, set<Node*>* dependents) {
+  for (vector<Edge*>::const_iterator oe = node->out_edges().begin();
+       oe != node->out_edges().end(); ++oe) {
+    Edge* edge = *oe;
+
+    // Only edges already in the plan matter here.
+    map<Edge*, Want>::iterator want_e = want_.find(edge);
+    if (want_e == want_.end())
+      continue;
+
+    if (edge->mark_ != Edge::VisitNone) {
+      edge->mark_ = Edge::VisitNone;
+      for (vector<Node*>::iterator o = edge->outputs_.begin();
+           o != edge->outputs_.end(); ++o) {
+        // Recurse only on first insertion to avoid revisiting nodes.
+        if (dependents->insert(*o).second)
+          UnmarkDependents(*o, dependents);
+      }
+    }
+  }
+}
+
+// Debugging aid: dump every tracked edge (prefixed with "want " when it
+// still needs to run) plus the pending/ready counts.
+void Plan::Dump() const {
+  printf("pending: %d\n", (int)want_.size());
+  map<Edge*, Want>::const_iterator it = want_.begin();
+  for (; it != want_.end(); ++it) {
+    if (it->second != kWantNothing)
+      printf("want ");
+    it->first->Dump();
+  }
+  printf("ready: %d\n", (int)ready_.size());
+}
+
+/// CommandRunner implementation that actually launches subprocesses.
+struct RealCommandRunner : public CommandRunner {
+  explicit RealCommandRunner(const BuildConfig& config) : config_(config) {}
+  virtual ~RealCommandRunner() {}
+  virtual bool CanRunMore() const;
+  virtual bool StartCommand(Edge* edge);
+  virtual bool WaitForCommand(Result* result);
+  virtual vector<Edge*> GetActiveEdges();
+  virtual void Abort();
+
+  const BuildConfig& config_;
+  /// All running / finished-but-unreaped subprocesses.
+  SubprocessSet subprocs_;
+  /// Maps each live subprocess back to the edge that spawned it.
+  map<const Subprocess*, Edge*> subproc_to_edge_;
+};
+
+// Return the edge attached to every in-flight subprocess.
+vector<Edge*> RealCommandRunner::GetActiveEdges() {
+  vector<Edge*> active;
+  map<const Subprocess*, Edge*>::iterator it = subproc_to_edge_.begin();
+  for (; it != subproc_to_edge_.end(); ++it)
+    active.push_back(it->second);
+  return active;
+}
+
+// Abandon all subprocesses (used when the build is interrupted).
+void RealCommandRunner::Abort() {
+  subprocs_.Clear();
+}
+
+// Decide whether another command may be started, honoring both the
+// parallelism limit (-j) and the optional load-average cap (-l).
+bool RealCommandRunner::CanRunMore() const {
+  // Finished-but-unreaped subprocesses still occupy a slot.
+  size_t active = subprocs_.running_.size() + subprocs_.finished_.size();
+  if ((int)active >= config_.parallelism)
+    return false;
+  // No load limit configured, or nothing running yet: go ahead.
+  if (subprocs_.running_.empty() || config_.max_load_average <= 0.0f)
+    return true;
+  return GetLoadAverage() < config_.max_load_average;
+}
+
+// Launch |edge|'s command as a subprocess and remember which edge it
+// belongs to so WaitForCommand can report it back.
+bool RealCommandRunner::StartCommand(Edge* edge) {
+  string command = edge->EvaluateCommand();
+  Subprocess* subproc = subprocs_.Add(command, edge->use_console());
+  if (subproc == NULL)
+    return false;
+  subproc_to_edge_[subproc] = edge;
+  return true;
+}
+
+// Block until some subprocess finishes, fill in |result| with its edge,
+// exit status and captured output. Returns false if interrupted.
+bool RealCommandRunner::WaitForCommand(Result* result) {
+  Subprocess* subproc;
+  while ((subproc = subprocs_.NextFinished()) == NULL) {
+    bool interrupted = subprocs_.DoWork();
+    if (interrupted)
+      return false;
+  }
+
+  result->status = subproc->Finish();
+  result->output = subproc->GetOutput();
+
+  // Map the subprocess back to its edge and drop the bookkeeping entry.
+  map<const Subprocess*, Edge*>::iterator e = subproc_to_edge_.find(subproc);
+  result->edge = e->second;
+  subproc_to_edge_.erase(e);
+
+  delete subproc;
+  return true;
+}
+
+// Wire up the builder: the plan gets a back-pointer to us (for dyndep
+// loading) and the dependency scanner shares our logs and disk interface.
+Builder::Builder(State* state, const BuildConfig& config,
+                 BuildLog* build_log, DepsLog* deps_log,
+                 DiskInterface* disk_interface)
+    : state_(state), config_(config),
+      plan_(this), disk_interface_(disk_interface),
+      scan_(state, build_log, deps_log, disk_interface,
+            &config_.depfile_parser_options) {
+  status_ = new BuildStatus(config);
+}
+
+// Ensure interrupted commands' partial outputs are removed on teardown.
+Builder::~Builder() {
+  Cleanup();
+}
+
+// Clean up after interrupted commands: abort in-flight subprocesses and
+// delete outputs (and depfiles) of edges that were still running.
+void Builder::Cleanup() {
+  if (command_runner_.get()) {
+    vector<Edge*> active_edges = command_runner_->GetActiveEdges();
+    command_runner_->Abort();
+
+    for (vector<Edge*>::iterator e = active_edges.begin();
+         e != active_edges.end(); ++e) {
+      string depfile = (*e)->GetUnescapedDepfile();
+      for (vector<Node*>::iterator o = (*e)->outputs_.begin();
+           o != (*e)->outputs_.end(); ++o) {
+        // Only delete this output if it was actually modified. This is
+        // important for things like the generator where we don't want to
+        // delete the manifest file if we can avoid it. But if the rule
+        // uses a depfile, always delete. (Consider the case where we
+        // need to rebuild an output because of a modified header file
+        // mentioned in a depfile, and the command touches its depfile
+        // but is interrupted before it touches its output file.)
+        string err;
+        TimeStamp new_mtime = disk_interface_->Stat((*o)->path(), &err);
+        if (new_mtime == -1)  // Log and ignore Stat() errors.
+          Error("%s", err.c_str());
+        if (!depfile.empty() || (*o)->mtime() != new_mtime)
+          disk_interface_->RemoveFile((*o)->path());
+      }
+      if (!depfile.empty())
+        disk_interface_->RemoveFile(depfile);
+    }
+  }
+}
+
+// Look up a target by name and add it to the build. Returns the node on
+// success, or NULL (with |err| set) when the name is unknown or the
+// dependency scan fails.
+Node* Builder::AddTarget(const string& name, string* err) {
+  Node* node = state_->LookupNode(name);
+  if (node == NULL) {
+    *err = "unknown target: '" + name + "'";
+    return NULL;
+  }
+  return AddTarget(node, err) ? node : NULL;
+}
+
+// Add |node| to the build: recompute its dirty state, then feed it into
+// the plan unless its producing edge is already fully up to date.
+bool Builder::AddTarget(Node* node, string* err) {
+  if (!scan_.RecomputeDirty(node, err))
+    return false;
+
+  Edge* in_edge = node->in_edge();
+  if (in_edge != NULL && in_edge->outputs_ready())
+    return true;  // Nothing to do.
+
+  return plan_.AddTarget(node, err);
+}
+
+// True when the plan has no remaining work to perform.
+bool Builder::AlreadyUpToDate() const {
+  return !plan_.more_to_do();
+}
+
+// Run the build until the plan is exhausted or no more progress can be
+// made. Returns false with |err| set on failure or interruption.
+bool Builder::Build(string* err) {
+  assert(!AlreadyUpToDate());
+
+  status_->PlanHasTotalEdges(plan_.command_edge_count());
+  // Non-phony commands started but not yet reaped.
+  int pending_commands = 0;
+  int failures_allowed = config_.failures_allowed;
+
+  // Set up the command runner if we haven't done so already.
+  if (!command_runner_.get()) {
+    if (config_.dry_run)
+      command_runner_.reset(new DryRunCommandRunner);
+    else
+      command_runner_.reset(new RealCommandRunner(config_));
+  }
+
+  // We are about to start the build process.
+  status_->BuildStarted();
+
+  // This main loop runs the entire build process.
+  // It is structured like this:
+  // First, we attempt to start as many commands as allowed by the
+  // command runner.
+  // Second, we attempt to wait for / reap the next finished command.
+  while (plan_.more_to_do()) {
+    // See if we can start any more commands.
+    if (failures_allowed && command_runner_->CanRunMore()) {
+      if (Edge* edge = plan_.FindWork()) {
+        // NOTE(review): presumably the log is closed so a generator rule
+        // can safely rewrite the build files/log while it runs — confirm.
+        if (edge->GetBindingBool("generator")) {
+          scan_.build_log()->Close();
+        }
+
+        if (!StartEdge(edge, err)) {
+          Cleanup();
+          status_->BuildFinished();
+          return false;
+        }
+
+        if (edge->is_phony()) {
+          // Phony edges run no command; finish them immediately.
+          if (!plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, err)) {
+            Cleanup();
+            status_->BuildFinished();
+            return false;
+          }
+        } else {
+          ++pending_commands;
+        }
+
+        // We made some progress; go back to the main loop.
+        continue;
+      }
+    }
+
+    // See if we can reap any finished commands.
+    if (pending_commands) {
+      CommandRunner::Result result;
+      if (!command_runner_->WaitForCommand(&result) ||
+          result.status == ExitInterrupted) {
+        Cleanup();
+        status_->BuildFinished();
+        *err = "interrupted by user";
+        return false;
+      }
+
+      --pending_commands;
+      if (!FinishCommand(&result, err)) {
+        Cleanup();
+        status_->BuildFinished();
+        return false;
+      }
+
+      if (!result.success()) {
+        if (failures_allowed)
+          failures_allowed--;
+      }
+
+      // We made some progress; start the main loop over.
+      continue;
+    }
+
+    // If we get here, we cannot make any more progress.
+    status_->BuildFinished();
+    if (failures_allowed == 0) {
+      if (config_.failures_allowed > 1)
+        *err = "subcommands failed";
+      else
+        *err = "subcommand failed";
+    } else if (failures_allowed < config_.failures_allowed)
+      *err = "cannot make progress due to previous errors";
+    else
+      *err = "stuck [this is a bug]";
+
+    return false;
+  }
+
+  status_->BuildFinished();
+  return true;
+}
+
+// Start running |edge|: create output directories and any response file,
+// then hand the command to the command runner. Phony edges are a no-op.
+bool Builder::StartEdge(Edge* edge, string* err) {
+  METRIC_RECORD("StartEdge");
+  if (edge->is_phony())
+    return true;
+
+  status_->BuildEdgeStarted(edge);
+
+  // Create directories necessary for outputs.
+  // XXX: this will block; do we care?
+  for (vector<Node*>::iterator o = edge->outputs_.begin();
+       o != edge->outputs_.end(); ++o) {
+    if (!disk_interface_->MakeDirs((*o)->path()))
+      return false;
+  }
+
+  // Create response file, if needed
+  // XXX: this may also block; do we care?
+  string rspfile = edge->GetUnescapedRspfile();
+  if (!rspfile.empty()) {
+    string content = edge->GetBinding("rspfile_content");
+    if (!disk_interface_->WriteFile(rspfile, content))
+      return false;
+  }
+
+  // start command computing and run it
+  if (!command_runner_->StartCommand(edge)) {
+    err->assign("command '" + edge->EvaluateCommand() + "' failed.");
+    return false;
+  }
+
+  return true;
+}
+
+// Post-process a finished command: extract deps, update status, handle
+// restat, record build/deps logs, and update the plan.
+bool Builder::FinishCommand(CommandRunner::Result* result, string* err) {
+  METRIC_RECORD("FinishCommand");
+
+  Edge* edge = result->edge;
+
+  // First try to extract dependencies from the result, if any.
+  // This must happen first as it filters the command output (we want
+  // to filter /showIncludes output, even on compile failure) and
+  // extraction itself can fail, which makes the command fail from a
+  // build perspective.
+  vector<Node*> deps_nodes;
+  string deps_type = edge->GetBinding("deps");
+  const string deps_prefix = edge->GetBinding("msvc_deps_prefix");
+  if (!deps_type.empty()) {
+    string extract_err;
+    if (!ExtractDeps(result, deps_type, deps_prefix, &deps_nodes,
+                     &extract_err) &&
+        result->success()) {
+      // A dep-extraction failure turns a successful command into a failure.
+      if (!result->output.empty())
+        result->output.append("\n");
+      result->output.append(extract_err);
+      result->status = ExitFailure;
+    }
+  }
+
+  int start_time, end_time;
+  status_->BuildEdgeFinished(edge, result->success(), result->output,
+                             &start_time, &end_time);
+
+  // The rest of this function only applies to successful commands.
+  if (!result->success()) {
+    return plan_.EdgeFinished(edge, Plan::kEdgeFailed, err);
+  }
+
+  // Restat the edge outputs
+  TimeStamp output_mtime = 0;
+  bool restat = edge->GetBindingBool("restat");
+  if (!config_.dry_run) {
+    bool node_cleaned = false;
+
+    for (vector<Node*>::iterator o = edge->outputs_.begin();
+         o != edge->outputs_.end(); ++o) {
+      TimeStamp new_mtime = disk_interface_->Stat((*o)->path(), err);
+      if (new_mtime == -1)
+        return false;
+      if (new_mtime > output_mtime)
+        output_mtime = new_mtime;
+      if ((*o)->mtime() == new_mtime && restat) {
+        // The rule command did not change the output. Propagate the clean
+        // state through the build graph.
+        // Note that this also applies to nonexistent outputs (mtime == 0).
+        if (!plan_.CleanNode(&scan_, *o, err))
+          return false;
+        node_cleaned = true;
+      }
+    }
+
+    if (node_cleaned) {
+      TimeStamp restat_mtime = 0;
+      // If any output was cleaned, find the most recent mtime of any
+      // (existing) non-order-only input or the depfile.
+      for (vector<Node*>::iterator i = edge->inputs_.begin();
+           i != edge->inputs_.end() - edge->order_only_deps_; ++i) {
+        TimeStamp input_mtime = disk_interface_->Stat((*i)->path(), err);
+        if (input_mtime == -1)
+          return false;
+        if (input_mtime > restat_mtime)
+          restat_mtime = input_mtime;
+      }
+
+      string depfile = edge->GetUnescapedDepfile();
+      if (restat_mtime != 0 && deps_type.empty() && !depfile.empty()) {
+        TimeStamp depfile_mtime = disk_interface_->Stat(depfile, err);
+        if (depfile_mtime == -1)
+          return false;
+        if (depfile_mtime > restat_mtime)
+          restat_mtime = depfile_mtime;
+      }
+
+      // The total number of edges in the plan may have changed as a result
+      // of a restat.
+      status_->PlanHasTotalEdges(plan_.command_edge_count());
+
+      output_mtime = restat_mtime;
+    }
+  }
+
+  if (!plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, err))
+    return false;
+
+  // Delete any left over response file.
+  string rspfile = edge->GetUnescapedRspfile();
+  if (!rspfile.empty() && !g_keep_rsp)
+    disk_interface_->RemoveFile(rspfile);
+
+  // Record the command in the build log for future up-to-date checks.
+  if (scan_.build_log()) {
+    if (!scan_.build_log()->RecordCommand(edge, start_time, end_time,
+                                          output_mtime)) {
+      *err = string("Error writing to build log: ") + strerror(errno);
+      return false;
+    }
+  }
+
+  // Record extracted dependencies per output in the deps log.
+  if (!deps_type.empty() && !config_.dry_run) {
+    assert(!edge->outputs_.empty() && "should have been rejected by parser");
+    for (std::vector<Node*>::const_iterator o = edge->outputs_.begin();
+         o != edge->outputs_.end(); ++o) {
+      TimeStamp deps_mtime = disk_interface_->Stat((*o)->path(), err);
+      if (deps_mtime == -1)
+        return false;
+      if (!scan_.deps_log()->RecordDeps(*o, deps_mtime, deps_nodes)) {
+        *err = std::string("Error writing to deps log: ") + strerror(errno);
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+// Extract dependency information from a finished command's result.
+// "msvc": parse /showIncludes lines out of the command's stdout (and
+// filter them from |result->output|). "gcc": read and parse the edge's
+// depfile, then delete it unless -d keepdepfile is set. Any other
+// deps type is a fatal error.
+bool Builder::ExtractDeps(CommandRunner::Result* result,
+                          const string& deps_type,
+                          const string& deps_prefix,
+                          vector<Node*>* deps_nodes,
+                          string* err) {
+  if (deps_type == "msvc") {
+    CLParser parser;
+    string output;
+    if (!parser.Parse(result->output, deps_prefix, &output, err))
+      return false;
+    result->output = output;
+    for (set<string>::iterator i = parser.includes_.begin();
+         i != parser.includes_.end(); ++i) {
+      // ~0 is assuming that with MSVC-parsed headers, it's ok to always make
+      // all backslashes (as some of the slashes will certainly be backslashes
+      // anyway). This could be fixed if necessary with some additional
+      // complexity in IncludesNormalize::Relativize.
+      deps_nodes->push_back(state_->GetNode(*i, ~0u));
+    }
+  } else if (deps_type == "gcc") {
+    string depfile = result->edge->GetUnescapedDepfile();
+    if (depfile.empty()) {
+      *err = string("edge with deps=gcc but no depfile makes no sense");
+      return false;
+    }
+
+    // Read depfile content. Treat a missing depfile as empty.
+    string content;
+    switch (disk_interface_->ReadFile(depfile, &content, err)) {
+    case DiskInterface::Okay:
+      break;
+    case DiskInterface::NotFound:
+      err->clear();
+      break;
+    case DiskInterface::OtherError:
+      return false;
+    }
+    if (content.empty())
+      return true;
+
+    DepfileParser deps(config_.depfile_parser_options);
+    if (!deps.Parse(&content, err))
+      return false;
+
+    // XXX check depfile matches expected output.
+    deps_nodes->reserve(deps.ins_.size());
+    for (vector<StringPiece>::iterator i = deps.ins_.begin();
+         i != deps.ins_.end(); ++i) {
+      uint64_t slash_bits;
+      if (!CanonicalizePath(const_cast<char*>(i->str_), &i->len_, &slash_bits,
+                            err))
+        return false;
+      deps_nodes->push_back(state_->GetNode(*i, slash_bits));
+    }
+
+    // The deps are now stored in the deps log; the depfile is redundant.
+    if (!g_keep_depfile) {
+      if (disk_interface_->RemoveFile(depfile) < 0) {
+        *err = string("deleting depfile: ") + strerror(errno) + string("\n");
+        return false;
+      }
+    }
+  } else {
+    Fatal("unknown deps type '%s'", deps_type.c_str());
+  }
+
+  return true;
+}
+
+// Load the dyndep file provided by |node| and fold its information into
+// the dependency graph and the build plan.
+bool Builder::LoadDyndeps(Node* node, string* err) {
+  status_->BuildLoadDyndeps();
+
+  // Load the dyndep information provided by this node.
+  DyndepFile ddf;
+  if (!scan_.LoadDyndeps(node, &ddf, err))
+    return false;
+
+  // Update the build plan to account for dyndep modifications to the graph.
+  if (!plan_.DyndepsLoaded(&scan_, node, ddf, err))
+    return false;
+
+  // New command edges may have been added to the plan.
+  status_->PlanHasTotalEdges(plan_.command_edge_count());
+
+  return true;
+}
diff --git a/src/build.h b/src/build.h
new file mode 100644
index 0000000..2798693
--- /dev/null
+++ b/src/build.h
@@ -0,0 +1,338 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_BUILD_H_
+#define NINJA_BUILD_H_
+
+#include <cstdio>
+#include <map>
+#include <memory>
+#include <queue>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "depfile_parser.h"
+#include "graph.h" // XXX needed for DependencyScan; should rearrange.
+#include "exit_status.h"
+#include "line_printer.h"
+#include "metrics.h"
+#include "util.h" // int64_t
+
+struct BuildLog;
+struct BuildStatus;
+struct Builder;
+struct DiskInterface;
+struct Edge;
+struct Node;
+struct State;
+
+/// Plan stores the state of a build plan: what we intend to build,
+/// which steps we're ready to execute.
+struct Plan {
+  Plan(Builder* builder = NULL);
+
+  /// Add a target to our plan (including all its dependencies).
+  /// Returns false if we don't need to build this target; may
+  /// fill in |err| with an error message if there's a problem.
+  bool AddTarget(const Node* node, std::string* err);
+
+  /// Pop a ready edge off the queue of edges to build.
+  /// Returns NULL if there's no work to do.
+  Edge* FindWork();
+
+  /// Returns true if there's more work to be done.
+  bool more_to_do() const { return wanted_edges_ > 0 && command_edges_ > 0; }
+
+  /// Dumps the current state of the plan.
+  void Dump() const;
+
+  enum EdgeResult {
+    kEdgeFailed,
+    kEdgeSucceeded
+  };
+
+  /// Mark an edge as done building (whether it succeeded or failed).
+  /// If any of the edge's outputs are dyndep bindings of their dependents,
+  /// this loads dynamic dependencies from the nodes' paths.
+  /// Returns 'false' if loading dyndep info fails and 'true' otherwise.
+  bool EdgeFinished(Edge* edge, EdgeResult result, std::string* err);
+
+  /// Clean the given node during the build.
+  /// Return false on error.
+  bool CleanNode(DependencyScan* scan, Node* node, std::string* err);
+
+  /// Number of edges with commands to run.
+  int command_edge_count() const { return command_edges_; }
+
+  /// Reset state. Clears want and ready sets.
+  void Reset();
+
+  /// Update the build plan to account for modifications made to the graph
+  /// by information loaded from a dyndep file.
+  bool DyndepsLoaded(DependencyScan* scan, const Node* node,
+                     const DyndepFile& ddf, std::string* err);
+private:
+  bool RefreshDyndepDependents(DependencyScan* scan, const Node* node, std::string* err);
+  void UnmarkDependents(const Node* node, std::set<Node*>* dependents);
+  bool AddSubTarget(const Node* node, const Node* dependent, std::string* err,
+                    std::set<Edge*>* dyndep_walk);
+
+  /// Update plan with knowledge that the given node is up to date.
+  /// If the node is a dyndep binding on any of its dependents, this
+  /// loads dynamic dependencies from the node's path.
+  /// Returns 'false' if loading dyndep info fails and 'true' otherwise.
+  bool NodeFinished(Node* node, std::string* err);
+
+  /// Enumerate possible steps we want for an edge.
+  enum Want
+  {
+    /// We do not want to build the edge, but we might want to build one of
+    /// its dependents.
+    kWantNothing,
+    /// We want to build the edge, but have not yet scheduled it.
+    kWantToStart,
+    /// We want to build the edge, have scheduled it, and are waiting
+    /// for it to complete.
+    kWantToFinish
+  };
+
+  /// Update edge counters for a newly-wanted edge.
+  void EdgeWanted(const Edge* edge);
+  /// Schedule (or trivially finish) the edge if all its inputs are ready.
+  bool EdgeMaybeReady(std::map<Edge*, Want>::iterator want_e, std::string* err);
+
+  /// Submits a ready edge as a candidate for execution.
+  /// The edge may be delayed from running, for example if it's a member of a
+  /// currently-full pool.
+  void ScheduleWork(std::map<Edge*, Want>::iterator want_e);
+
+  /// Keep track of which edges we want to build in this plan. If this map does
+  /// not contain an entry for an edge, we do not want to build the entry or its
+  /// dependents. If it does contain an entry, the enumeration indicates what
+  /// we want for the edge.
+  std::map<Edge*, Want> want_;
+
+  /// Edges that are ready to run (all inputs ready, pool slot available).
+  std::set<Edge*> ready_;
+
+  /// Owning Builder, used to load dyndep files; may be NULL in tests.
+  Builder* builder_;
+
+  /// Total number of edges that have commands (not phony).
+  int command_edges_;
+
+  /// Total remaining number of wanted edges.
+  int wanted_edges_;
+};
+
+/// CommandRunner is an interface that wraps running the build
+/// subcommands. This allows tests to abstract out running commands.
+/// RealCommandRunner is an implementation that actually runs commands.
+struct CommandRunner {
+  virtual ~CommandRunner() {}
+  /// Whether another command may be started right now.
+  virtual bool CanRunMore() const = 0;
+  /// Start running the given edge's command.
+  virtual bool StartCommand(Edge* edge) = 0;
+
+  /// The result of waiting for a command.
+  struct Result {
+    Result() : edge(NULL) {}
+    Edge* edge;
+    ExitStatus status;
+    std::string output;
+    bool success() const { return status == ExitSuccess; }
+  };
+  /// Wait for a command to complete, or return false if interrupted.
+  virtual bool WaitForCommand(Result* result) = 0;
+
+  /// Edges whose commands are still in flight; default: none.
+  virtual std::vector<Edge*> GetActiveEdges() { return std::vector<Edge*>(); }
+  /// Abandon any running commands; default: no-op.
+  virtual void Abort() {}
+};
+
+/// Options (e.g. verbosity, parallelism) passed to a build.
+struct BuildConfig {
+  BuildConfig() : verbosity(NORMAL), dry_run(false), parallelism(1),
+                  failures_allowed(1), max_load_average(-0.0f) {}
+
+  enum Verbosity {
+    NORMAL,
+    QUIET,  // No output -- used when testing.
+    VERBOSE
+  };
+  Verbosity verbosity;
+  /// If true, commands are not actually executed.
+  bool dry_run;
+  /// Maximum number of concurrent commands (-j).
+  int parallelism;
+  /// Number of command failures tolerated before stopping (-k).
+  int failures_allowed;
+  /// The maximum load average we must not exceed. A negative value
+  /// means that we do not have any limit.
+  double max_load_average;
+  DepfileParserOptions depfile_parser_options;
+};
+
+/// Builder wraps the build process: starting commands, updating status.
+struct Builder {
+  Builder(State* state, const BuildConfig& config,
+          BuildLog* build_log, DepsLog* deps_log,
+          DiskInterface* disk_interface);
+  ~Builder();
+
+  /// Clean up after interrupted commands by deleting output files.
+  void Cleanup();
+
+  /// Look up a target by name and add it; returns NULL with |err| set
+  /// on unknown target or scan failure.
+  Node* AddTarget(const std::string& name, std::string* err);
+
+  /// Add a target to the build, scanning dependencies.
+  /// @return false on error.
+  bool AddTarget(Node* target, std::string* err);
+
+  /// Returns true if the build targets are already up to date.
+  bool AlreadyUpToDate() const;
+
+  /// Run the build. Returns false on error.
+  /// It is an error to call this function when AlreadyUpToDate() is true.
+  bool Build(std::string* err);
+
+  /// Prepare and launch one edge's command (mkdirs, rspfile, runner).
+  bool StartEdge(Edge* edge, std::string* err);
+
+  /// Update status ninja logs following a command termination.
+  /// @return false if the build can not proceed further due to a fatal error.
+  bool FinishCommand(CommandRunner::Result* result, std::string* err);
+
+  /// Used for tests.
+  void SetBuildLog(BuildLog* log) {
+    scan_.set_build_log(log);
+  }
+
+  /// Load the dyndep information provided by the given node.
+  bool LoadDyndeps(Node* node, std::string* err);
+
+  State* state_;
+  const BuildConfig& config_;
+  Plan plan_;
+#if __cplusplus < 201703L
+  std::auto_ptr<CommandRunner> command_runner_;
+#else
+  std::unique_ptr<CommandRunner> command_runner_;  // auto_ptr was removed in C++17.
+#endif
+  BuildStatus* status_;
+
+ private:
+  bool ExtractDeps(CommandRunner::Result* result, const std::string& deps_type,
+                   const std::string& deps_prefix,
+                   std::vector<Node*>* deps_nodes, std::string* err);
+
+  DiskInterface* disk_interface_;
+  DependencyScan scan_;
+
+  // Unimplemented copy ctor and operator= ensure we don't copy the auto_ptr.
+  Builder(const Builder &other);        // DO NOT IMPLEMENT
+  void operator=(const Builder &other); // DO NOT IMPLEMENT
+};
+
+/// Tracks the status of a build: completion fraction, printing updates.
+struct BuildStatus {
+ explicit BuildStatus(const BuildConfig& config);
+ void PlanHasTotalEdges(int total);
+ void BuildEdgeStarted(const Edge* edge);
+ void BuildEdgeFinished(Edge* edge, bool success, const std::string& output,
+ int* start_time, int* end_time);
+ void BuildLoadDyndeps();
+ void BuildStarted();
+ void BuildFinished();
+
+ enum EdgeStatus {
+ kEdgeStarted,
+ kEdgeFinished,
+ };
+
+ /// Format the progress status string by replacing the placeholders.
+ /// See the user manual for more information about the available
+ /// placeholders.
+ /// @param progress_status_format The format of the progress status.
+ /// @param status The status of the edge.
+ std::string FormatProgressStatus(const char* progress_status_format,
+ EdgeStatus status) const;
+
+ private:
+ void PrintStatus(const Edge* edge, EdgeStatus status);
+
+ const BuildConfig& config_;
+
+ /// Time the build started.
+ int64_t start_time_millis_;
+
+ int started_edges_, finished_edges_, total_edges_;
+
+ /// Map of running edge to time the edge started running.
+ typedef std::map<const Edge*, int> RunningEdgeMap;
+ RunningEdgeMap running_edges_;
+
+ /// Prints progress output.
+ LinePrinter printer_;
+
+ /// The custom progress status format to use.
+ const char* progress_status_format_;
+
+ template<size_t S>
+ void SnprintfRate(double rate, char(&buf)[S], const char* format) const {
+ if (rate == -1)
+ snprintf(buf, S, "?");
+ else
+ snprintf(buf, S, format, rate);
+ }
+
+ struct RateInfo {
+ RateInfo() : rate_(-1) {}
+
+ void Restart() { stopwatch_.Restart(); }
+ double Elapsed() const { return stopwatch_.Elapsed(); }
+ double rate() { return rate_; }
+
+ void UpdateRate(int edges) {
+ if (edges && stopwatch_.Elapsed())
+ rate_ = edges / stopwatch_.Elapsed();
+ }
+
+ private:
+ double rate_;
+ Stopwatch stopwatch_;
+ };
+
+ struct SlidingRateInfo {
+ SlidingRateInfo(int n) : rate_(-1), N(n), last_update_(-1) {}
+
+ void Restart() { stopwatch_.Restart(); }
+ double rate() { return rate_; }
+
+ void UpdateRate(int update_hint) {
+ if (update_hint == last_update_)
+ return;
+ last_update_ = update_hint;
+
+ if (times_.size() == N)
+ times_.pop();
+ times_.push(stopwatch_.Elapsed());
+ if (times_.back() != times_.front())
+ rate_ = times_.size() / (times_.back() - times_.front());
+ }
+
+ private:
+ double rate_;
+ Stopwatch stopwatch_;
+ const size_t N;
+ std::queue<double> times_;
+ int last_update_;
+ };
+
+ mutable RateInfo overall_rate_;
+ mutable SlidingRateInfo current_rate_;
+};
+
+#endif // NINJA_BUILD_H_
diff --git a/src/build_log.cc b/src/build_log.cc
new file mode 100644
index 0000000..4dcd6ce
--- /dev/null
+++ b/src/build_log.cc
@@ -0,0 +1,494 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// On AIX, inttypes.h gets indirectly included by build_log.h.
+// It's easiest just to ask for the printf format macros right away.
+#ifndef _WIN32
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+#endif
+
+#include "build_log.h"
+#include "disk_interface.h"
+
+#include <cassert>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifndef _WIN32
+#include <inttypes.h>
+#include <unistd.h>
+#endif
+
+#include "build.h"
+#include "graph.h"
+#include "metrics.h"
+#include "util.h"
+#if defined(_MSC_VER) && (_MSC_VER < 1800)
+#define strtoll _strtoi64
+#endif
+
+using namespace std;
+
+// Implementation details:
+// Each run's log appends to the log file.
+// To load, we run through all log entries in series, throwing away
+// older runs.
+// Once the number of redundant entries exceeds a threshold, we write
+// out a new file and replace the existing one with it.
+
+namespace {
+
+const char kFileSignature[] = "# ninja log v%d\n";
+const int kOldestSupportedVersion = 4;
+const int kCurrentVersion = 5;
+
+// 64bit MurmurHash2, by Austin Appleby
+#if defined(_MSC_VER)
+#define BIG_CONSTANT(x) (x)
+#else // defined(_MSC_VER)
+#define BIG_CONSTANT(x) (x##LLU)
+#endif // !defined(_MSC_VER)
+inline
+uint64_t MurmurHash64A(const void* key, size_t len) {
+ static const uint64_t seed = 0xDECAFBADDECAFBADull;
+ const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995);
+ const int r = 47;
+ uint64_t h = seed ^ (len * m);
+ const unsigned char* data = (const unsigned char*)key;
+ while (len >= 8) {
+ uint64_t k;
+ memcpy(&k, data, sizeof k);
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+ h ^= k;
+ h *= m;
+ data += 8;
+ len -= 8;
+ }
+ switch (len & 7)
+ {
+ case 7: h ^= uint64_t(data[6]) << 48;
+ NINJA_FALLTHROUGH;
+ case 6: h ^= uint64_t(data[5]) << 40;
+ NINJA_FALLTHROUGH;
+ case 5: h ^= uint64_t(data[4]) << 32;
+ NINJA_FALLTHROUGH;
+ case 4: h ^= uint64_t(data[3]) << 24;
+ NINJA_FALLTHROUGH;
+ case 3: h ^= uint64_t(data[2]) << 16;
+ NINJA_FALLTHROUGH;
+ case 2: h ^= uint64_t(data[1]) << 8;
+ NINJA_FALLTHROUGH;
+ case 1: h ^= uint64_t(data[0]);
+ h *= m;
+ };
+ h ^= h >> r;
+ h *= m;
+ h ^= h >> r;
+ return h;
+}
+#undef BIG_CONSTANT
+
+
+} // namespace
+
+// static
+uint64_t BuildLog::LogEntry::HashCommand(StringPiece command) {
+ return MurmurHash64A(command.str_, command.len_);
+}
+
+BuildLog::LogEntry::LogEntry(const string& output)
+ : output(output) {}
+
+BuildLog::LogEntry::LogEntry(const string& output, uint64_t command_hash,
+ int start_time, int end_time, TimeStamp restat_mtime)
+ : output(output), command_hash(command_hash),
+ start_time(start_time), end_time(end_time), mtime(restat_mtime)
+{}
+
+BuildLog::BuildLog()
+ : log_file_(NULL), needs_recompaction_(false) {}
+
+BuildLog::~BuildLog() {
+ Close();
+}
+
+bool BuildLog::OpenForWrite(const string& path, const BuildLogUser& user,
+ string* err) {
+ if (needs_recompaction_) {
+ if (!Recompact(path, user, err))
+ return false;
+ }
+
+ assert(!log_file_);
+ log_file_path_ = path; // we don't actually open the file right now, but will
+ // do so on the first write attempt
+ return true;
+}
+
+bool BuildLog::RecordCommand(Edge* edge, int start_time, int end_time,
+ TimeStamp mtime) {
+ string command = edge->EvaluateCommand(true);
+ uint64_t command_hash = LogEntry::HashCommand(command);
+ for (vector<Node*>::iterator out = edge->outputs_.begin();
+ out != edge->outputs_.end(); ++out) {
+ const string& path = (*out)->path();
+ Entries::iterator i = entries_.find(path);
+ LogEntry* log_entry;
+ if (i != entries_.end()) {
+ log_entry = i->second;
+ } else {
+ log_entry = new LogEntry(path);
+ entries_.insert(Entries::value_type(log_entry->output, log_entry));
+ }
+ log_entry->command_hash = command_hash;
+ log_entry->start_time = start_time;
+ log_entry->end_time = end_time;
+ log_entry->mtime = mtime;
+
+ if (!OpenForWriteIfNeeded()) {
+ return false;
+ }
+ if (log_file_) {
+ if (!WriteEntry(log_file_, *log_entry))
+ return false;
+ if (fflush(log_file_) != 0) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+void BuildLog::Close() {
+ OpenForWriteIfNeeded(); // create the file even if nothing has been recorded
+ if (log_file_)
+ fclose(log_file_);
+ log_file_ = NULL;
+}
+
+bool BuildLog::OpenForWriteIfNeeded() {
+ if (log_file_ || log_file_path_.empty()) {
+ return true;
+ }
+ log_file_ = fopen(log_file_path_.c_str(), "ab");
+ if (!log_file_) {
+ return false;
+ }
+ if (setvbuf(log_file_, NULL, _IOLBF, BUFSIZ) != 0) {
+ return false;
+ }
+ SetCloseOnExec(fileno(log_file_));
+
+ // Opening a file in append mode doesn't set the file pointer to the file's
+ // end on Windows. Do that explicitly.
+ fseek(log_file_, 0, SEEK_END);
+
+ if (ftell(log_file_) == 0) {
+ if (fprintf(log_file_, kFileSignature, kCurrentVersion) < 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+struct LineReader {
+ explicit LineReader(FILE* file)
+ : file_(file), buf_end_(buf_), line_start_(buf_), line_end_(NULL) {
+ memset(buf_, 0, sizeof(buf_));
+ }
+
+ // Reads a \n-terminated line from the file passed to the constructor.
+ // On return, *line_start points to the beginning of the next line, and
+ // *line_end points to the \n at the end of the line. If no newline is seen
+ // in a fixed buffer size, *line_end is set to NULL. Returns false on EOF.
+ bool ReadLine(char** line_start, char** line_end) {
+ if (line_start_ >= buf_end_ || !line_end_) {
+ // Buffer empty, refill.
+ size_t size_read = fread(buf_, 1, sizeof(buf_), file_);
+ if (!size_read)
+ return false;
+ line_start_ = buf_;
+ buf_end_ = buf_ + size_read;
+ } else {
+ // Advance to next line in buffer.
+ line_start_ = line_end_ + 1;
+ }
+
+ line_end_ = (char*)memchr(line_start_, '\n', buf_end_ - line_start_);
+ if (!line_end_) {
+ // No newline. Move rest of data to start of buffer, fill rest.
+ size_t already_consumed = line_start_ - buf_;
+ size_t size_rest = (buf_end_ - buf_) - already_consumed;
+ memmove(buf_, line_start_, size_rest);
+
+ size_t read = fread(buf_ + size_rest, 1, sizeof(buf_) - size_rest, file_);
+ buf_end_ = buf_ + size_rest + read;
+ line_start_ = buf_;
+ line_end_ = (char*)memchr(line_start_, '\n', buf_end_ - line_start_);
+ }
+
+ *line_start = line_start_;
+ *line_end = line_end_;
+ return true;
+ }
+
+ private:
+ FILE* file_;
+ char buf_[256 << 10];
+ char* buf_end_; // Points one past the last valid byte in |buf_|.
+
+ char* line_start_;
+ // Points at the next \n in buf_ after line_start, or NULL.
+ char* line_end_;
+};
+
+LoadStatus BuildLog::Load(const string& path, string* err) {
+ METRIC_RECORD(".ninja_log load");
+ FILE* file = fopen(path.c_str(), "r");
+ if (!file) {
+ if (errno == ENOENT)
+ return LOAD_NOT_FOUND;
+ *err = strerror(errno);
+ return LOAD_ERROR;
+ }
+
+ int log_version = 0;
+ int unique_entry_count = 0;
+ int total_entry_count = 0;
+
+ LineReader reader(file);
+ char* line_start = 0;
+ char* line_end = 0;
+ while (reader.ReadLine(&line_start, &line_end)) {
+ if (!log_version) {
+ sscanf(line_start, kFileSignature, &log_version);
+
+ if (log_version < kOldestSupportedVersion) {
+ *err = ("build log version invalid, perhaps due to being too old; "
+ "starting over");
+ fclose(file);
+ unlink(path.c_str());
+ // Don't report this as a failure. An empty build log will cause
+ // us to rebuild the outputs anyway.
+ return LOAD_SUCCESS;
+ }
+ }
+
+ // If no newline was found in this chunk, read the next.
+ if (!line_end)
+ continue;
+
+ const char kFieldSeparator = '\t';
+
+ char* start = line_start;
+ char* end = (char*)memchr(start, kFieldSeparator, line_end - start);
+ if (!end)
+ continue;
+ *end = 0;
+
+ int start_time = 0, end_time = 0;
+ TimeStamp restat_mtime = 0;
+
+ start_time = atoi(start);
+ start = end + 1;
+
+ end = (char*)memchr(start, kFieldSeparator, line_end - start);
+ if (!end)
+ continue;
+ *end = 0;
+ end_time = atoi(start);
+ start = end + 1;
+
+ end = (char*)memchr(start, kFieldSeparator, line_end - start);
+ if (!end)
+ continue;
+ *end = 0;
+ restat_mtime = strtoll(start, NULL, 10);
+ start = end + 1;
+
+ end = (char*)memchr(start, kFieldSeparator, line_end - start);
+ if (!end)
+ continue;
+ string output = string(start, end - start);
+
+ start = end + 1;
+ end = line_end;
+
+ LogEntry* entry;
+ Entries::iterator i = entries_.find(output);
+ if (i != entries_.end()) {
+ entry = i->second;
+ } else {
+ entry = new LogEntry(output);
+ entries_.insert(Entries::value_type(entry->output, entry));
+ ++unique_entry_count;
+ }
+ ++total_entry_count;
+
+ entry->start_time = start_time;
+ entry->end_time = end_time;
+ entry->mtime = restat_mtime;
+ if (log_version >= 5) {
+ char c = *end; *end = '\0';
+ entry->command_hash = (uint64_t)strtoull(start, NULL, 16);
+ *end = c;
+ } else {
+ entry->command_hash = LogEntry::HashCommand(StringPiece(start,
+ end - start));
+ }
+ }
+ fclose(file);
+
+ if (!line_start) {
+ return LOAD_SUCCESS; // file was empty
+ }
+
+ // Decide whether it's time to rebuild the log:
+ // - if we're upgrading versions
+ // - if it's getting large
+ int kMinCompactionEntryCount = 100;
+ int kCompactionRatio = 3;
+ if (log_version < kCurrentVersion) {
+ needs_recompaction_ = true;
+ } else if (total_entry_count > kMinCompactionEntryCount &&
+ total_entry_count > unique_entry_count * kCompactionRatio) {
+ needs_recompaction_ = true;
+ }
+
+ return LOAD_SUCCESS;
+}
+
+BuildLog::LogEntry* BuildLog::LookupByOutput(const string& path) {
+ Entries::iterator i = entries_.find(path);
+ if (i != entries_.end())
+ return i->second;
+ return NULL;
+}
+
+bool BuildLog::WriteEntry(FILE* f, const LogEntry& entry) {
+ return fprintf(f, "%d\t%d\t%" PRId64 "\t%s\t%" PRIx64 "\n",
+ entry.start_time, entry.end_time, entry.mtime,
+ entry.output.c_str(), entry.command_hash) > 0;
+}
+
+bool BuildLog::Recompact(const string& path, const BuildLogUser& user,
+ string* err) {
+ METRIC_RECORD(".ninja_log recompact");
+
+ Close();
+ string temp_path = path + ".recompact";
+ FILE* f = fopen(temp_path.c_str(), "wb");
+ if (!f) {
+ *err = strerror(errno);
+ return false;
+ }
+
+ if (fprintf(f, kFileSignature, kCurrentVersion) < 0) {
+ *err = strerror(errno);
+ fclose(f);
+ return false;
+ }
+
+ vector<StringPiece> dead_outputs;
+ for (Entries::iterator i = entries_.begin(); i != entries_.end(); ++i) {
+ if (user.IsPathDead(i->first)) {
+ dead_outputs.push_back(i->first);
+ continue;
+ }
+
+ if (!WriteEntry(f, *i->second)) {
+ *err = strerror(errno);
+ fclose(f);
+ return false;
+ }
+ }
+
+ for (size_t i = 0; i < dead_outputs.size(); ++i)
+ entries_.erase(dead_outputs[i]);
+
+ fclose(f);
+ if (unlink(path.c_str()) < 0) {
+ *err = strerror(errno);
+ return false;
+ }
+
+ if (rename(temp_path.c_str(), path.c_str()) < 0) {
+ *err = strerror(errno);
+ return false;
+ }
+
+ return true;
+}
+
+bool BuildLog::Restat(const StringPiece path,
+ const DiskInterface& disk_interface,
+ const int output_count, char** outputs,
+ std::string* const err) {
+ METRIC_RECORD(".ninja_log restat");
+
+ Close();
+ std::string temp_path = path.AsString() + ".restat";
+ FILE* f = fopen(temp_path.c_str(), "wb");
+ if (!f) {
+ *err = strerror(errno);
+ return false;
+ }
+
+ if (fprintf(f, kFileSignature, kCurrentVersion) < 0) {
+ *err = strerror(errno);
+ fclose(f);
+ return false;
+ }
+ for (Entries::iterator i = entries_.begin(); i != entries_.end(); ++i) {
+ bool skip = output_count > 0;
+ for (int j = 0; j < output_count; ++j) {
+ if (i->second->output == outputs[j]) {
+ skip = false;
+ break;
+ }
+ }
+ if (!skip) {
+ const TimeStamp mtime = disk_interface.Stat(i->second->output, err);
+ if (mtime == -1) {
+ fclose(f);
+ return false;
+ }
+ i->second->mtime = mtime;
+ }
+
+ if (!WriteEntry(f, *i->second)) {
+ *err = strerror(errno);
+ fclose(f);
+ return false;
+ }
+ }
+
+ fclose(f);
+ if (unlink(path.str_) < 0) {
+ *err = strerror(errno);
+ return false;
+ }
+
+ if (rename(temp_path.c_str(), path.str_) < 0) {
+ *err = strerror(errno);
+ return false;
+ }
+
+ return true;
+}
diff --git a/src/build_log.h b/src/build_log.h
new file mode 100644
index 0000000..88551e3
--- /dev/null
+++ b/src/build_log.h
@@ -0,0 +1,107 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_BUILD_LOG_H_
+#define NINJA_BUILD_LOG_H_
+
+#include <string>
+#include <stdio.h>
+
+#include "hash_map.h"
+#include "load_status.h"
+#include "timestamp.h"
+#include "util.h" // uint64_t
+
+struct DiskInterface;
+struct Edge;
+
+/// Can answer questions about the manifest for the BuildLog.
+struct BuildLogUser {
+ /// Return if a given output is no longer part of the build manifest.
+ /// This is only called during recompaction and doesn't have to be fast.
+ virtual bool IsPathDead(StringPiece s) const = 0;
+};
+
+/// Store a log of every command ran for every build.
+/// It has a few uses:
+///
+/// 1) (hashes of) command lines for existing output files, so we know
+/// when we need to rebuild due to the command changing
+/// 2) timing information, perhaps for generating reports
+/// 3) restat information
+struct BuildLog {
+ BuildLog();
+ ~BuildLog();
+
+ /// Prepares writing to the log file without actually opening it - that will
+ /// happen when/if it's needed
+ bool OpenForWrite(const std::string& path, const BuildLogUser& user,
+ std::string* err);
+ bool RecordCommand(Edge* edge, int start_time, int end_time,
+ TimeStamp mtime = 0);
+ void Close();
+
+ /// Load the on-disk log.
+ LoadStatus Load(const std::string& path, std::string* err);
+
+ struct LogEntry {
+ std::string output;
+ uint64_t command_hash;
+ int start_time;
+ int end_time;
+ TimeStamp mtime;
+
+ static uint64_t HashCommand(StringPiece command);
+
+ // Used by tests.
+ bool operator==(const LogEntry& o) {
+ return output == o.output && command_hash == o.command_hash &&
+ start_time == o.start_time && end_time == o.end_time &&
+ mtime == o.mtime;
+ }
+
+ explicit LogEntry(const std::string& output);
+ LogEntry(const std::string& output, uint64_t command_hash,
+ int start_time, int end_time, TimeStamp restat_mtime);
+ };
+
+ /// Lookup a previously-run command by its output path.
+ LogEntry* LookupByOutput(const std::string& path);
+
+ /// Serialize an entry into a log file.
+ bool WriteEntry(FILE* f, const LogEntry& entry);
+
+ /// Rewrite the known log entries, throwing away old data.
+ bool Recompact(const std::string& path, const BuildLogUser& user,
+ std::string* err);
+
+ /// Restat all outputs in the log
+ bool Restat(StringPiece path, const DiskInterface& disk_interface,
+ int output_count, char** outputs, std::string* err);
+
+ typedef ExternalStringHashMap<LogEntry*>::Type Entries;
+ const Entries& entries() const { return entries_; }
+
+ private:
+ /// Should be called before using log_file_. When false is returned, errno
+ /// will be set.
+ bool OpenForWriteIfNeeded();
+
+ Entries entries_;
+ FILE* log_file_;
+ std::string log_file_path_;
+ bool needs_recompaction_;
+};
+
+#endif // NINJA_BUILD_LOG_H_
diff --git a/src/build_log_perftest.cc b/src/build_log_perftest.cc
new file mode 100644
index 0000000..ced0df9
--- /dev/null
+++ b/src/build_log_perftest.cc
@@ -0,0 +1,151 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "build_log.h"
+#include "graph.h"
+#include "manifest_parser.h"
+#include "state.h"
+#include "util.h"
+#include "metrics.h"
+
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+
+using namespace std;
+
+const char kTestFilename[] = "BuildLogPerfTest-tempfile";
+
+struct NoDeadPaths : public BuildLogUser {
+ virtual bool IsPathDead(StringPiece) const { return false; }
+};
+
+bool WriteTestData(string* err) {
+ BuildLog log;
+
+ NoDeadPaths no_dead_paths;
+ if (!log.OpenForWrite(kTestFilename, no_dead_paths, err))
+ return false;
+
+ /*
+ A histogram of command lengths in chromium. For example, 407 builds,
+ 1.4% of all builds, had commands longer than 32 bytes but shorter than 64.
+ 32 407 1.4%
+ 64 183 0.6%
+ 128 1461 5.1%
+ 256 791 2.8%
+ 512 1314 4.6%
+ 1024 6114 21.3%
+ 2048 11759 41.0%
+ 4096 2056 7.2%
+ 8192 4567 15.9%
+ 16384 13 0.0%
+ 32768 4 0.0%
+ 65536 5 0.0%
+ The average command length is 4.1 kB and there were 28674 commands in total,
+ which makes for a total log size of ~120 MB (also counting output filenames).
+
+ Based on this, write 30000 many 4 kB long command lines.
+ */
+
+ // ManifestParser is the only object allowed to create Rules.
+ const size_t kRuleSize = 4000;
+ string long_rule_command = "gcc ";
+ for (int i = 0; long_rule_command.size() < kRuleSize; ++i) {
+ char buf[80];
+ sprintf(buf, "-I../../and/arbitrary/but/fairly/long/path/suffixed/%d ", i);
+ long_rule_command += buf;
+ }
+ long_rule_command += "$in -o $out\n";
+
+ State state;
+ ManifestParser parser(&state, NULL);
+ if (!parser.ParseTest("rule cxx\n command = " + long_rule_command, err))
+ return false;
+
+ // Create build edges. Using ManifestParser is as fast as using the State api
+ // for edge creation, so just use that.
+ const int kNumCommands = 30000;
+ string build_rules;
+ for (int i = 0; i < kNumCommands; ++i) {
+ char buf[80];
+ sprintf(buf, "build input%d.o: cxx input%d.cc\n", i, i);
+ build_rules += buf;
+ }
+
+ if (!parser.ParseTest(build_rules, err))
+ return false;
+
+ for (int i = 0; i < kNumCommands; ++i) {
+ log.RecordCommand(state.edges_[i],
+ /*start_time=*/100 * i,
+ /*end_time=*/100 * i + 1,
+ /*mtime=*/0);
+ }
+
+ return true;
+}
+
+int main() {
+ vector<int> times;
+ string err;
+
+ if (!WriteTestData(&err)) {
+ fprintf(stderr, "Failed to write test data: %s\n", err.c_str());
+ return 1;
+ }
+
+ {
+ // Read once to warm up disk cache.
+ BuildLog log;
+ if (!log.Load(kTestFilename, &err)) {
+ fprintf(stderr, "Failed to read test data: %s\n", err.c_str());
+ return 1;
+ }
+ }
+ const int kNumRepetitions = 5;
+ for (int i = 0; i < kNumRepetitions; ++i) {
+ int64_t start = GetTimeMillis();
+ BuildLog log;
+ if (!log.Load(kTestFilename, &err)) {
+ fprintf(stderr, "Failed to read test data: %s\n", err.c_str());
+ return 1;
+ }
+ int delta = (int)(GetTimeMillis() - start);
+ printf("%dms\n", delta);
+ times.push_back(delta);
+ }
+
+ int min = times[0];
+ int max = times[0];
+ float total = 0;
+ for (size_t i = 0; i < times.size(); ++i) {
+ total += times[i];
+ if (times[i] < min)
+ min = times[i];
+ else if (times[i] > max)
+ max = times[i];
+ }
+
+ printf("min %dms max %dms avg %.1fms\n",
+ min, max, total / times.size());
+
+ unlink(kTestFilename);
+
+ return 0;
+}
+
diff --git a/src/build_log_test.cc b/src/build_log_test.cc
new file mode 100644
index 0000000..3718299
--- /dev/null
+++ b/src/build_log_test.cc
@@ -0,0 +1,358 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "build_log.h"
+
+#include "util.h"
+#include "test.h"
+
+#include <sys/stat.h>
+#ifdef _WIN32
+#include <fcntl.h>
+#include <share.h>
+#else
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+#include <cassert>
+
+using namespace std;
+
+namespace {
+
+const char kTestFilename[] = "BuildLogTest-tempfile";
+
+struct BuildLogTest : public StateTestWithBuiltinRules, public BuildLogUser {
+ virtual void SetUp() {
+ // In case a crashing test left a stale file behind.
+ unlink(kTestFilename);
+ }
+ virtual void TearDown() {
+ unlink(kTestFilename);
+ }
+ virtual bool IsPathDead(StringPiece s) const { return false; }
+};
+
+TEST_F(BuildLogTest, WriteRead) {
+ AssertParse(&state_,
+"build out: cat mid\n"
+"build mid: cat in\n");
+
+ BuildLog log1;
+ string err;
+ EXPECT_TRUE(log1.OpenForWrite(kTestFilename, *this, &err));
+ ASSERT_EQ("", err);
+ log1.RecordCommand(state_.edges_[0], 15, 18);
+ log1.RecordCommand(state_.edges_[1], 20, 25);
+ log1.Close();
+
+ BuildLog log2;
+ EXPECT_TRUE(log2.Load(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ ASSERT_EQ(2u, log1.entries().size());
+ ASSERT_EQ(2u, log2.entries().size());
+ BuildLog::LogEntry* e1 = log1.LookupByOutput("out");
+ ASSERT_TRUE(e1);
+ BuildLog::LogEntry* e2 = log2.LookupByOutput("out");
+ ASSERT_TRUE(e2);
+ ASSERT_TRUE(*e1 == *e2);
+ ASSERT_EQ(15, e1->start_time);
+ ASSERT_EQ("out", e1->output);
+}
+
+TEST_F(BuildLogTest, FirstWriteAddsSignature) {
+ const char kExpectedVersion[] = "# ninja log vX\n";
+ const size_t kVersionPos = strlen(kExpectedVersion) - 2; // Points at 'X'.
+
+ BuildLog log;
+ string contents, err;
+
+ EXPECT_TRUE(log.OpenForWrite(kTestFilename, *this, &err));
+ ASSERT_EQ("", err);
+ log.Close();
+
+ ASSERT_EQ(0, ReadFile(kTestFilename, &contents, &err));
+ ASSERT_EQ("", err);
+ if (contents.size() >= kVersionPos)
+ contents[kVersionPos] = 'X';
+ EXPECT_EQ(kExpectedVersion, contents);
+
+ // Opening the file anew shouldn't add a second version string.
+ EXPECT_TRUE(log.OpenForWrite(kTestFilename, *this, &err));
+ ASSERT_EQ("", err);
+ log.Close();
+
+ contents.clear();
+ ASSERT_EQ(0, ReadFile(kTestFilename, &contents, &err));
+ ASSERT_EQ("", err);
+ if (contents.size() >= kVersionPos)
+ contents[kVersionPos] = 'X';
+ EXPECT_EQ(kExpectedVersion, contents);
+}
+
+TEST_F(BuildLogTest, DoubleEntry) {
+ FILE* f = fopen(kTestFilename, "wb");
+ fprintf(f, "# ninja log v4\n");
+ fprintf(f, "0\t1\t2\tout\tcommand abc\n");
+ fprintf(f, "3\t4\t5\tout\tcommand def\n");
+ fclose(f);
+
+ string err;
+ BuildLog log;
+ EXPECT_TRUE(log.Load(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ BuildLog::LogEntry* e = log.LookupByOutput("out");
+ ASSERT_TRUE(e);
+ ASSERT_NO_FATAL_FAILURE(AssertHash("command def", e->command_hash));
+}
+
+TEST_F(BuildLogTest, Truncate) {
+ AssertParse(&state_,
+"build out: cat mid\n"
+"build mid: cat in\n");
+
+ {
+ BuildLog log1;
+ string err;
+ EXPECT_TRUE(log1.OpenForWrite(kTestFilename, *this, &err));
+ ASSERT_EQ("", err);
+ log1.RecordCommand(state_.edges_[0], 15, 18);
+ log1.RecordCommand(state_.edges_[1], 20, 25);
+ log1.Close();
+ }
+
+ struct stat statbuf;
+ ASSERT_EQ(0, stat(kTestFilename, &statbuf));
+ ASSERT_GT(statbuf.st_size, 0);
+
+ // For all possible truncations of the input file, assert that we don't
+ // crash when parsing.
+ for (off_t size = statbuf.st_size; size > 0; --size) {
+ BuildLog log2;
+ string err;
+ EXPECT_TRUE(log2.OpenForWrite(kTestFilename, *this, &err));
+ ASSERT_EQ("", err);
+ log2.RecordCommand(state_.edges_[0], 15, 18);
+ log2.RecordCommand(state_.edges_[1], 20, 25);
+ log2.Close();
+
+ ASSERT_TRUE(Truncate(kTestFilename, size, &err));
+
+ BuildLog log3;
+ err.clear();
+ ASSERT_TRUE(log3.Load(kTestFilename, &err) == LOAD_SUCCESS || !err.empty());
+ }
+}
+
+TEST_F(BuildLogTest, ObsoleteOldVersion) {
+ FILE* f = fopen(kTestFilename, "wb");
+ fprintf(f, "# ninja log v3\n");
+ fprintf(f, "123 456 0 out command\n");
+ fclose(f);
+
+ string err;
+ BuildLog log;
+ EXPECT_TRUE(log.Load(kTestFilename, &err));
+ ASSERT_NE(err.find("version"), string::npos);
+}
+
+TEST_F(BuildLogTest, SpacesInOutputV4) {
+ FILE* f = fopen(kTestFilename, "wb");
+ fprintf(f, "# ninja log v4\n");
+ fprintf(f, "123\t456\t456\tout with space\tcommand\n");
+ fclose(f);
+
+ string err;
+ BuildLog log;
+ EXPECT_TRUE(log.Load(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ BuildLog::LogEntry* e = log.LookupByOutput("out with space");
+ ASSERT_TRUE(e);
+ ASSERT_EQ(123, e->start_time);
+ ASSERT_EQ(456, e->end_time);
+ ASSERT_EQ(456, e->mtime);
+ ASSERT_NO_FATAL_FAILURE(AssertHash("command", e->command_hash));
+}
+
+TEST_F(BuildLogTest, DuplicateVersionHeader) {
+ // Old versions of ninja accidentally wrote multiple version headers to the
+ // build log on Windows. This shouldn't crash, and the second version header
+ // should be ignored.
+ FILE* f = fopen(kTestFilename, "wb");
+ fprintf(f, "# ninja log v4\n");
+ fprintf(f, "123\t456\t456\tout\tcommand\n");
+ fprintf(f, "# ninja log v4\n");
+ fprintf(f, "456\t789\t789\tout2\tcommand2\n");
+ fclose(f);
+
+ string err;
+ BuildLog log;
+ EXPECT_TRUE(log.Load(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ BuildLog::LogEntry* e = log.LookupByOutput("out");
+ ASSERT_TRUE(e);
+ ASSERT_EQ(123, e->start_time);
+ ASSERT_EQ(456, e->end_time);
+ ASSERT_EQ(456, e->mtime);
+ ASSERT_NO_FATAL_FAILURE(AssertHash("command", e->command_hash));
+
+ e = log.LookupByOutput("out2");
+ ASSERT_TRUE(e);
+ ASSERT_EQ(456, e->start_time);
+ ASSERT_EQ(789, e->end_time);
+ ASSERT_EQ(789, e->mtime);
+ ASSERT_NO_FATAL_FAILURE(AssertHash("command2", e->command_hash));
+}
+
+struct TestDiskInterface : public DiskInterface {
+ virtual TimeStamp Stat(const string& path, string* err) const {
+ return 4;
+ }
+ virtual bool WriteFile(const string& path, const string& contents) {
+ assert(false);
+ return true;
+ }
+ virtual bool MakeDir(const string& path) {
+ assert(false);
+ return false;
+ }
+ virtual Status ReadFile(const string& path, string* contents, string* err) {
+ assert(false);
+ return NotFound;
+ }
+ virtual int RemoveFile(const string& path) {
+ assert(false);
+ return 0;
+ }
+};
+
+TEST_F(BuildLogTest, Restat) {
+ FILE* f = fopen(kTestFilename, "wb");
+ fprintf(f, "# ninja log v4\n"
+ "1\t2\t3\tout\tcommand\n");
+ fclose(f);
+ std::string err;
+ BuildLog log;
+ EXPECT_TRUE(log.Load(kTestFilename, &err));
+ ASSERT_EQ("", err);
+ BuildLog::LogEntry* e = log.LookupByOutput("out");
+ ASSERT_EQ(3, e->mtime);
+
+ TestDiskInterface testDiskInterface;
+ char out2[] = { 'o', 'u', 't', '2', 0 };
+ char* filter2[] = { out2 };
+ EXPECT_TRUE(log.Restat(kTestFilename, testDiskInterface, 1, filter2, &err));
+ ASSERT_EQ("", err);
+ e = log.LookupByOutput("out");
+ ASSERT_EQ(3, e->mtime); // unchanged, since the filter doesn't match
+
+ EXPECT_TRUE(log.Restat(kTestFilename, testDiskInterface, 0, NULL, &err));
+ ASSERT_EQ("", err);
+ e = log.LookupByOutput("out");
+ ASSERT_EQ(4, e->mtime);
+}
+
+TEST_F(BuildLogTest, VeryLongInputLine) {
+ // Ninja's build log buffer is currently 256kB. Lines longer than that are
+ // silently ignored, but don't affect parsing of other lines.
+ FILE* f = fopen(kTestFilename, "wb");
+ fprintf(f, "# ninja log v4\n");
+ fprintf(f, "123\t456\t456\tout\tcommand start");
+ for (size_t i = 0; i < (512 << 10) / strlen(" more_command"); ++i)
+ fputs(" more_command", f);
+ fprintf(f, "\n");
+ fprintf(f, "456\t789\t789\tout2\tcommand2\n");
+ fclose(f);
+
+ string err;
+ BuildLog log;
+ EXPECT_TRUE(log.Load(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ BuildLog::LogEntry* e = log.LookupByOutput("out");
+ ASSERT_EQ(NULL, e);
+
+ e = log.LookupByOutput("out2");
+ ASSERT_TRUE(e);
+ ASSERT_EQ(456, e->start_time);
+ ASSERT_EQ(789, e->end_time);
+ ASSERT_EQ(789, e->mtime);
+ ASSERT_NO_FATAL_FAILURE(AssertHash("command2", e->command_hash));
+}
+
+TEST_F(BuildLogTest, MultiTargetEdge) {
+ AssertParse(&state_,
+"build out out.d: cat\n");
+
+ BuildLog log;
+ log.RecordCommand(state_.edges_[0], 21, 22);
+
+ ASSERT_EQ(2u, log.entries().size());
+ BuildLog::LogEntry* e1 = log.LookupByOutput("out");
+ ASSERT_TRUE(e1);
+ BuildLog::LogEntry* e2 = log.LookupByOutput("out.d");
+ ASSERT_TRUE(e2);
+ ASSERT_EQ("out", e1->output);
+ ASSERT_EQ("out.d", e2->output);
+ ASSERT_EQ(21, e1->start_time);
+ ASSERT_EQ(21, e2->start_time);
+ ASSERT_EQ(22, e2->end_time);
+ ASSERT_EQ(22, e2->end_time);
+}
+
+struct BuildLogRecompactTest : public BuildLogTest {
+ virtual bool IsPathDead(StringPiece s) const { return s == "out2"; }
+};
+
+TEST_F(BuildLogRecompactTest, Recompact) {
+ AssertParse(&state_,
+"build out: cat in\n"
+"build out2: cat in\n");
+
+ BuildLog log1;
+ string err;
+ EXPECT_TRUE(log1.OpenForWrite(kTestFilename, *this, &err));
+ ASSERT_EQ("", err);
+ // Record the same edge several times, to trigger recompaction
+ // the next time the log is opened.
+ for (int i = 0; i < 200; ++i)
+ log1.RecordCommand(state_.edges_[0], 15, 18 + i);
+ log1.RecordCommand(state_.edges_[1], 21, 22);
+ log1.Close();
+
+ // Load...
+ BuildLog log2;
+ EXPECT_TRUE(log2.Load(kTestFilename, &err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(2u, log2.entries().size());
+ ASSERT_TRUE(log2.LookupByOutput("out"));
+ ASSERT_TRUE(log2.LookupByOutput("out2"));
+ // ...and force a recompaction.
+ EXPECT_TRUE(log2.OpenForWrite(kTestFilename, *this, &err));
+ log2.Close();
+
+ // "out2" is dead, it should've been removed.
+ BuildLog log3;
+ EXPECT_TRUE(log2.Load(kTestFilename, &err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(1u, log2.entries().size());
+ ASSERT_TRUE(log2.LookupByOutput("out"));
+ ASSERT_FALSE(log2.LookupByOutput("out2"));
+}
+
+} // anonymous namespace
diff --git a/src/build_test.cc b/src/build_test.cc
new file mode 100644
index 0000000..078080d
--- /dev/null
+++ b/src/build_test.cc
@@ -0,0 +1,3304 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "build.h"
+
+#include <assert.h>
+
+#include "build_log.h"
+#include "deps_log.h"
+#include "graph.h"
+#include "test.h"
+
+using namespace std;
+
+struct CompareEdgesByOutput {  // Comparator giving tests a deterministic edge order.
+  static bool cmp(const Edge* a, const Edge* b) {
+    return a->outputs_[0]->path() < b->outputs_[0]->path();  // order by first output's path
+  }
+};
+
+/// Fixture for tests involving Plan.
+// Though Plan doesn't use State, it's useful to have one around
+// to create Nodes and Edges.
+struct PlanTest : public StateTestWithBuiltinRules {
+  Plan plan_;
+
+  /// Because FindWork does not return Edges in any sort of predictable order,
+  // provide a means to get available Edges in order and in a format which is
+  // easy to write tests around.
+  void FindWorkSorted(deque<Edge*>* ret, int count) {
+    for (int i = 0; i < count; ++i) {
+      ASSERT_TRUE(plan_.more_to_do());
+      Edge* edge = plan_.FindWork();
+      ASSERT_TRUE(edge);
+      ret->push_back(edge);
+    }
+    ASSERT_FALSE(plan_.FindWork());  // exactly `count` edges must have been ready, no more
+    sort(ret->begin(), ret->end(), CompareEdgesByOutput::cmp);  // deterministic order for assertions
+  }
+
+  void TestPoolWithDepthOne(const char *test_case);  // shared body for the depth-1 pool tests below
+};
+
+TEST_F(PlanTest, Basic) {  // Plan hands out edges of a two-step chain in dependency order.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out: cat mid\n"
+"build mid: cat in\n"));
+  GetNode("mid")->MarkDirty();
+  GetNode("out")->MarkDirty();
+  string err;
+  EXPECT_TRUE(plan_.AddTarget(GetNode("out"), &err));
+  ASSERT_EQ("", err);
+  ASSERT_TRUE(plan_.more_to_do());
+
+  Edge* edge = plan_.FindWork();
+  ASSERT_TRUE(edge);
+  ASSERT_EQ("in", edge->inputs_[0]->path());
+  ASSERT_EQ("mid", edge->outputs_[0]->path());
+
+  ASSERT_FALSE(plan_.FindWork());  // "out" is blocked until "mid" finishes
+
+  plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
+  ASSERT_EQ("", err);
+
+  edge = plan_.FindWork();
+  ASSERT_TRUE(edge);
+  ASSERT_EQ("mid", edge->inputs_[0]->path());
+  ASSERT_EQ("out", edge->outputs_[0]->path());
+
+  plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
+  ASSERT_EQ("", err);
+
+  ASSERT_FALSE(plan_.more_to_do());
+  edge = plan_.FindWork();
+  ASSERT_EQ(0, edge);  // plan exhausted
+}
+
+// Test that two outputs from one rule can be handled as inputs to the next.
+TEST_F(PlanTest, DoubleOutputDirect) {
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out: cat mid1 mid2\n"
+"build mid1 mid2: cat in\n"));
+  GetNode("mid1")->MarkDirty();
+  GetNode("mid2")->MarkDirty();
+  GetNode("out")->MarkDirty();
+
+  string err;
+  EXPECT_TRUE(plan_.AddTarget(GetNode("out"), &err));
+  ASSERT_EQ("", err);
+  ASSERT_TRUE(plan_.more_to_do());
+
+  Edge* edge;
+  edge = plan_.FindWork();
+  ASSERT_TRUE(edge);  // cat in
+  plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
+  ASSERT_EQ("", err);
+
+  edge = plan_.FindWork();
+  ASSERT_TRUE(edge);  // cat mid1 mid2
+  plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
+  ASSERT_EQ("", err);
+
+  edge = plan_.FindWork();
+  ASSERT_FALSE(edge);  // done
+}
+
+// Test that two outputs from one rule can eventually be routed to another.
+TEST_F(PlanTest, DoubleOutputIndirect) {
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out: cat b1 b2\n"
+"build b1: cat a1\n"
+"build b2: cat a2\n"
+"build a1 a2: cat in\n"));
+  GetNode("a1")->MarkDirty();
+  GetNode("a2")->MarkDirty();
+  GetNode("b1")->MarkDirty();
+  GetNode("b2")->MarkDirty();
+  GetNode("out")->MarkDirty();
+  string err;
+  EXPECT_TRUE(plan_.AddTarget(GetNode("out"), &err));
+  ASSERT_EQ("", err);
+  ASSERT_TRUE(plan_.more_to_do());
+
+  Edge* edge;
+  edge = plan_.FindWork();
+  ASSERT_TRUE(edge);  // cat in
+  plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
+  ASSERT_EQ("", err);
+
+  edge = plan_.FindWork();
+  ASSERT_TRUE(edge);  // cat a1
+  plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
+  ASSERT_EQ("", err);
+
+  edge = plan_.FindWork();
+  ASSERT_TRUE(edge);  // cat a2
+  plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
+  ASSERT_EQ("", err);
+
+  edge = plan_.FindWork();
+  ASSERT_TRUE(edge);  // cat b1 b2
+  plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
+  ASSERT_EQ("", err);
+
+  edge = plan_.FindWork();
+  ASSERT_FALSE(edge);  // done
+}
+
+// Test that two edges from one output can both execute.
+TEST_F(PlanTest, DoubleDependent) {
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out: cat a1 a2\n"
+"build a1: cat mid\n"
+"build a2: cat mid\n"
+"build mid: cat in\n"));
+  GetNode("mid")->MarkDirty();
+  GetNode("a1")->MarkDirty();
+  GetNode("a2")->MarkDirty();
+  GetNode("out")->MarkDirty();
+
+  string err;
+  EXPECT_TRUE(plan_.AddTarget(GetNode("out"), &err));
+  ASSERT_EQ("", err);
+  ASSERT_TRUE(plan_.more_to_do());
+
+  Edge* edge;
+  edge = plan_.FindWork();
+  ASSERT_TRUE(edge);  // cat in
+  plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
+  ASSERT_EQ("", err);
+
+  edge = plan_.FindWork();
+  ASSERT_TRUE(edge);  // cat mid
+  plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
+  ASSERT_EQ("", err);
+
+  edge = plan_.FindWork();
+  ASSERT_TRUE(edge);  // cat mid
+  plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
+  ASSERT_EQ("", err);
+
+  edge = plan_.FindWork();
+  ASSERT_TRUE(edge);  // cat a1 a2
+  plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
+  ASSERT_EQ("", err);
+
+  edge = plan_.FindWork();
+  ASSERT_FALSE(edge);  // done
+}
+
+void PlanTest::TestPoolWithDepthOne(const char* test_case) {  // A depth-1 pool serializes out1 and out2.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, test_case));
+  GetNode("out1")->MarkDirty();
+  GetNode("out2")->MarkDirty();
+  string err;
+  EXPECT_TRUE(plan_.AddTarget(GetNode("out1"), &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(plan_.AddTarget(GetNode("out2"), &err));
+  ASSERT_EQ("", err);
+  ASSERT_TRUE(plan_.more_to_do());
+
+  Edge* edge = plan_.FindWork();
+  ASSERT_TRUE(edge);
+  ASSERT_EQ("in", edge->inputs_[0]->path());
+  ASSERT_EQ("out1", edge->outputs_[0]->path());
+
+  // This will be false since poolcat is serialized
+  ASSERT_FALSE(plan_.FindWork());
+
+  plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
+  ASSERT_EQ("", err);
+
+  edge = plan_.FindWork();
+  ASSERT_TRUE(edge);
+  ASSERT_EQ("in", edge->inputs_[0]->path());
+  ASSERT_EQ("out2", edge->outputs_[0]->path());
+
+  ASSERT_FALSE(plan_.FindWork());
+
+  plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
+  ASSERT_EQ("", err);
+
+  ASSERT_FALSE(plan_.more_to_do());
+  edge = plan_.FindWork();
+  ASSERT_EQ(0, edge);
+}
+
+TEST_F(PlanTest, PoolWithDepthOne) {  // Explicit pool with depth = 1.
+  TestPoolWithDepthOne(
+"pool foobar\n"
+" depth = 1\n"
+"rule poolcat\n"
+" command = cat $in > $out\n"
+" pool = foobar\n"
+"build out1: poolcat in\n"
+"build out2: poolcat in\n");
+}
+
+TEST_F(PlanTest, ConsolePool) {  // Built-in "console" pool also has depth 1.
+  TestPoolWithDepthOne(
+"rule poolcat\n"
+" command = cat $in > $out\n"
+" pool = console\n"
+"build out1: poolcat in\n"
+"build out2: poolcat in\n");
+}
+
+TEST_F(PlanTest, PoolsWithDepthTwo) {  // Two depth-2 pools limit concurrency independently; empty pool exempts an edge.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"pool foobar\n"
+" depth = 2\n"
+"pool bazbin\n"
+" depth = 2\n"
+"rule foocat\n"
+" command = cat $in > $out\n"
+" pool = foobar\n"
+"rule bazcat\n"
+" command = cat $in > $out\n"
+" pool = bazbin\n"
+"build out1: foocat in\n"
+"build out2: foocat in\n"
+"build out3: foocat in\n"
+"build outb1: bazcat in\n"
+"build outb2: bazcat in\n"
+"build outb3: bazcat in\n"
+" pool =\n"
+"build allTheThings: cat out1 out2 out3 outb1 outb2 outb3\n"
+));
+  // Mark all the out* nodes dirty
+  for (int i = 0; i < 3; ++i) {
+    GetNode("out" + string(1, '1' + static_cast<char>(i)))->MarkDirty();
+    GetNode("outb" + string(1, '1' + static_cast<char>(i)))->MarkDirty();
+  }
+  GetNode("allTheThings")->MarkDirty();
+
+  string err;
+  EXPECT_TRUE(plan_.AddTarget(GetNode("allTheThings"), &err));
+  ASSERT_EQ("", err);
+
+  deque<Edge*> edges;
+  FindWorkSorted(&edges, 5);  // 2 from each pool + the pool-exempt outb3
+
+  for (int i = 0; i < 4; ++i) {
+    Edge *edge = edges[i];
+    ASSERT_EQ("in", edge->inputs_[0]->path());
+    string base_name(i < 2 ? "out" : "outb");
+    ASSERT_EQ(base_name + string(1, '1' + (i % 2)), edge->outputs_[0]->path());
+  }
+
+  // outb3 is exempt because it has an empty pool
+  Edge* edge = edges[4];
+  ASSERT_TRUE(edge);
+  ASSERT_EQ("in", edge->inputs_[0]->path());
+  ASSERT_EQ("outb3", edge->outputs_[0]->path());
+
+  // finish out1
+  plan_.EdgeFinished(edges.front(), Plan::kEdgeSucceeded, &err);
+  ASSERT_EQ("", err);
+  edges.pop_front();
+
+  // out3 should be available
+  Edge* out3 = plan_.FindWork();
+  ASSERT_TRUE(out3);
+  ASSERT_EQ("in", out3->inputs_[0]->path());
+  ASSERT_EQ("out3", out3->outputs_[0]->path());
+
+  ASSERT_FALSE(plan_.FindWork());
+
+  plan_.EdgeFinished(out3, Plan::kEdgeSucceeded, &err);
+  ASSERT_EQ("", err);
+
+  ASSERT_FALSE(plan_.FindWork());  // allTheThings still blocked on remaining edges
+
+  for (deque<Edge*>::iterator it = edges.begin(); it != edges.end(); ++it) {
+    plan_.EdgeFinished(*it, Plan::kEdgeSucceeded, &err);
+    ASSERT_EQ("", err);
+  }
+
+  Edge* last = plan_.FindWork();
+  ASSERT_TRUE(last);
+  ASSERT_EQ("allTheThings", last->outputs_[0]->path());
+
+  plan_.EdgeFinished(last, Plan::kEdgeSucceeded, &err);
+  ASSERT_EQ("", err);
+
+  ASSERT_FALSE(plan_.more_to_do());
+  ASSERT_FALSE(plan_.FindWork());
+}
+
+TEST_F(PlanTest, PoolWithRedundantEdges) {  // Edges listed both as explicit and order-only deps don't confuse pool scheduling.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+    "pool compile\n"
+    " depth = 1\n"
+    "rule gen_foo\n"
+    " command = touch foo.cpp\n"
+    "rule gen_bar\n"
+    " command = touch bar.cpp\n"
+    "rule echo\n"
+    " command = echo $out > $out\n"
+    "build foo.cpp.obj: echo foo.cpp || foo.cpp\n"
+    " pool = compile\n"
+    "build bar.cpp.obj: echo bar.cpp || bar.cpp\n"
+    " pool = compile\n"
+    "build libfoo.a: echo foo.cpp.obj bar.cpp.obj\n"
+    "build foo.cpp: gen_foo\n"
+    "build bar.cpp: gen_bar\n"
+    "build all: phony libfoo.a\n"));
+  GetNode("foo.cpp")->MarkDirty();
+  GetNode("foo.cpp.obj")->MarkDirty();
+  GetNode("bar.cpp")->MarkDirty();
+  GetNode("bar.cpp.obj")->MarkDirty();
+  GetNode("libfoo.a")->MarkDirty();
+  GetNode("all")->MarkDirty();
+  string err;
+  EXPECT_TRUE(plan_.AddTarget(GetNode("all"), &err));
+  ASSERT_EQ("", err);
+  ASSERT_TRUE(plan_.more_to_do());
+
+  Edge* edge = NULL;
+
+  deque<Edge*> initial_edges;
+  FindWorkSorted(&initial_edges, 2);  // gen_bar sorts before gen_foo
+
+  edge = initial_edges[1];  // Foo first
+  ASSERT_EQ("foo.cpp", edge->outputs_[0]->path());
+  plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
+  ASSERT_EQ("", err);
+
+  edge = plan_.FindWork();
+  ASSERT_TRUE(edge);
+  ASSERT_FALSE(plan_.FindWork());  // compile pool (depth 1) admits only one obj at a time
+  ASSERT_EQ("foo.cpp", edge->inputs_[0]->path());
+  ASSERT_EQ("foo.cpp", edge->inputs_[1]->path());  // same node appears as explicit and order-only input
+  ASSERT_EQ("foo.cpp.obj", edge->outputs_[0]->path());
+  plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
+  ASSERT_EQ("", err);
+
+  edge = initial_edges[0];  // Now for bar
+  ASSERT_EQ("bar.cpp", edge->outputs_[0]->path());
+  plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
+  ASSERT_EQ("", err);
+
+  edge = plan_.FindWork();
+  ASSERT_TRUE(edge);
+  ASSERT_FALSE(plan_.FindWork());
+  ASSERT_EQ("bar.cpp", edge->inputs_[0]->path());
+  ASSERT_EQ("bar.cpp", edge->inputs_[1]->path());
+  ASSERT_EQ("bar.cpp.obj", edge->outputs_[0]->path());
+  plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
+  ASSERT_EQ("", err);
+
+  edge = plan_.FindWork();
+  ASSERT_TRUE(edge);
+  ASSERT_FALSE(plan_.FindWork());
+  ASSERT_EQ("foo.cpp.obj", edge->inputs_[0]->path());
+  ASSERT_EQ("bar.cpp.obj", edge->inputs_[1]->path());
+  ASSERT_EQ("libfoo.a", edge->outputs_[0]->path());
+  plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
+  ASSERT_EQ("", err);
+
+  edge = plan_.FindWork();
+  ASSERT_TRUE(edge);
+  ASSERT_FALSE(plan_.FindWork());
+  ASSERT_EQ("libfoo.a", edge->inputs_[0]->path());
+  ASSERT_EQ("all", edge->outputs_[0]->path());
+  plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
+  ASSERT_EQ("", err);
+
+  edge = plan_.FindWork();
+  ASSERT_FALSE(edge);
+  ASSERT_FALSE(plan_.more_to_do());
+}
+
+TEST_F(PlanTest, PoolWithFailingEdge) {  // A failed edge must release its pool slot so the next edge can run.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+    "pool foobar\n"
+    " depth = 1\n"
+    "rule poolcat\n"
+    " command = cat $in > $out\n"
+    " pool = foobar\n"
+    "build out1: poolcat in\n"
+    "build out2: poolcat in\n"));
+  GetNode("out1")->MarkDirty();
+  GetNode("out2")->MarkDirty();
+  string err;
+  EXPECT_TRUE(plan_.AddTarget(GetNode("out1"), &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(plan_.AddTarget(GetNode("out2"), &err));
+  ASSERT_EQ("", err);
+  ASSERT_TRUE(plan_.more_to_do());
+
+  Edge* edge = plan_.FindWork();
+  ASSERT_TRUE(edge);
+  ASSERT_EQ("in", edge->inputs_[0]->path());
+  ASSERT_EQ("out1", edge->outputs_[0]->path());
+
+  // This will be false since poolcat is serialized
+  ASSERT_FALSE(plan_.FindWork());
+
+  plan_.EdgeFinished(edge, Plan::kEdgeFailed, &err);
+  ASSERT_EQ("", err);
+
+  edge = plan_.FindWork();
+  ASSERT_TRUE(edge);  // out2 is schedulable despite out1's failure
+  ASSERT_EQ("in", edge->inputs_[0]->path());
+  ASSERT_EQ("out2", edge->outputs_[0]->path());
+
+  ASSERT_FALSE(plan_.FindWork());
+
+  plan_.EdgeFinished(edge, Plan::kEdgeFailed, &err);
+  ASSERT_EQ("", err);
+
+  ASSERT_TRUE(plan_.more_to_do()); // Jobs have failed
+  edge = plan_.FindWork();
+  ASSERT_EQ(0, edge);
+}
+
+/// Fake implementation of CommandRunner, useful for tests.
+struct FakeCommandRunner : public CommandRunner {
+  explicit FakeCommandRunner(VirtualFileSystem* fs) :
+      max_active_edges_(1), fs_(fs) {}  // serial by default; tests raise max_active_edges_ for parallelism
+
+  // CommandRunner impl
+  virtual bool CanRunMore() const;
+  virtual bool StartCommand(Edge* edge);
+  virtual bool WaitForCommand(Result* result);
+  virtual vector<Edge*> GetActiveEdges();
+  virtual void Abort();
+
+  vector<string> commands_ran_;  // every command line ever "executed", in order
+  vector<Edge*> active_edges_;   // started but not yet waited-on edges
+  size_t max_active_edges_;
+  VirtualFileSystem* fs_;        // not owned; outputs are faked through this
+};
+
+struct BuildTest : public StateTestWithBuiltinRules, public BuildLogUser {  // Fixture wiring a Builder to fake fs/runner.
+  BuildTest() : config_(MakeConfig()), command_runner_(&fs_),
+                builder_(&state_, config_, NULL, NULL, &fs_),
+                status_(config_) {
+  }
+
+  BuildTest(DepsLog* log) : config_(MakeConfig()), command_runner_(&fs_),
+                            builder_(&state_, config_, NULL, log, &fs_),
+                            status_(config_) {
+  }
+
+  virtual void SetUp() {
+    StateTestWithBuiltinRules::SetUp();
+
+    builder_.command_runner_.reset(&command_runner_);  // released again in ~BuildTest; command_runner_ is a member
+    AssertParse(&state_,
+"build cat1: cat in1\n"
+"build cat2: cat in1 in2\n"
+"build cat12: cat cat1 cat2\n");
+
+    fs_.Create("in1", "");
+    fs_.Create("in2", "");
+  }
+
+  ~BuildTest() {
+    builder_.command_runner_.release();  // avoid double-delete: the runner is stack-owned by the fixture
+  }
+
+  virtual bool IsPathDead(StringPiece s) const { return false; }  // BuildLogUser: nothing is considered dead
+
+  /// Rebuild target in the 'working tree' (fs_).
+  /// State of command_runner_ and logs contents (if specified) ARE MODIFIED.
+  /// Handy to check for NOOP builds, and higher-level rebuild tests.
+  void RebuildTarget(const string& target, const char* manifest,
+                     const char* log_path = NULL, const char* deps_path = NULL,
+                     State* state = NULL);
+
+  // Mark a path dirty.
+  void Dirty(const string& path);
+
+  BuildConfig MakeConfig() {
+    BuildConfig config;
+    config.verbosity = BuildConfig::QUIET;
+    return config;
+  }
+
+  BuildConfig config_;
+  FakeCommandRunner command_runner_;
+  VirtualFileSystem fs_;
+  Builder builder_;
+
+  BuildStatus status_;
+};
+
+void BuildTest::RebuildTarget(const string& target, const char* manifest,
+                              const char* log_path, const char* deps_path,
+                              State* state) {  // Runs a fresh Builder against `manifest`, optionally with build/deps logs.
+  State local_state, *pstate = &local_state;  // use caller's State only if provided
+  if (state)
+    pstate = state;
+  ASSERT_NO_FATAL_FAILURE(AddCatRule(pstate));
+  AssertParse(pstate, manifest);
+
+  string err;
+  BuildLog build_log, *pbuild_log = NULL;
+  if (log_path) {
+    ASSERT_TRUE(build_log.Load(log_path, &err));
+    ASSERT_TRUE(build_log.OpenForWrite(log_path, *this, &err));
+    ASSERT_EQ("", err);
+    pbuild_log = &build_log;
+  }
+
+  DepsLog deps_log, *pdeps_log = NULL;
+  if (deps_path) {
+    ASSERT_TRUE(deps_log.Load(deps_path, pstate, &err));
+    ASSERT_TRUE(deps_log.OpenForWrite(deps_path, &err));
+    ASSERT_EQ("", err);
+    pdeps_log = &deps_log;
+  }
+
+  Builder builder(pstate, config_, pbuild_log, pdeps_log, &fs_);
+  EXPECT_TRUE(builder.AddTarget(target, &err));
+
+  command_runner_.commands_ran_.clear();
+  builder.command_runner_.reset(&command_runner_);  // share the fixture's runner so tests can inspect commands_ran_
+  if (!builder.AlreadyUpToDate()) {
+    bool build_res = builder.Build(&err);
+    EXPECT_TRUE(build_res);
+  }
+  builder.command_runner_.release();  // runner is fixture-owned; don't let the local Builder delete it
+}
+
+bool FakeCommandRunner::CanRunMore() const {
+  return active_edges_.size() < max_active_edges_;  // capacity-limited; default capacity is 1 (serial)
+}
+
+bool FakeCommandRunner::StartCommand(Edge* edge) {  // Simulates an edge's side effects immediately; completion is reported by WaitForCommand.
+  assert(active_edges_.size() < max_active_edges_);
+  assert(find(active_edges_.begin(), active_edges_.end(), edge)
+         == active_edges_.end());  // an edge must not be started twice
+  commands_ran_.push_back(edge->EvaluateCommand());
+  if (edge->rule().name() == "cat" ||
+      edge->rule().name() == "cat_rsp" ||
+      edge->rule().name() == "cat_rsp_out" ||
+      edge->rule().name() == "cc" ||
+      edge->rule().name() == "cp_multi_msvc" ||
+      edge->rule().name() == "cp_multi_gcc" ||
+      edge->rule().name() == "touch" ||
+      edge->rule().name() == "touch-interrupt" ||
+      edge->rule().name() == "touch-fail-tick2") {
+    // These rules "create" all their outputs (empty files) in the virtual fs.
+    for (vector<Node*>::iterator out = edge->outputs_.begin();
+         out != edge->outputs_.end(); ++out) {
+      fs_->Create((*out)->path(), "");
+    }
+  } else if (edge->rule().name() == "true" ||
+             edge->rule().name() == "fail" ||
+             edge->rule().name() == "interrupt" ||
+             edge->rule().name() == "console") {
+    // Don't do anything.
+  } else if (edge->rule().name() == "cp") {
+    assert(!edge->inputs_.empty());
+    assert(edge->outputs_.size() == 1);
+    string content;
+    string err;
+    if (fs_->ReadFile(edge->inputs_[0]->path(), &content, &err) ==
+        DiskInterface::Okay)
+      fs_->WriteFile(edge->outputs_[0]->path(), content);
+  } else {
+    printf("unknown command\n");  // unrecognized rule: fail the start
+    return false;
+  }
+
+  active_edges_.push_back(edge);
+
+  // Allow tests to control the order by the name of the first output.
+  sort(active_edges_.begin(), active_edges_.end(),
+       CompareEdgesByOutput::cmp);
+
+  return true;
+}
+
+bool FakeCommandRunner::WaitForCommand(Result* result) {  // Reports completion of one active edge, faking per-rule results.
+  if (active_edges_.empty())
+    return false;
+
+  // All active edges were already completed immediately when started,
+  // so we can pick any edge here.  Pick the last edge.  Tests can
+  // control the order of edges by the name of the first output.
+  vector<Edge*>::iterator edge_iter = active_edges_.end() - 1;
+
+  Edge* edge = *edge_iter;
+  result->edge = edge;
+
+  if (edge->rule().name() == "interrupt" ||
+      edge->rule().name() == "touch-interrupt") {
+    result->status = ExitInterrupted;
+    return true;  // NOTE: edge deliberately stays in active_edges_ here
+  }
+
+  if (edge->rule().name() == "console") {
+    if (edge->use_console())
+      result->status = ExitSuccess;
+    else
+      result->status = ExitFailure;  // "console" rule must actually be in the console pool
+    active_edges_.erase(edge_iter);
+    return true;
+  }
+
+  if (edge->rule().name() == "cp_multi_msvc") {
+    const std::string prefix = edge->GetBinding("msvc_deps_prefix");
+    // Fake MSVC /showIncludes output: one prefixed line per input.
+    for (std::vector<Node*>::iterator in = edge->inputs_.begin();
+         in != edge->inputs_.end(); ++in) {
+      result->output += prefix + (*in)->path() + '\n';
+    }
+  }
+
+  if (edge->rule().name() == "fail" ||
+      (edge->rule().name() == "touch-fail-tick2" && fs_->now_ == 2))
+    result->status = ExitFailure;
+  else
+    result->status = ExitSuccess;
+
+  // Provide a way for test cases to verify when an edge finishes that
+  // some other edge is still active.  This is useful for test cases
+  // covering behavior involving multiple active edges.
+  const string& verify_active_edge = edge->GetBinding("verify_active_edge");
+  if (!verify_active_edge.empty()) {
+    bool verify_active_edge_found = false;
+    for (vector<Edge*>::iterator i = active_edges_.begin();
+         i != active_edges_.end(); ++i) {
+      if (!(*i)->outputs_.empty() &&
+          (*i)->outputs_[0]->path() == verify_active_edge) {
+        verify_active_edge_found = true;
+      }
+    }
+    EXPECT_TRUE(verify_active_edge_found);
+  }
+
+  active_edges_.erase(edge_iter);
+  return true;
+}
+
+vector<Edge*> FakeCommandRunner::GetActiveEdges() {
+  return active_edges_;  // copy; callers may not mutate our bookkeeping
+}
+
+void FakeCommandRunner::Abort() {
+  active_edges_.clear();  // drop in-flight edges without reporting results
+}
+
+void BuildTest::Dirty(const string& path) {  // Marks a node dirty; source files are also marked missing.
+  Node* node = GetNode(path);
+  node->MarkDirty();
+
+  // If it's an input file, mark that we've already stat()ed it and
+  // it's missing.
+  if (!node->in_edge())
+    node->MarkMissing();
+}
+
+TEST_F(BuildTest, NoWork) {  // No targets added: builder is trivially up to date.
+  string err;
+  EXPECT_TRUE(builder_.AlreadyUpToDate());
+}
+
+TEST_F(BuildTest, OneStep) {
+  // Given a dirty target with one ready input,
+  // we should rebuild the target.
+  Dirty("cat1");
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("cat1", &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(builder_.Build(&err));
+  ASSERT_EQ("", err);
+
+  ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+  EXPECT_EQ("cat in1 > cat1", command_runner_.commands_ran_[0]);
+}
+
+TEST_F(BuildTest, OneStep2) {
+  // Given a target with one dirty input,
+  // we should rebuild the target.
+  // NOTE(review): setup is identical to OneStep (dirties cat1, not in1);
+  // the comment suggests in1 was meant to be dirtied here -- confirm.
+  Dirty("cat1");
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("cat1", &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ("", err);
+
+  ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+  EXPECT_EQ("cat in1 > cat1", command_runner_.commands_ran_[0]);
+}
+
+TEST_F(BuildTest, TwoStep) {  // Full build of the default three-edge graph, then an incremental rebuild.
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("cat12", &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ("", err);
+  ASSERT_EQ(3u, command_runner_.commands_ran_.size());
+  // Depending on how the pointers work out, we could've ran
+  // the first two commands in either order.
+  EXPECT_TRUE((command_runner_.commands_ran_[0] == "cat in1 > cat1" &&
+               command_runner_.commands_ran_[1] == "cat in1 in2 > cat2") ||
+              (command_runner_.commands_ran_[1] == "cat in1 > cat1" &&
+               command_runner_.commands_ran_[0] == "cat in1 in2 > cat2"));
+
+  EXPECT_EQ("cat cat1 cat2 > cat12", command_runner_.commands_ran_[2]);
+
+  fs_.Tick();
+
+  // Modifying in2 requires rebuilding one intermediate file
+  // and the final file.
+  fs_.Create("in2", "");
+  state_.Reset();
+  EXPECT_TRUE(builder_.AddTarget("cat12", &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(builder_.Build(&err));
+  ASSERT_EQ("", err);
+  ASSERT_EQ(5u, command_runner_.commands_ran_.size());  // commands_ran_ accumulates across builds
+  EXPECT_EQ("cat in1 in2 > cat2", command_runner_.commands_ran_[3]);
+  EXPECT_EQ("cat cat1 cat2 > cat12", command_runner_.commands_ran_[4]);
+}
+
+TEST_F(BuildTest, TwoOutputs) {  // One command produces both outputs; asking for one runs it once.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+" command = touch $out\n"
+"build out1 out2: touch in.txt\n"));
+
+  fs_.Create("in.txt", "");
+
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("out1", &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ("", err);
+  ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+  EXPECT_EQ("touch out1 out2", command_runner_.commands_ran_[0]);
+}
+
+TEST_F(BuildTest, ImplicitOutput) {  // Requesting an implicit output (after "|") still builds the edge.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+" command = touch $out $out.imp\n"
+"build out | out.imp: touch in.txt\n"));
+  fs_.Create("in.txt", "");
+
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("out.imp", &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ("", err);
+  ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+  EXPECT_EQ("touch out out.imp", command_runner_.commands_ran_[0]);
+}
+
+// Test case from
+//   https://github.com/ninja-build/ninja/issues/148
+TEST_F(BuildTest, MultiOutIn) {
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+" command = touch $out\n"
+"build in1 otherfile: touch in\n"
+"build out: touch in | in1\n"));
+
+  fs_.Create("in", "");
+  fs_.Tick();
+  fs_.Create("in1", "");  // in1 is both an output of one edge and an implicit input of another
+
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("out", &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ("", err);
+}
+
+TEST_F(BuildTest, Chain) {  // Linear chain: full build, no-op rebuild, then partial rebuild after touching c3.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build c2: cat c1\n"
+"build c3: cat c2\n"
+"build c4: cat c3\n"
+"build c5: cat c4\n"));
+
+  fs_.Create("c1", "");
+
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("c5", &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ("", err);
+  ASSERT_EQ(4u, command_runner_.commands_ran_.size());
+
+  err.clear();
+  command_runner_.commands_ran_.clear();
+  state_.Reset();
+  EXPECT_TRUE(builder_.AddTarget("c5", &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(builder_.AlreadyUpToDate());  // nothing changed: no-op
+
+  fs_.Tick();
+
+  fs_.Create("c3", "");
+  err.clear();
+  command_runner_.commands_ran_.clear();
+  state_.Reset();
+  EXPECT_TRUE(builder_.AddTarget("c5", &err));
+  ASSERT_EQ("", err);
+  EXPECT_FALSE(builder_.AlreadyUpToDate());
+  EXPECT_TRUE(builder_.Build(&err));
+  ASSERT_EQ(2u, command_runner_.commands_ran_.size());  // 3->4, 4->5
+}
+
+TEST_F(BuildTest, MissingInput) {
+  // Input is referenced by build file, but no rule for it.
+  string err;
+  Dirty("in1");
+  EXPECT_FALSE(builder_.AddTarget("cat1", &err));
+  EXPECT_EQ("'in1', needed by 'cat1', missing and no known rule to make it",
+            err);
+}
+
+TEST_F(BuildTest, MissingTarget) {
+  // Target is not referenced by build file.
+  string err;
+  EXPECT_FALSE(builder_.AddTarget("meow", &err));
+  EXPECT_EQ("unknown target: 'meow'", err);
+}
+
+TEST_F(BuildTest, MakeDirs) {  // Builder creates each missing parent directory of an output exactly once.
+  string err;
+
+#ifdef _WIN32
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+                                      "build subdir\\dir2\\file: cat in1\n"));
+#else
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+                                      "build subdir/dir2/file: cat in1\n"));
+#endif
+  EXPECT_TRUE(builder_.AddTarget("subdir/dir2/file", &err));
+
+  EXPECT_EQ("", err);
+  EXPECT_TRUE(builder_.Build(&err));
+  ASSERT_EQ("", err);
+  ASSERT_EQ(2u, fs_.directories_made_.size());
+  EXPECT_EQ("subdir", fs_.directories_made_[0]);
+  EXPECT_EQ("subdir/dir2", fs_.directories_made_[1]);
+}
+
+TEST_F(BuildTest, DepFileMissing) {  // A missing depfile is read (and tolerated); also exercises "$ " escaping.
+  string err;
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc\n  command = cc $in\n  depfile = $out.d\n"
+"build fo$ o.o: cc foo.c\n"));
+  fs_.Create("foo.c", "");
+
+  EXPECT_TRUE(builder_.AddTarget("fo o.o", &err));
+  ASSERT_EQ("", err);
+  ASSERT_EQ(1u, fs_.files_read_.size());
+  EXPECT_EQ("fo o.o.d", fs_.files_read_[0]);
+}
+
+TEST_F(BuildTest, DepFileOK) {  // A valid depfile adds implicit inputs/edges but not command-line inputs.
+  string err;
+  int orig_edges = state_.edges_.size();
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc\n  command = cc $in\n  depfile = $out.d\n"
+"build foo.o: cc foo.c\n"));
+  Edge* edge = state_.edges_.back();
+
+  fs_.Create("foo.c", "");
+  GetNode("bar.h")->MarkDirty();  // Mark bar.h as missing.
+  fs_.Create("foo.o.d", "foo.o: blah.h bar.h\n");
+  EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
+  ASSERT_EQ("", err);
+  ASSERT_EQ(1u, fs_.files_read_.size());
+  EXPECT_EQ("foo.o.d", fs_.files_read_[0]);
+
+  // Expect three new edges: one generating foo.o, and two more from
+  // loading the depfile.
+  ASSERT_EQ(orig_edges + 3, (int)state_.edges_.size());
+  // Expect our edge to now have three inputs: foo.c and two headers.
+  ASSERT_EQ(3u, edge->inputs_.size());
+
+  // Expect the command line we generate to only use the original input.
+  ASSERT_EQ("cc foo.c", edge->EvaluateCommand());
+}
+
+TEST_F(BuildTest, DepFileParseError) {  // A malformed depfile fails AddTarget with a parse error.
+  string err;
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc\n  command = cc $in\n  depfile = $out.d\n"
+"build foo.o: cc foo.c\n"));
+  fs_.Create("foo.c", "");
+  fs_.Create("foo.o.d", "randomtext\n");
+  EXPECT_FALSE(builder_.AddTarget("foo.o", &err));
+  EXPECT_EQ("foo.o.d: expected ':' in depfile", err);
+}
+
+TEST_F(BuildTest, EncounterReadyTwice) {  // A node reachable via two paths must not schedule its edge twice.
+  string err;
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+" command = touch $out\n"
+"build c: touch\n"
+"build b: touch || c\n"
+"build a: touch | b || c\n"));
+
+  vector<Edge*> c_out = GetNode("c")->out_edges();
+  ASSERT_EQ(2u, c_out.size());
+  EXPECT_EQ("b", c_out[0]->outputs_[0]->path());
+  EXPECT_EQ("a", c_out[1]->outputs_[0]->path());
+
+  fs_.Create("b", "");
+  EXPECT_TRUE(builder_.AddTarget("a", &err));
+  ASSERT_EQ("", err);
+
+  EXPECT_TRUE(builder_.Build(&err));
+  ASSERT_EQ("", err);
+  ASSERT_EQ(2u, command_runner_.commands_ran_.size());  // c and a; b is up to date
+}
+
+TEST_F(BuildTest, OrderOnlyDeps) {  // Order-only deps gate scheduling but never trigger rebuilds.
+  string err;
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc\n  command = cc $in\n  depfile = $out.d\n"
+"build foo.o: cc foo.c || otherfile\n"));
+  Edge* edge = state_.edges_.back();
+
+  fs_.Create("foo.c", "");
+  fs_.Create("otherfile", "");
+  fs_.Create("foo.o.d", "foo.o: blah.h bar.h\n");
+  EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
+  ASSERT_EQ("", err);
+
+  // One explicit, two implicit, one order only.
+  ASSERT_EQ(4u, edge->inputs_.size());
+  EXPECT_EQ(2, edge->implicit_deps_);
+  EXPECT_EQ(1, edge->order_only_deps_);
+  // Verify the inputs are in the order we expect
+  // (explicit then implicit then orderonly).
+  EXPECT_EQ("foo.c", edge->inputs_[0]->path());
+  EXPECT_EQ("blah.h", edge->inputs_[1]->path());
+  EXPECT_EQ("bar.h", edge->inputs_[2]->path());
+  EXPECT_EQ("otherfile", edge->inputs_[3]->path());
+
+  // Expect the command line we generate to only use the original input.
+  ASSERT_EQ("cc foo.c", edge->EvaluateCommand());
+
+  // explicit dep dirty, expect a rebuild.
+  EXPECT_TRUE(builder_.Build(&err));
+  ASSERT_EQ("", err);
+  ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+  fs_.Tick();
+
+  // Recreate the depfile, as it should have been deleted by the build.
+  fs_.Create("foo.o.d", "foo.o: blah.h bar.h\n");
+
+  // implicit dep dirty, expect a rebuild.
+  fs_.Create("blah.h", "");
+  fs_.Create("bar.h", "");
+  command_runner_.commands_ran_.clear();
+  state_.Reset();
+  EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
+  EXPECT_TRUE(builder_.Build(&err));
+  ASSERT_EQ("", err);
+  ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+  fs_.Tick();
+
+  // Recreate the depfile, as it should have been deleted by the build.
+  fs_.Create("foo.o.d", "foo.o: blah.h bar.h\n");
+
+  // order only dep dirty, no rebuild.
+  fs_.Create("otherfile", "");
+  command_runner_.commands_ran_.clear();
+  state_.Reset();
+  EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
+  EXPECT_EQ("", err);
+  EXPECT_TRUE(builder_.AlreadyUpToDate());
+
+  // implicit dep missing, expect rebuild.
+  fs_.RemoveFile("bar.h");
+  command_runner_.commands_ran_.clear();
+  state_.Reset();
+  EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
+  EXPECT_TRUE(builder_.Build(&err));
+  ASSERT_EQ("", err);
+  ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+}
+
+TEST_F(BuildTest, RebuildOrderOnlyDeps) {  // A dirty/missing order-only dep rebuilds itself but not its dependents.
+  string err;
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc\n  command = cc $in\n"
+"rule true\n  command = true\n"
+"build oo.h: cc oo.h.in\n"
+"build foo.o: cc foo.c || oo.h\n"));
+
+  fs_.Create("foo.c", "");
+  fs_.Create("oo.h.in", "");
+
+  // foo.o and order-only dep dirty, build both.
+  EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
+  EXPECT_TRUE(builder_.Build(&err));
+  ASSERT_EQ("", err);
+  ASSERT_EQ(2u, command_runner_.commands_ran_.size());
+
+  // all clean, no rebuild.
+  command_runner_.commands_ran_.clear();
+  state_.Reset();
+  EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
+  EXPECT_EQ("", err);
+  EXPECT_TRUE(builder_.AlreadyUpToDate());
+
+  // order-only dep missing, build it only.
+  fs_.RemoveFile("oo.h");
+  command_runner_.commands_ran_.clear();
+  state_.Reset();
+  EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
+  EXPECT_TRUE(builder_.Build(&err));
+  ASSERT_EQ("", err);
+  ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+  ASSERT_EQ("cc oo.h.in", command_runner_.commands_ran_[0]);
+
+  fs_.Tick();
+
+  // order-only dep dirty, build it only.
+  fs_.Create("oo.h.in", "");
+  command_runner_.commands_ran_.clear();
+  state_.Reset();
+  EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
+  EXPECT_TRUE(builder_.Build(&err));
+  ASSERT_EQ("", err);
+  ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+  ASSERT_EQ("cc oo.h.in", command_runner_.commands_ran_[0]);
+}
+
+#ifdef _WIN32
+TEST_F(BuildTest, DepFileCanonicalize) {  // Windows: depfile paths with mixed slashes canonicalize to the same nodes.
+  string err;
+  int orig_edges = state_.edges_.size();
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc\n  command = cc $in\n  depfile = $out.d\n"
+"build gen/stuff\\things/foo.o: cc x\\y/z\\foo.c\n"));
+  Edge* edge = state_.edges_.back();
+
+  fs_.Create("x/y/z/foo.c", "");
+  GetNode("bar.h")->MarkDirty();  // Mark bar.h as missing.
+  // Note, different slashes from manifest.
+  fs_.Create("gen/stuff\\things/foo.o.d",
+             "gen\\stuff\\things\\foo.o: blah.h bar.h\n");
+  EXPECT_TRUE(builder_.AddTarget("gen/stuff/things/foo.o", &err));
+  ASSERT_EQ("", err);
+  ASSERT_EQ(1u, fs_.files_read_.size());
+  // The depfile path does not get Canonicalize as it seems unnecessary.
+  EXPECT_EQ("gen/stuff\\things/foo.o.d", fs_.files_read_[0]);
+
+  // Expect three new edges: one generating foo.o, and two more from
+  // loading the depfile.
+  ASSERT_EQ(orig_edges + 3, (int)state_.edges_.size());
+  // Expect our edge to now have three inputs: foo.c and two headers.
+  ASSERT_EQ(3u, edge->inputs_.size());
+
+  // Expect the command line we generate to only use the original input, and
+  // using the slashes from the manifest.
+  ASSERT_EQ("cc x\\y/z\\foo.c", edge->EvaluateCommand());
+}
+#endif
+
+TEST_F(BuildTest, Phony) {  // A phony edge runs no command; only the real edge below it executes.
+  string err;
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out: cat bar.cc\n"
+"build all: phony out\n"));
+  fs_.Create("bar.cc", "");
+
+  EXPECT_TRUE(builder_.AddTarget("all", &err));
+  ASSERT_EQ("", err);
+
+  // Only one command to run, because phony runs no command.
+  EXPECT_FALSE(builder_.AlreadyUpToDate());
+  EXPECT_TRUE(builder_.Build(&err));
+  ASSERT_EQ("", err);
+  ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+}
+
+TEST_F(BuildTest, PhonyNoWork) {  // If the phony's input is up to date, the whole build is a no-op.
+  string err;
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out: cat bar.cc\n"
+"build all: phony out\n"));
+  fs_.Create("bar.cc", "");
+  fs_.Create("out", "");
+
+  EXPECT_TRUE(builder_.AddTarget("all", &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(builder_.AlreadyUpToDate());
+}
+
+// Test a self-referencing phony. Ideally this should not work, but
+// ninja 1.7 and below tolerated and CMake 2.8.12.x and 3.0.x both
+// incorrectly produce it. We tolerate it for compatibility.
+TEST_F(BuildTest, PhonySelfReference) {
+ string err;
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build a: phony a\n"));
+
+ EXPECT_TRUE(builder_.AddTarget("a", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.AlreadyUpToDate());
+}
+
+TEST_F(BuildTest, Fail) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule fail\n"
+" command = fail\n"
+"build out1: fail\n"));
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ ASSERT_EQ("", err);
+
+ EXPECT_FALSE(builder_.Build(&err));
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+ ASSERT_EQ("subcommand failed", err);
+}
+
+TEST_F(BuildTest, SwallowFailures) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule fail\n"
+" command = fail\n"
+"build out1: fail\n"
+"build out2: fail\n"
+"build out3: fail\n"
+"build all: phony out1 out2 out3\n"));
+
+ // Swallow two failures, die on the third.
+ config_.failures_allowed = 3;
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("all", &err));
+ ASSERT_EQ("", err);
+
+ EXPECT_FALSE(builder_.Build(&err));
+ ASSERT_EQ(3u, command_runner_.commands_ran_.size());
+ ASSERT_EQ("subcommands failed", err);
+}
+
+TEST_F(BuildTest, SwallowFailuresLimit) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule fail\n"
+" command = fail\n"
+"build out1: fail\n"
+"build out2: fail\n"
+"build out3: fail\n"
+"build final: cat out1 out2 out3\n"));
+
+ // Swallow ten failures; we should stop before building final.
+ config_.failures_allowed = 11;
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("final", &err));
+ ASSERT_EQ("", err);
+
+ EXPECT_FALSE(builder_.Build(&err));
+ ASSERT_EQ(3u, command_runner_.commands_ran_.size());
+ ASSERT_EQ("cannot make progress due to previous errors", err);
+}
+
+TEST_F(BuildTest, SwallowFailuresPool) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"pool failpool\n"
+" depth = 1\n"
+"rule fail\n"
+" command = fail\n"
+" pool = failpool\n"
+"build out1: fail\n"
+"build out2: fail\n"
+"build out3: fail\n"
+"build final: cat out1 out2 out3\n"));
+
+ // Swallow ten failures; we should stop before building final.
+ config_.failures_allowed = 11;
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("final", &err));
+ ASSERT_EQ("", err);
+
+ EXPECT_FALSE(builder_.Build(&err));
+ ASSERT_EQ(3u, command_runner_.commands_ran_.size());
+ ASSERT_EQ("cannot make progress due to previous errors", err);
+}
+
+TEST_F(BuildTest, PoolEdgesReadyButNotWanted) {
+ fs_.Create("x", "");
+
+ const char* manifest =
+ "pool some_pool\n"
+ " depth = 4\n"
+ "rule touch\n"
+ " command = touch $out\n"
+ " pool = some_pool\n"
+ "rule cc\n"
+ " command = touch grit\n"
+ "\n"
+ "build B.d.stamp: cc | x\n"
+ "build C.stamp: touch B.d.stamp\n"
+ "build final.stamp: touch || C.stamp\n";
+
+ RebuildTarget("final.stamp", manifest);
+
+ fs_.RemoveFile("B.d.stamp");
+
+ State save_state;
+ RebuildTarget("final.stamp", manifest, NULL, NULL, &save_state);
+ EXPECT_GE(save_state.LookupPool("some_pool")->current_use(), 0);
+}
+
+struct BuildWithLogTest : public BuildTest {
+ BuildWithLogTest() {
+ builder_.SetBuildLog(&build_log_);
+ }
+
+ BuildLog build_log_;
+};
+
+TEST_F(BuildWithLogTest, NotInLogButOnDisk) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc\n"
+" command = cc\n"
+"build out1: cc in\n"));
+
+ // Create input/output that would be considered up to date when
+ // not considering the command line hash.
+ fs_.Create("in", "");
+ fs_.Create("out1", "");
+ string err;
+
+ // Because it's not in the log, it should not be up-to-date until
+ // we build again.
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ EXPECT_FALSE(builder_.AlreadyUpToDate());
+
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_TRUE(builder_.AlreadyUpToDate());
+}
+
+TEST_F(BuildWithLogTest, RebuildAfterFailure) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch-fail-tick2\n"
+" command = touch-fail-tick2\n"
+"build out1: touch-fail-tick2 in\n"));
+
+ string err;
+
+ fs_.Create("in", "");
+
+ // Run once successfully to get out1 in the log
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_EQ("", err);
+ EXPECT_EQ(1u, command_runner_.commands_ran_.size());
+
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ builder_.Cleanup();
+ builder_.plan_.Reset();
+
+ fs_.Tick();
+ fs_.Create("in", "");
+
+ // Run again with a failure that updates the output file timestamp
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ EXPECT_FALSE(builder_.Build(&err));
+ EXPECT_EQ("subcommand failed", err);
+ EXPECT_EQ(1u, command_runner_.commands_ran_.size());
+
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ builder_.Cleanup();
+ builder_.plan_.Reset();
+
+ fs_.Tick();
+
+ // Run again, should rerun even though the output file is up to date on disk
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ EXPECT_FALSE(builder_.AlreadyUpToDate());
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_EQ(1u, command_runner_.commands_ran_.size());
+ EXPECT_EQ("", err);
+}
+
+TEST_F(BuildWithLogTest, RebuildWithNoInputs) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+" command = touch\n"
+"build out1: touch\n"
+"build out2: touch in\n"));
+
+ string err;
+
+ fs_.Create("in", "");
+
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ EXPECT_TRUE(builder_.AddTarget("out2", &err));
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_EQ("", err);
+ EXPECT_EQ(2u, command_runner_.commands_ran_.size());
+
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+
+ fs_.Tick();
+
+ fs_.Create("in", "");
+
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ EXPECT_TRUE(builder_.AddTarget("out2", &err));
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_EQ("", err);
+ EXPECT_EQ(1u, command_runner_.commands_ran_.size());
+}
+
+TEST_F(BuildWithLogTest, RestatTest) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule true\n"
+" command = true\n"
+" restat = 1\n"
+"rule cc\n"
+" command = cc\n"
+" restat = 1\n"
+"build out1: cc in\n"
+"build out2: true out1\n"
+"build out3: cat out2\n"));
+
+ fs_.Create("out1", "");
+ fs_.Create("out2", "");
+ fs_.Create("out3", "");
+
+ fs_.Tick();
+
+ fs_.Create("in", "");
+
+ // Do a pre-build so that there's commands in the log for the outputs,
+ // otherwise, the lack of an entry in the build log will cause out3 to rebuild
+ // regardless of restat.
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out3", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ("", err);
+ EXPECT_EQ("[3/3]", builder_.status_->FormatProgressStatus("[%s/%t]",
+ BuildStatus::kEdgeStarted));
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+
+ fs_.Tick();
+
+ fs_.Create("in", "");
+ // "cc" touches out1, so we should build out2. But because "true" does not
+ // touch out2, we should cancel the build of out3.
+ EXPECT_TRUE(builder_.AddTarget("out3", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ(2u, command_runner_.commands_ran_.size());
+
+ // If we run again, it should be a no-op, because the build log has recorded
+ // that we've already built out2 with an input timestamp of 2 (from out1).
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ EXPECT_TRUE(builder_.AddTarget("out3", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.AlreadyUpToDate());
+
+ fs_.Tick();
+
+ fs_.Create("in", "");
+
+ // The build log entry should not, however, prevent us from rebuilding out2
+ // if out1 changes.
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ EXPECT_TRUE(builder_.AddTarget("out3", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ(2u, command_runner_.commands_ran_.size());
+}
+
+TEST_F(BuildWithLogTest, RestatMissingFile) {
+ // If a restat rule doesn't create its output, and the output didn't
+ // exist before the rule was run, consider that behavior equivalent
+ // to a rule that doesn't modify its existent output file.
+
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule true\n"
+" command = true\n"
+" restat = 1\n"
+"rule cc\n"
+" command = cc\n"
+"build out1: true in\n"
+"build out2: cc out1\n"));
+
+ fs_.Create("in", "");
+ fs_.Create("out2", "");
+
+ // Do a pre-build so that there's commands in the log for the outputs,
+ // otherwise, the lack of an entry in the build log will cause out2 to rebuild
+ // regardless of restat.
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out2", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ("", err);
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+
+ fs_.Tick();
+ fs_.Create("in", "");
+ fs_.Create("out2", "");
+
+ // Run a build, expect only the first command to run.
+ // It doesn't touch its output (due to being the "true" command), so
+ // we shouldn't run the dependent build.
+ EXPECT_TRUE(builder_.AddTarget("out2", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+}
+
+TEST_F(BuildWithLogTest, RestatSingleDependentOutputDirty) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+ "rule true\n"
+ " command = true\n"
+ " restat = 1\n"
+ "rule touch\n"
+ " command = touch\n"
+ "build out1: true in\n"
+ "build out2 out3: touch out1\n"
+ "build out4: touch out2\n"
+ ));
+
+ // Create the necessary files
+ fs_.Create("in", "");
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out4", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(3u, command_runner_.commands_ran_.size());
+
+ fs_.Tick();
+ fs_.Create("in", "");
+ fs_.RemoveFile("out3");
+
+ // Since "in" is missing, out1 will be built. Since "out3" is missing,
+ // out2 and out3 will be built even though "in" is not touched when built.
+ // Then, since out2 is rebuilt, out4 should be rebuilt -- the restat on the
+ // "true" rule should not lead to the "touch" edge writing out2 and out3 being
+ // cleard.
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ EXPECT_TRUE(builder_.AddTarget("out4", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(3u, command_runner_.commands_ran_.size());
+}
+
+// Test scenario, in which an input file is removed, but output isn't changed
+// https://github.com/ninja-build/ninja/issues/295
+TEST_F(BuildWithLogTest, RestatMissingInput) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+ "rule true\n"
+ " command = true\n"
+ " depfile = $out.d\n"
+ " restat = 1\n"
+ "rule cc\n"
+ " command = cc\n"
+ "build out1: true in\n"
+ "build out2: cc out1\n"));
+
+ // Create all necessary files
+ fs_.Create("in", "");
+
+ // The implicit dependencies and the depfile itself
+ // are newer than the output
+ TimeStamp restat_mtime = fs_.Tick();
+ fs_.Create("out1.d", "out1: will.be.deleted restat.file\n");
+ fs_.Create("will.be.deleted", "");
+ fs_.Create("restat.file", "");
+
+ // Run the build, out1 and out2 get built
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out2", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ(2u, command_runner_.commands_ran_.size());
+
+ // See that an entry in the logfile is created, capturing
+ // the right mtime
+ BuildLog::LogEntry* log_entry = build_log_.LookupByOutput("out1");
+ ASSERT_TRUE(NULL != log_entry);
+ ASSERT_EQ(restat_mtime, log_entry->mtime);
+
+ // Now remove a file, referenced from depfile, so that target becomes
+ // dirty, but the output does not change
+ fs_.RemoveFile("will.be.deleted");
+
+ // Trigger the build again - only out1 gets built
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ EXPECT_TRUE(builder_.AddTarget("out2", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+ // Check that the logfile entry remains correctly set
+ log_entry = build_log_.LookupByOutput("out1");
+ ASSERT_TRUE(NULL != log_entry);
+ ASSERT_EQ(restat_mtime, log_entry->mtime);
+}
+
+struct BuildDryRun : public BuildWithLogTest {
+ BuildDryRun() {
+ config_.dry_run = true;
+ }
+};
+
+TEST_F(BuildDryRun, AllCommandsShown) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule true\n"
+" command = true\n"
+" restat = 1\n"
+"rule cc\n"
+" command = cc\n"
+" restat = 1\n"
+"build out1: cc in\n"
+"build out2: true out1\n"
+"build out3: cat out2\n"));
+
+ fs_.Create("out1", "");
+ fs_.Create("out2", "");
+ fs_.Create("out3", "");
+
+ fs_.Tick();
+
+ fs_.Create("in", "");
+
+ // "cc" touches out1, so we should build out2. But because "true" does not
+ // touch out2, we should cancel the build of out3.
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out3", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ(3u, command_runner_.commands_ran_.size());
+}
+
+// Test that RSP files are created when & where appropriate and deleted after
+// successful execution.
+TEST_F(BuildTest, RspFileSuccess)
+{
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+ "rule cat_rsp\n"
+ " command = cat $rspfile > $out\n"
+ " rspfile = $rspfile\n"
+ " rspfile_content = $long_command\n"
+ "rule cat_rsp_out\n"
+ " command = cat $rspfile > $out\n"
+ " rspfile = $out.rsp\n"
+ " rspfile_content = $long_command\n"
+ "build out1: cat in\n"
+ "build out2: cat_rsp in\n"
+ " rspfile = out 2.rsp\n"
+ " long_command = Some very long command\n"
+ "build out$ 3: cat_rsp_out in\n"
+ " long_command = Some very long command\n"));
+
+ fs_.Create("out1", "");
+ fs_.Create("out2", "");
+ fs_.Create("out 3", "");
+
+ fs_.Tick();
+
+ fs_.Create("in", "");
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.AddTarget("out2", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.AddTarget("out 3", &err));
+ ASSERT_EQ("", err);
+
+ size_t files_created = fs_.files_created_.size();
+ size_t files_removed = fs_.files_removed_.size();
+
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ(3u, command_runner_.commands_ran_.size());
+
+ // The RSP files were created
+ ASSERT_EQ(files_created + 2, fs_.files_created_.size());
+ ASSERT_EQ(1u, fs_.files_created_.count("out 2.rsp"));
+ ASSERT_EQ(1u, fs_.files_created_.count("out 3.rsp"));
+
+ // The RSP files were removed
+ ASSERT_EQ(files_removed + 2, fs_.files_removed_.size());
+ ASSERT_EQ(1u, fs_.files_removed_.count("out 2.rsp"));
+ ASSERT_EQ(1u, fs_.files_removed_.count("out 3.rsp"));
+}
+
+// Test that RSP file is created but not removed for commands, which fail
+TEST_F(BuildTest, RspFileFailure) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+ "rule fail\n"
+ " command = fail\n"
+ " rspfile = $rspfile\n"
+ " rspfile_content = $long_command\n"
+ "build out: fail in\n"
+ " rspfile = out.rsp\n"
+ " long_command = Another very long command\n"));
+
+ fs_.Create("out", "");
+ fs_.Tick();
+ fs_.Create("in", "");
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+
+ size_t files_created = fs_.files_created_.size();
+ size_t files_removed = fs_.files_removed_.size();
+
+ EXPECT_FALSE(builder_.Build(&err));
+ ASSERT_EQ("subcommand failed", err);
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+ // The RSP file was created
+ ASSERT_EQ(files_created + 1, fs_.files_created_.size());
+ ASSERT_EQ(1u, fs_.files_created_.count("out.rsp"));
+
+ // The RSP file was NOT removed
+ ASSERT_EQ(files_removed, fs_.files_removed_.size());
+ ASSERT_EQ(0u, fs_.files_removed_.count("out.rsp"));
+
+ // The RSP file contains what it should
+ ASSERT_EQ("Another very long command", fs_.files_["out.rsp"].contents);
+}
+
+// Test that contents of the RSP file behaves like a regular part of
+// command line, i.e. triggers a rebuild if changed
+TEST_F(BuildWithLogTest, RspFileCmdLineChange) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+ "rule cat_rsp\n"
+ " command = cat $rspfile > $out\n"
+ " rspfile = $rspfile\n"
+ " rspfile_content = $long_command\n"
+ "build out: cat_rsp in\n"
+ " rspfile = out.rsp\n"
+ " long_command = Original very long command\n"));
+
+ fs_.Create("out", "");
+ fs_.Tick();
+ fs_.Create("in", "");
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+
+ // 1. Build for the 1st time (-> populate log)
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+ // 2. Build again (no change)
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ EXPECT_TRUE(builder_.AddTarget("out", &err));
+ EXPECT_EQ("", err);
+ ASSERT_TRUE(builder_.AlreadyUpToDate());
+
+ // 3. Alter the entry in the logfile
+ // (to simulate a change in the command line between 2 builds)
+ BuildLog::LogEntry* log_entry = build_log_.LookupByOutput("out");
+ ASSERT_TRUE(NULL != log_entry);
+ ASSERT_NO_FATAL_FAILURE(AssertHash(
+ "cat out.rsp > out;rspfile=Original very long command",
+ log_entry->command_hash));
+ log_entry->command_hash++; // Change the command hash to something else.
+ // Now expect the target to be rebuilt
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ EXPECT_TRUE(builder_.AddTarget("out", &err));
+ EXPECT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_EQ(1u, command_runner_.commands_ran_.size());
+}
+
+TEST_F(BuildTest, InterruptCleanup) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule interrupt\n"
+" command = interrupt\n"
+"rule touch-interrupt\n"
+" command = touch-interrupt\n"
+"build out1: interrupt in1\n"
+"build out2: touch-interrupt in2\n"));
+
+ fs_.Create("out1", "");
+ fs_.Create("out2", "");
+ fs_.Tick();
+ fs_.Create("in1", "");
+ fs_.Create("in2", "");
+
+ // An untouched output of an interrupted command should be retained.
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ EXPECT_EQ("", err);
+ EXPECT_FALSE(builder_.Build(&err));
+ EXPECT_EQ("interrupted by user", err);
+ builder_.Cleanup();
+ EXPECT_GT(fs_.Stat("out1", &err), 0);
+ err = "";
+
+ // A touched output of an interrupted command should be deleted.
+ EXPECT_TRUE(builder_.AddTarget("out2", &err));
+ EXPECT_EQ("", err);
+ EXPECT_FALSE(builder_.Build(&err));
+ EXPECT_EQ("interrupted by user", err);
+ builder_.Cleanup();
+ EXPECT_EQ(0, fs_.Stat("out2", &err));
+}
+
+TEST_F(BuildTest, StatFailureAbortsBuild) {
+ const string kTooLongToStat(400, 'i');
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+("build " + kTooLongToStat + ": cat in\n").c_str()));
+ fs_.Create("in", "");
+
+ // This simulates a stat failure:
+ fs_.files_[kTooLongToStat].mtime = -1;
+ fs_.files_[kTooLongToStat].stat_error = "stat failed";
+
+ string err;
+ EXPECT_FALSE(builder_.AddTarget(kTooLongToStat, &err));
+ EXPECT_EQ("stat failed", err);
+}
+
+TEST_F(BuildTest, PhonyWithNoInputs) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build nonexistent: phony\n"
+"build out1: cat || nonexistent\n"
+"build out2: cat nonexistent\n"));
+ fs_.Create("out1", "");
+ fs_.Create("out2", "");
+
+ // out1 should be up to date even though its input is dirty, because its
+ // order-only dependency has nothing to do.
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.AlreadyUpToDate());
+
+ // out2 should still be out of date though, because its input is dirty.
+ err.clear();
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ EXPECT_TRUE(builder_.AddTarget("out2", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_EQ("", err);
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+}
+
+TEST_F(BuildTest, DepsGccWithEmptyDepfileErrorsOut) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc\n"
+" command = cc\n"
+" deps = gcc\n"
+"build out: cc\n"));
+ Dirty("out");
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+ EXPECT_FALSE(builder_.AlreadyUpToDate());
+
+ EXPECT_FALSE(builder_.Build(&err));
+ ASSERT_EQ("subcommand failed", err);
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+}
+
+TEST_F(BuildTest, StatusFormatElapsed) {
+ status_.BuildStarted();
+ // Before any task is done, the elapsed time must be zero.
+ EXPECT_EQ("[%/e0.000]",
+ status_.FormatProgressStatus("[%%/e%e]",
+ BuildStatus::kEdgeStarted));
+}
+
+TEST_F(BuildTest, StatusFormatReplacePlaceholder) {
+ EXPECT_EQ("[%/s0/t0/r0/u0/f0]",
+ status_.FormatProgressStatus("[%%/s%s/t%t/r%r/u%u/f%f]",
+ BuildStatus::kEdgeStarted));
+}
+
+TEST_F(BuildTest, FailedDepsParse) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build bad_deps.o: cat in1\n"
+" deps = gcc\n"
+" depfile = in1.d\n"));
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("bad_deps.o", &err));
+ ASSERT_EQ("", err);
+
+ // These deps will fail to parse, as they should only have one
+ // path to the left of the colon.
+ fs_.Create("in1.d", "AAA BBB");
+
+ EXPECT_FALSE(builder_.Build(&err));
+ EXPECT_EQ("subcommand failed", err);
+}
+
+struct BuildWithQueryDepsLogTest : public BuildTest {
+ BuildWithQueryDepsLogTest() : BuildTest(&log_) {
+ }
+
+ ~BuildWithQueryDepsLogTest() {
+ log_.Close();
+ }
+
+ virtual void SetUp() {
+ BuildTest::SetUp();
+
+ temp_dir_.CreateAndEnter("BuildWithQueryDepsLogTest");
+
+ std::string err;
+ ASSERT_TRUE(log_.OpenForWrite("ninja_deps", &err));
+ ASSERT_EQ("", err);
+ }
+
+ ScopedTempDir temp_dir_;
+
+ DepsLog log_;
+};
+
+/// Test a MSVC-style deps log with multiple outputs.
+TEST_F(BuildWithQueryDepsLogTest, TwoOutputsDepFileMSVC) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cp_multi_msvc\n"
+" command = echo 'using $in' && for file in $out; do cp $in $$file; done\n"
+" deps = msvc\n"
+" msvc_deps_prefix = using \n"
+"build out1 out2: cp_multi_msvc in1\n"));
+
+ std::string err;
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_EQ("", err);
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+ EXPECT_EQ("echo 'using in1' && for file in out1 out2; do cp in1 $file; done", command_runner_.commands_ran_[0]);
+
+ Node* out1_node = state_.LookupNode("out1");
+ DepsLog::Deps* out1_deps = log_.GetDeps(out1_node);
+ EXPECT_EQ(1, out1_deps->node_count);
+ EXPECT_EQ("in1", out1_deps->nodes[0]->path());
+
+ Node* out2_node = state_.LookupNode("out2");
+ DepsLog::Deps* out2_deps = log_.GetDeps(out2_node);
+ EXPECT_EQ(1, out2_deps->node_count);
+ EXPECT_EQ("in1", out2_deps->nodes[0]->path());
+}
+
+/// Test a GCC-style deps log with multiple outputs.
+TEST_F(BuildWithQueryDepsLogTest, TwoOutputsDepFileGCCOneLine) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cp_multi_gcc\n"
+" command = echo '$out: $in' > in.d && for file in $out; do cp in1 $$file; done\n"
+" deps = gcc\n"
+" depfile = in.d\n"
+"build out1 out2: cp_multi_gcc in1 in2\n"));
+
+ std::string err;
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ ASSERT_EQ("", err);
+ fs_.Create("in.d", "out1 out2: in1 in2");
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_EQ("", err);
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+ EXPECT_EQ("echo 'out1 out2: in1 in2' > in.d && for file in out1 out2; do cp in1 $file; done", command_runner_.commands_ran_[0]);
+
+ Node* out1_node = state_.LookupNode("out1");
+ DepsLog::Deps* out1_deps = log_.GetDeps(out1_node);
+ EXPECT_EQ(2, out1_deps->node_count);
+ EXPECT_EQ("in1", out1_deps->nodes[0]->path());
+ EXPECT_EQ("in2", out1_deps->nodes[1]->path());
+
+ Node* out2_node = state_.LookupNode("out2");
+ DepsLog::Deps* out2_deps = log_.GetDeps(out2_node);
+ EXPECT_EQ(2, out2_deps->node_count);
+ EXPECT_EQ("in1", out2_deps->nodes[0]->path());
+ EXPECT_EQ("in2", out2_deps->nodes[1]->path());
+}
+
+/// Test a GCC-style deps log with multiple outputs using a line per input.
+TEST_F(BuildWithQueryDepsLogTest, TwoOutputsDepFileGCCMultiLineInput) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cp_multi_gcc\n"
+" command = echo '$out: in1\\n$out: in2' > in.d && for file in $out; do cp in1 $$file; done\n"
+" deps = gcc\n"
+" depfile = in.d\n"
+"build out1 out2: cp_multi_gcc in1 in2\n"));
+
+ std::string err;
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ ASSERT_EQ("", err);
+ fs_.Create("in.d", "out1 out2: in1\nout1 out2: in2");
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_EQ("", err);
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+ EXPECT_EQ("echo 'out1 out2: in1\\nout1 out2: in2' > in.d && for file in out1 out2; do cp in1 $file; done", command_runner_.commands_ran_[0]);
+
+ Node* out1_node = state_.LookupNode("out1");
+ DepsLog::Deps* out1_deps = log_.GetDeps(out1_node);
+ EXPECT_EQ(2, out1_deps->node_count);
+ EXPECT_EQ("in1", out1_deps->nodes[0]->path());
+ EXPECT_EQ("in2", out1_deps->nodes[1]->path());
+
+ Node* out2_node = state_.LookupNode("out2");
+ DepsLog::Deps* out2_deps = log_.GetDeps(out2_node);
+ EXPECT_EQ(2, out2_deps->node_count);
+ EXPECT_EQ("in1", out2_deps->nodes[0]->path());
+ EXPECT_EQ("in2", out2_deps->nodes[1]->path());
+}
+
+/// Test a GCC-style deps log with multiple outputs using a line per output.
+TEST_F(BuildWithQueryDepsLogTest, TwoOutputsDepFileGCCMultiLineOutput) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cp_multi_gcc\n"
+" command = echo 'out1: $in\\nout2: $in' > in.d && for file in $out; do cp in1 $$file; done\n"
+" deps = gcc\n"
+" depfile = in.d\n"
+"build out1 out2: cp_multi_gcc in1 in2\n"));
+
+ std::string err;
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ ASSERT_EQ("", err);
+ fs_.Create("in.d", "out1: in1 in2\nout2: in1 in2");
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_EQ("", err);
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+ EXPECT_EQ("echo 'out1: in1 in2\\nout2: in1 in2' > in.d && for file in out1 out2; do cp in1 $file; done", command_runner_.commands_ran_[0]);
+
+ Node* out1_node = state_.LookupNode("out1");
+ DepsLog::Deps* out1_deps = log_.GetDeps(out1_node);
+ EXPECT_EQ(2, out1_deps->node_count);
+ EXPECT_EQ("in1", out1_deps->nodes[0]->path());
+ EXPECT_EQ("in2", out1_deps->nodes[1]->path());
+
+ Node* out2_node = state_.LookupNode("out2");
+ DepsLog::Deps* out2_deps = log_.GetDeps(out2_node);
+ EXPECT_EQ(2, out2_deps->node_count);
+ EXPECT_EQ("in1", out2_deps->nodes[0]->path());
+ EXPECT_EQ("in2", out2_deps->nodes[1]->path());
+}
+
+/// Test a GCC-style deps log with multiple outputs mentioning only the main output.
+TEST_F(BuildWithQueryDepsLogTest, TwoOutputsDepFileGCCOnlyMainOutput) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cp_multi_gcc\n"
+" command = echo 'out1: $in' > in.d && for file in $out; do cp in1 $$file; done\n"
+" deps = gcc\n"
+" depfile = in.d\n"
+"build out1 out2: cp_multi_gcc in1 in2\n"));
+
+ std::string err;
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ ASSERT_EQ("", err);
+ fs_.Create("in.d", "out1: in1 in2");
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_EQ("", err);
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+ EXPECT_EQ("echo 'out1: in1 in2' > in.d && for file in out1 out2; do cp in1 $file; done", command_runner_.commands_ran_[0]);
+
+ Node* out1_node = state_.LookupNode("out1");
+ DepsLog::Deps* out1_deps = log_.GetDeps(out1_node);
+ EXPECT_EQ(2, out1_deps->node_count);
+ EXPECT_EQ("in1", out1_deps->nodes[0]->path());
+ EXPECT_EQ("in2", out1_deps->nodes[1]->path());
+
+ Node* out2_node = state_.LookupNode("out2");
+ DepsLog::Deps* out2_deps = log_.GetDeps(out2_node);
+ EXPECT_EQ(2, out2_deps->node_count);
+ EXPECT_EQ("in1", out2_deps->nodes[0]->path());
+ EXPECT_EQ("in2", out2_deps->nodes[1]->path());
+}
+
+/// Test a GCC-style deps log with multiple outputs mentioning only the secondary output.
+TEST_F(BuildWithQueryDepsLogTest, TwoOutputsDepFileGCCOnlySecondaryOutput) {
+ // Note: This ends up short-circuiting the node creation due to the primary
+ // output not being present, but it should still work.
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cp_multi_gcc\n"
+" command = echo 'out2: $in' > in.d && for file in $out; do cp in1 $$file; done\n"
+" deps = gcc\n"
+" depfile = in.d\n"
+"build out1 out2: cp_multi_gcc in1 in2\n"));
+
+ std::string err;
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ ASSERT_EQ("", err);
+ fs_.Create("in.d", "out2: in1 in2");
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_EQ("", err);
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+ EXPECT_EQ("echo 'out2: in1 in2' > in.d && for file in out1 out2; do cp in1 $file; done", command_runner_.commands_ran_[0]);
+
+ Node* out1_node = state_.LookupNode("out1");
+ DepsLog::Deps* out1_deps = log_.GetDeps(out1_node);
+ EXPECT_EQ(2, out1_deps->node_count);
+ EXPECT_EQ("in1", out1_deps->nodes[0]->path());
+ EXPECT_EQ("in2", out1_deps->nodes[1]->path());
+
+ Node* out2_node = state_.LookupNode("out2");
+ DepsLog::Deps* out2_deps = log_.GetDeps(out2_node);
+ EXPECT_EQ(2, out2_deps->node_count);
+ EXPECT_EQ("in1", out2_deps->nodes[0]->path());
+ EXPECT_EQ("in2", out2_deps->nodes[1]->path());
+}
+
+/// Tests of builds involving deps logs necessarily must span
+/// multiple builds. We reuse methods on BuildTest but not the
+/// builder_ it sets up, because we want pristine objects for
+/// each build.
+struct BuildWithDepsLogTest : public BuildTest {
+ BuildWithDepsLogTest() {}
+
+ virtual void SetUp() {
+ BuildTest::SetUp();
+
+ temp_dir_.CreateAndEnter("BuildWithDepsLogTest");
+ }
+
+ virtual void TearDown() {
+ temp_dir_.Cleanup();
+ }
+
+ ScopedTempDir temp_dir_;
+
+ /// Shadow parent class builder_ so we don't accidentally use it.
+ void* builder_;
+};
+
+/// Run a straightforward build where the deps log is used.
+TEST_F(BuildWithDepsLogTest, Straightforward) {
+  string err;
+  // Note: in1 was created by the superclass SetUp().
+  const char* manifest =
+      "build out: cat in1\n"
+      "  deps = gcc\n"
+      "  depfile = in1.d\n";
+  {
+    State state;
+    ASSERT_NO_FATAL_FAILURE(AddCatRule(&state));
+    ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+    // Run the build once, everything should be ok.
+    DepsLog deps_log;
+    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+    ASSERT_EQ("", err);
+
+    Builder builder(&state, config_, NULL, &deps_log, &fs_);
+    builder.command_runner_.reset(&command_runner_);
+    EXPECT_TRUE(builder.AddTarget("out", &err));
+    ASSERT_EQ("", err);
+    fs_.Create("in1.d", "out: in2");
+    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ("", err);
+
+    // The deps file should have been removed.
+    EXPECT_EQ(0, fs_.Stat("in1.d", &err));
+    // Recreate it for the next step.
+    fs_.Create("in1.d", "out: in2");
+    deps_log.Close();
+    // release() so the Builder doesn't delete the fixture-owned runner.
+    builder.command_runner_.release();
+  }
+
+  {
+    State state;
+    ASSERT_NO_FATAL_FAILURE(AddCatRule(&state));
+    ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+    // Touch the file only mentioned in the deps.
+    fs_.Tick();
+    fs_.Create("in2", "");
+
+    // Run the build again.
+    DepsLog deps_log;
+    ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err));
+    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+
+    Builder builder(&state, config_, NULL, &deps_log, &fs_);
+    builder.command_runner_.reset(&command_runner_);
+    command_runner_.commands_ran_.clear();
+    EXPECT_TRUE(builder.AddTarget("out", &err));
+    ASSERT_EQ("", err);
+    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ("", err);
+
+    // We should have rebuilt the output due to in2 being
+    // out of date.
+    EXPECT_EQ(1u, command_runner_.commands_ran_.size());
+
+    builder.command_runner_.release();
+  }
+}
+
+/// Verify that obsolete dependency info causes a rebuild.
+/// 1) Run a successful build where everything has time t, record deps.
+/// 2) Move input/output to time t+1 -- despite files in alignment,
+/// should still need to rebuild due to deps at older time.
+TEST_F(BuildWithDepsLogTest, ObsoleteDeps) {
+  string err;
+  // Note: in1 was created by the superclass SetUp().
+  const char* manifest =
+      "build out: cat in1\n"
+      "  deps = gcc\n"
+      "  depfile = in1.d\n";
+  {
+    // Run an ordinary build that gathers dependencies.
+    fs_.Create("in1", "");
+    fs_.Create("in1.d", "out: ");
+
+    State state;
+    ASSERT_NO_FATAL_FAILURE(AddCatRule(&state));
+    ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+    // Run the build once, everything should be ok.
+    DepsLog deps_log;
+    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+    ASSERT_EQ("", err);
+
+    Builder builder(&state, config_, NULL, &deps_log, &fs_);
+    builder.command_runner_.reset(&command_runner_);
+    EXPECT_TRUE(builder.AddTarget("out", &err));
+    ASSERT_EQ("", err);
+    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ("", err);
+
+    deps_log.Close();
+    // release() so the Builder doesn't delete the fixture-owned runner.
+    builder.command_runner_.release();
+  }
+
+  // Push all files one tick forward so that only the deps are out
+  // of date.
+  fs_.Tick();
+  fs_.Create("in1", "");
+  fs_.Create("out", "");
+
+  // The deps file should have been removed, so no need to timestamp it.
+  EXPECT_EQ(0, fs_.Stat("in1.d", &err));
+
+  {
+    State state;
+    ASSERT_NO_FATAL_FAILURE(AddCatRule(&state));
+    ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+    DepsLog deps_log;
+    ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err));
+    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+
+    Builder builder(&state, config_, NULL, &deps_log, &fs_);
+    builder.command_runner_.reset(&command_runner_);
+    command_runner_.commands_ran_.clear();
+    EXPECT_TRUE(builder.AddTarget("out", &err));
+    ASSERT_EQ("", err);
+
+    // Recreate the deps file here because the build expects them to exist.
+    fs_.Create("in1.d", "out: ");
+
+    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ("", err);
+
+    // We should have rebuilt the output due to the deps being
+    // out of date.
+    EXPECT_EQ(1u, command_runner_.commands_ran_.size());
+
+    builder.command_runner_.release();
+  }
+}
+
+/// A dry-run build must not require (or touch) a deps log: the Builder is
+/// constructed with a NULL DepsLog and should still schedule the command.
+TEST_F(BuildWithDepsLogTest, DepsIgnoredInDryRun) {
+  const char* manifest =
+      "build out: cat in1\n"
+      "  deps = gcc\n"
+      "  depfile = in1.d\n";
+
+  // Make "out" older than "in1" so the edge is out of date.
+  fs_.Create("out", "");
+  fs_.Tick();
+  fs_.Create("in1", "");
+
+  State state;
+  ASSERT_NO_FATAL_FAILURE(AddCatRule(&state));
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+  // The deps log is NULL in dry runs.
+  config_.dry_run = true;
+  Builder builder(&state, config_, NULL, NULL, &fs_);
+  builder.command_runner_.reset(&command_runner_);
+  command_runner_.commands_ran_.clear();
+
+  string err;
+  EXPECT_TRUE(builder.AddTarget("out", &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(builder.Build(&err));
+  ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+  builder.command_runner_.release();
+}
+
+/// Check that a restat rule generating a header cancels compilations correctly.
+TEST_F(BuildTest, RestatDepfileDependency) {
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule true\n"
+"  command = true\n"  // Would be "write if out-of-date" in reality.
+"  restat = 1\n"
+"build header.h: true header.in\n"
+"build out: cat in1\n"
+"  depfile = in1.d\n"));
+
+  // header.h exists and is older than header.in, so "true" will run but
+  // (being restat and not touching its output) should cancel "out".
+  fs_.Create("header.h", "");
+  fs_.Create("in1.d", "out: header.h");
+  fs_.Tick();
+  fs_.Create("header.in", "");
+
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("out", &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ("", err);
+}
+
+/// Check that a restat rule generating a header cancels compilations correctly,
+/// depslog case.
+TEST_F(BuildWithDepsLogTest, RestatDepfileDependencyDepsLog) {
+  string err;
+  // Note: in1 was created by the superclass SetUp().
+  const char* manifest =
+      "rule true\n"
+      "  command = true\n"  // Would be "write if out-of-date" in reality.
+      "  restat = 1\n"
+      "build header.h: true header.in\n"
+      "build out: cat in1\n"
+      "  deps = gcc\n"
+      "  depfile = in1.d\n";
+  {
+    State state;
+    ASSERT_NO_FATAL_FAILURE(AddCatRule(&state));
+    ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+    // Run the build once, everything should be ok.
+    DepsLog deps_log;
+    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+    ASSERT_EQ("", err);
+
+    Builder builder(&state, config_, NULL, &deps_log, &fs_);
+    builder.command_runner_.reset(&command_runner_);
+    EXPECT_TRUE(builder.AddTarget("out", &err));
+    ASSERT_EQ("", err);
+    // Record header.h as a discovered dependency of "out" in the deps log.
+    fs_.Create("in1.d", "out: header.h");
+    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ("", err);
+
+    deps_log.Close();
+    // release() so the Builder doesn't delete the fixture-owned runner.
+    builder.command_runner_.release();
+  }
+
+  {
+    State state;
+    ASSERT_NO_FATAL_FAILURE(AddCatRule(&state));
+    ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+    // Touch the input of the restat rule.
+    fs_.Tick();
+    fs_.Create("header.in", "");
+
+    // Run the build again.
+    DepsLog deps_log;
+    ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err));
+    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+
+    Builder builder(&state, config_, NULL, &deps_log, &fs_);
+    builder.command_runner_.reset(&command_runner_);
+    command_runner_.commands_ran_.clear();
+    EXPECT_TRUE(builder.AddTarget("out", &err));
+    ASSERT_EQ("", err);
+    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ("", err);
+
+    // Rule "true" should have run again, but the build of "out" should have
+    // been cancelled due to restat propagating through the depfile header.
+    EXPECT_EQ(1u, command_runner_.commands_ran_.size());
+
+    builder.command_runner_.release();
+  }
+}
+
+/// Verify that deps recorded in the log are reloaded on a later build and
+/// grafted onto the edge as new inputs, for an output whose name contains
+/// an escaped space ("fo$ o.o").
+TEST_F(BuildWithDepsLogTest, DepFileOKDepsLog) {
+  string err;
+  const char* manifest =
+      "rule cc\n  command = cc $in\n  depfile = $out.d\n  deps = gcc\n"
+      "build fo$ o.o: cc foo.c\n";
+
+  fs_.Create("foo.c", "");
+
+  {
+    State state;
+    ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+    // Run the build once, everything should be ok.
+    DepsLog deps_log;
+    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+    ASSERT_EQ("", err);
+
+    Builder builder(&state, config_, NULL, &deps_log, &fs_);
+    builder.command_runner_.reset(&command_runner_);
+    EXPECT_TRUE(builder.AddTarget("fo o.o", &err));
+    ASSERT_EQ("", err);
+    // Depfile uses Makefile-style escaping ("fo\ o.o") for the space.
+    fs_.Create("fo o.o.d", "fo\\ o.o: blah.h bar.h\n");
+    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ("", err);
+
+    deps_log.Close();
+    // release() so the Builder doesn't delete the fixture-owned runner.
+    builder.command_runner_.release();
+  }
+
+  {
+    State state;
+    ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+    DepsLog deps_log;
+    ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err));
+    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+    ASSERT_EQ("", err);
+
+    Builder builder(&state, config_, NULL, &deps_log, &fs_);
+    builder.command_runner_.reset(&command_runner_);
+
+    Edge* edge = state.edges_.back();
+
+    state.GetNode("bar.h", 0)->MarkDirty();  // Mark bar.h as missing.
+    EXPECT_TRUE(builder.AddTarget("fo o.o", &err));
+    ASSERT_EQ("", err);
+
+    // Expect three new edges: one generating fo o.o, and two more from
+    // loading the depfile.
+    ASSERT_EQ(3u, state.edges_.size());
+    // Expect our edge to now have three inputs: foo.c and two headers.
+    ASSERT_EQ(3u, edge->inputs_.size());
+
+    // Expect the command line we generate to only use the original input.
+    ASSERT_EQ("cc foo.c", edge->EvaluateCommand());
+
+    deps_log.Close();
+    builder.command_runner_.release();
+  }
+}
+
+#ifdef _WIN32
+/// Windows-only: verify that paths with mixed forward/back slashes are
+/// canonicalized consistently between the manifest, the depfile, and the
+/// deps log, so deps round-trip correctly across builds.
+TEST_F(BuildWithDepsLogTest, DepFileDepsLogCanonicalize) {
+  string err;
+  const char* manifest =
+      "rule cc\n  command = cc $in\n  depfile = $out.d\n  deps = gcc\n"
+      "build a/b\\c\\d/e/fo$ o.o: cc x\\y/z\\foo.c\n";
+
+  fs_.Create("x/y/z/foo.c", "");
+
+  {
+    State state;
+    ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+    // Run the build once, everything should be ok.
+    DepsLog deps_log;
+    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+    ASSERT_EQ("", err);
+
+    Builder builder(&state, config_, NULL, &deps_log, &fs_);
+    builder.command_runner_.reset(&command_runner_);
+    EXPECT_TRUE(builder.AddTarget("a/b/c/d/e/fo o.o", &err));
+    ASSERT_EQ("", err);
+    // Note, different slashes from manifest.
+    fs_.Create("a/b\\c\\d/e/fo o.o.d",
+               "a\\b\\c\\d\\e\\fo\\ o.o: blah.h bar.h\n");
+    EXPECT_TRUE(builder.Build(&err));
+    EXPECT_EQ("", err);
+
+    deps_log.Close();
+    // release() so the Builder doesn't delete the fixture-owned runner.
+    builder.command_runner_.release();
+  }
+
+  {
+    State state;
+    ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+    DepsLog deps_log;
+    ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err));
+    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+    ASSERT_EQ("", err);
+
+    Builder builder(&state, config_, NULL, &deps_log, &fs_);
+    builder.command_runner_.reset(&command_runner_);
+
+    Edge* edge = state.edges_.back();
+
+    state.GetNode("bar.h", 0)->MarkDirty();  // Mark bar.h as missing.
+    EXPECT_TRUE(builder.AddTarget("a/b/c/d/e/fo o.o", &err));
+    ASSERT_EQ("", err);
+
+    // Expect three new edges: one generating fo o.o, and two more from
+    // loading the depfile.
+    ASSERT_EQ(3u, state.edges_.size());
+    // Expect our edge to now have three inputs: foo.c and two headers.
+    ASSERT_EQ(3u, edge->inputs_.size());
+
+    // Expect the command line we generate to only use the original input.
+    // Note, slashes from manifest, not .d.
+    ASSERT_EQ("cc x\\y/z\\foo.c", edge->EvaluateCommand());
+
+    deps_log.Close();
+    builder.command_runner_.release();
+  }
+}
+#endif
+
+/// Check that a restat rule doesn't clear an edge if the depfile is missing.
+/// Follows from: https://github.com/ninja-build/ninja/issues/603
+TEST_F(BuildTest, RestatMissingDepfile) {
+const char* manifest =
+"rule true\n"
+"  command = true\n"  // Would be "write if out-of-date" in reality.
+"  restat = 1\n"
+"build header.h: true header.in\n"
+"build out: cat header.h\n"
+"  depfile = out.d\n";
+
+  fs_.Create("header.h", "");
+  fs_.Tick();
+  fs_.Create("out", "");
+  fs_.Create("header.in", "");
+
+  // Normally, only 'header.h' would be rebuilt, as
+  // its rule doesn't touch the output and has 'restat=1' set.
+  // But we are also missing the depfile for 'out',
+  // which should force its command to run anyway!
+  RebuildTarget("out", manifest);
+  ASSERT_EQ(2u, command_runner_.commands_ran_.size());
+}
+
+/// Check that a restat rule doesn't clear an edge if the deps are missing.
+/// https://github.com/ninja-build/ninja/issues/603
+TEST_F(BuildWithDepsLogTest, RestatMissingDepfileDepslog) {
+  string err;
+  const char* manifest =
+"rule true\n"
+"  command = true\n"  // Would be "write if out-of-date" in reality.
+"  restat = 1\n"
+"build header.h: true header.in\n"
+"build out: cat header.h\n"
+"  deps = gcc\n"
+"  depfile = out.d\n";
+
+  // Build once to populate ninja deps logs from out.d
+  fs_.Create("header.in", "");
+  fs_.Create("out.d", "out: header.h");
+  fs_.Create("header.h", "");
+
+  RebuildTarget("out", manifest, "build_log", "ninja_deps");
+  ASSERT_EQ(2u, command_runner_.commands_ran_.size());
+
+  // Sanity: this rebuild should be NOOP
+  RebuildTarget("out", manifest, "build_log", "ninja_deps");
+  ASSERT_EQ(0u, command_runner_.commands_ran_.size());
+
+  // Touch 'header.in', blank dependencies log (create a different one).
+  // Building header.h triggers 'restat' outputs cleanup.
+  // Validate that out is rebuilt nevertheless, as deps are missing.
+  fs_.Tick();
+  fs_.Create("header.in", "");
+
+  // (switch to a new blank deps_log "ninja_deps2")
+  RebuildTarget("out", manifest, "build_log", "ninja_deps2");
+  ASSERT_EQ(2u, command_runner_.commands_ran_.size());
+
+  // Sanity: this build should be NOOP
+  RebuildTarget("out", manifest, "build_log", "ninja_deps2");
+  ASSERT_EQ(0u, command_runner_.commands_ran_.size());
+
+  // Check that invalidating deps by target timestamp also works here
+  // Repeat the test but touch target instead of blanking the log.
+  fs_.Tick();
+  fs_.Create("header.in", "");
+  fs_.Create("out", "");
+  RebuildTarget("out", manifest, "build_log", "ninja_deps2");
+  ASSERT_EQ(2u, command_runner_.commands_ran_.size());
+
+  // And this build should be NOOP again
+  RebuildTarget("out", manifest, "build_log", "ninja_deps2");
+  ASSERT_EQ(0u, command_runner_.commands_ran_.size());
+}
+
+/// A depfile whose rule line names the wrong output ("bar.o.d" instead of
+/// "foo.o") must not satisfy the edge: the target should rebuild.
+TEST_F(BuildTest, WrongOutputInDepfileCausesRebuild) {
+  string err;
+  const char* manifest =
+"rule cc\n"
+"  command = cc $in\n"
+"  depfile = $out.d\n"
+"build foo.o: cc foo.c\n";
+
+  fs_.Create("foo.c", "");
+  fs_.Create("foo.o", "");
+  fs_.Create("header.h", "");
+  // Depfile names a different output than the edge produces.
+  fs_.Create("foo.o.d", "bar.o.d: header.h\n");
+
+  RebuildTarget("foo.o", manifest, "build_log", "ninja_deps");
+  ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+}
+
+/// An edge in the built-in "console" pool should build successfully
+/// like any other edge.
+TEST_F(BuildTest, Console) {
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule console\n"
+"  command = console\n"
+"  pool = console\n"
+"build cons: console in.txt\n"));
+
+  fs_.Create("in.txt", "");
+
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("cons", &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ("", err);
+  ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+}
+
+TEST_F(BuildTest, DyndepMissingAndNoRule) {
+  // Verify that we can diagnose when a dyndep file is missing and
+  // has no rule to build it.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+"  command = touch $out\n"
+"build out: touch || dd\n"
+"  dyndep = dd\n"
+));
+
+  string err;
+  // The error surfaces at AddTarget time, before any command runs.
+  EXPECT_FALSE(builder_.AddTarget("out", &err));
+  EXPECT_EQ("loading 'dd': No such file or directory", err);
+}
+
+TEST_F(BuildTest, DyndepReadyImplicitConnection) {
+  // Verify that a dyndep file can be loaded immediately to discover
+  // that one edge has an implicit output that is also an implicit
+  // input of another edge.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+"  command = touch $out $out.imp\n"
+"build tmp: touch || dd\n"
+"  dyndep = dd\n"
+"build out: touch || dd\n"
+"  dyndep = dd\n"
+));
+  // "dd" already exists on disk, so it is loaded at AddTarget time.
+  fs_.Create("dd",
+"ninja_dyndep_version = 1\n"
+"build out | out.imp: dyndep | tmp.imp\n"
+"build tmp | tmp.imp: dyndep\n"
+);
+
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("out", &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ("", err);
+  // tmp must be built before out because out now depends on tmp.imp.
+  ASSERT_EQ(2u, command_runner_.commands_ran_.size());
+  EXPECT_EQ("touch tmp tmp.imp", command_runner_.commands_ran_[0]);
+  EXPECT_EQ("touch out out.imp", command_runner_.commands_ran_[1]);
+}
+
+TEST_F(BuildTest, DyndepReadySyntaxError) {
+  // Verify that a dyndep file can be loaded immediately to discover
+  // and reject a syntax error in it.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+"  command = touch $out\n"
+"build out: touch || dd\n"
+"  dyndep = dd\n"
+));
+  // Missing the mandatory "ninja_dyndep_version = 1" header line.
+  fs_.Create("dd",
+"build out: dyndep\n"
+);
+
+  string err;
+  EXPECT_FALSE(builder_.AddTarget("out", &err));
+  EXPECT_EQ("dd:1: expected 'ninja_dyndep_version = ...'\n", err);
+}
+
+TEST_F(BuildTest, DyndepReadyCircular) {
+  // Verify that a dyndep file can be loaded immediately to discover
+  // and reject a circular dependency.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule r\n"
+"  command = unused\n"
+"build out: r in || dd\n"
+"  dyndep = dd\n"
+"build in: r circ\n"
+  ));
+  // dyndep declares "circ" as an extra output of "out", but "circ" is
+  // also an input of "in", which "out" depends on -> cycle.
+  fs_.Create("dd",
+"ninja_dyndep_version = 1\n"
+"build out | circ: dyndep\n"
+  );
+  fs_.Create("out", "");
+
+  string err;
+  EXPECT_FALSE(builder_.AddTarget("out", &err));
+  EXPECT_EQ("dependency cycle: circ -> in -> circ", err);
+}
+
+TEST_F(BuildTest, DyndepBuild) {
+  // Verify that a dyndep file can be built and loaded to discover nothing.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+"  command = touch $out\n"
+"rule cp\n"
+"  command = cp $in $out\n"
+"build dd: cp dd-in\n"
+"build out: touch || dd\n"
+"  dyndep = dd\n"
+));
+  fs_.Create("dd-in",
+"ninja_dyndep_version = 1\n"
+"build out: dyndep\n"
+);
+
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("out", &err));
+  EXPECT_EQ("", err);
+
+  // Snapshot the count so later checks ignore files made during setup.
+  size_t files_created = fs_.files_created_.size();
+  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ("", err);
+
+  // dd must be generated first, then read back before out is built.
+  ASSERT_EQ(2u, command_runner_.commands_ran_.size());
+  EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]);
+  EXPECT_EQ("touch out", command_runner_.commands_ran_[1]);
+  ASSERT_EQ(2u, fs_.files_read_.size());
+  EXPECT_EQ("dd-in", fs_.files_read_[0]);
+  EXPECT_EQ("dd", fs_.files_read_[1]);
+  ASSERT_EQ(2u + files_created, fs_.files_created_.size());
+  EXPECT_EQ(1u, fs_.files_created_.count("dd"));
+  EXPECT_EQ(1u, fs_.files_created_.count("out"));
+}
+
+TEST_F(BuildTest, DyndepBuildSyntaxError) {
+  // Verify that a dyndep file can be built and loaded to discover
+  // and reject a syntax error in it.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+"  command = touch $out\n"
+"rule cp\n"
+"  command = cp $in $out\n"
+"build dd: cp dd-in\n"
+"build out: touch || dd\n"
+"  dyndep = dd\n"
+));
+  // Missing the mandatory "ninja_dyndep_version = 1" header line.
+  fs_.Create("dd-in",
+"build out: dyndep\n"
+);
+
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("out", &err));
+  EXPECT_EQ("", err);
+
+  // Unlike the "ready" case, the error surfaces during Build(), after
+  // dd has been produced and parsed.
+  EXPECT_FALSE(builder_.Build(&err));
+  EXPECT_EQ("dd:1: expected 'ninja_dyndep_version = ...'\n", err);
+}
+
+TEST_F(BuildTest, DyndepBuildUnrelatedOutput) {
+  // Verify that a dyndep file can have dependents that do not specify
+  // it as their dyndep binding.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+"  command = touch $out\n"
+"rule cp\n"
+"  command = cp $in $out\n"
+"build dd: cp dd-in\n"
+"build unrelated: touch || dd\n"
+"build out: touch unrelated || dd\n"
+"  dyndep = dd\n"
+  ));
+  fs_.Create("dd-in",
+"ninja_dyndep_version = 1\n"
+"build out: dyndep\n"
+);
+  // Make "out" exist but stale relative to its inputs.
+  fs_.Tick();
+  fs_.Create("out", "");
+
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("out", &err));
+  EXPECT_EQ("", err);
+
+  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ("", err);
+  ASSERT_EQ(3u, command_runner_.commands_ran_.size());
+  EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]);
+  EXPECT_EQ("touch unrelated", command_runner_.commands_ran_[1]);
+  EXPECT_EQ("touch out", command_runner_.commands_ran_[2]);
+}
+
+TEST_F(BuildTest, DyndepBuildDiscoverNewOutput) {
+  // Verify that a dyndep file can be built and loaded to discover
+  // a new output of an edge.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+"  command = touch $out $out.imp\n"
+"rule cp\n"
+"  command = cp $in $out\n"
+"build dd: cp dd-in\n"
+"build out: touch in || dd\n"
+"  dyndep = dd\n"
+  ));
+  fs_.Create("in", "");
+  // dd-in adds out.imp as an implicit output of the edge for "out".
+  fs_.Create("dd-in",
+"ninja_dyndep_version = 1\n"
+"build out | out.imp: dyndep\n"
+);
+  // Make "out" newer than "in" so only the missing out.imp is at issue.
+  fs_.Tick();
+  fs_.Create("out", "");
+
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("out", &err));
+  EXPECT_EQ("", err);
+
+  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ("", err);
+  ASSERT_EQ(2u, command_runner_.commands_ran_.size());
+  EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]);
+  EXPECT_EQ("touch out out.imp", command_runner_.commands_ran_[1]);
+}
+
+TEST_F(BuildTest, DyndepBuildDiscoverNewOutputWithMultipleRules1) {
+  // Verify that a dyndep file can be built and loaded to discover
+  // a new output of an edge that is already the output of another edge.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+"  command = touch $out $out.imp\n"
+"rule cp\n"
+"  command = cp $in $out\n"
+"build dd: cp dd-in\n"
+"build out1 | out-twice.imp: touch in\n"
+"build out2: touch in || dd\n"
+"  dyndep = dd\n"
+  ));
+  fs_.Create("in", "");
+  // dyndep claims out-twice.imp for out2, but the manifest already
+  // assigns it to out1 -> conflict.
+  fs_.Create("dd-in",
+"ninja_dyndep_version = 1\n"
+"build out2 | out-twice.imp: dyndep\n"
+);
+  fs_.Tick();
+  fs_.Create("out1", "");
+  fs_.Create("out2", "");
+
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("out1", &err));
+  EXPECT_TRUE(builder_.AddTarget("out2", &err));
+  EXPECT_EQ("", err);
+
+  EXPECT_FALSE(builder_.Build(&err));
+  EXPECT_EQ("multiple rules generate out-twice.imp", err);
+}
+
+TEST_F(BuildTest, DyndepBuildDiscoverNewOutputWithMultipleRules2) {
+  // Verify that a dyndep file can be built and loaded to discover
+  // a new output of an edge that is already the output of another
+  // edge also discovered by dyndep.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+"  command = touch $out $out.imp\n"
+"rule cp\n"
+"  command = cp $in $out\n"
+"build dd1: cp dd1-in\n"
+"build out1: touch || dd1\n"
+"  dyndep = dd1\n"
+"build dd2: cp dd2-in || dd1\n"  // make order predictable for test
+"build out2: touch || dd2\n"
+"  dyndep = dd2\n"
+));
+  fs_.Create("out1", "");
+  fs_.Create("out2", "");
+  // Both dyndep files claim out-twice.imp, for different edges.
+  fs_.Create("dd1-in",
+"ninja_dyndep_version = 1\n"
+"build out1 | out-twice.imp: dyndep\n"
+);
+  fs_.Create("dd2-in", "");
+  // dd2 already exists on disk; dd1 will be built during the build.
+  fs_.Create("dd2",
+"ninja_dyndep_version = 1\n"
+"build out2 | out-twice.imp: dyndep\n"
+);
+  fs_.Tick();
+  fs_.Create("out1", "");
+  fs_.Create("out2", "");
+
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("out1", &err));
+  EXPECT_TRUE(builder_.AddTarget("out2", &err));
+  EXPECT_EQ("", err);
+
+  EXPECT_FALSE(builder_.Build(&err));
+  EXPECT_EQ("multiple rules generate out-twice.imp", err);
+}
+
+TEST_F(BuildTest, DyndepBuildDiscoverNewInput) {
+  // Verify that a dyndep file can be built and loaded to discover
+  // a new input to an edge.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+"  command = touch $out\n"
+"rule cp\n"
+"  command = cp $in $out\n"
+"build dd: cp dd-in\n"
+"build in: touch\n"
+"build out: touch || dd\n"
+"  dyndep = dd\n"
+  ));
+  // dd-in adds "in" (an output of another edge) as an implicit input.
+  fs_.Create("dd-in",
+"ninja_dyndep_version = 1\n"
+"build out: dyndep | in\n"
+);
+  fs_.Tick();
+  fs_.Create("out", "");
+
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("out", &err));
+  EXPECT_EQ("", err);
+
+  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ("", err);
+  // "in" is scheduled after dd is loaded, and before "out".
+  ASSERT_EQ(3u, command_runner_.commands_ran_.size());
+  EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]);
+  EXPECT_EQ("touch in", command_runner_.commands_ran_[1]);
+  EXPECT_EQ("touch out", command_runner_.commands_ran_[2]);
+}
+
+TEST_F(BuildTest, DyndepBuildDiscoverImplicitConnection) {
+  // Verify that a dyndep file can be built and loaded to discover
+  // that one edge has an implicit output that is also an implicit
+  // input of another edge.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+"  command = touch $out $out.imp\n"
+"rule cp\n"
+"  command = cp $in $out\n"
+"build dd: cp dd-in\n"
+"build tmp: touch || dd\n"
+"  dyndep = dd\n"
+"build out: touch || dd\n"
+"  dyndep = dd\n"
+));
+  // Unlike DyndepReadyImplicitConnection, dd must be built first here.
+  fs_.Create("dd-in",
+"ninja_dyndep_version = 1\n"
+"build out | out.imp: dyndep | tmp.imp\n"
+"build tmp | tmp.imp: dyndep\n"
+);
+
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("out", &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ("", err);
+  ASSERT_EQ(3u, command_runner_.commands_ran_.size());
+  EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]);
+  EXPECT_EQ("touch tmp tmp.imp", command_runner_.commands_ran_[1]);
+  EXPECT_EQ("touch out out.imp", command_runner_.commands_ran_[2]);
+}
+
+TEST_F(BuildTest, DyndepBuildDiscoverNowWantEdge) {
+  // Verify that a dyndep file can be built and loaded to discover
+  // that an edge is actually wanted due to a missing implicit output.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+"  command = touch $out $out.imp\n"
+"rule cp\n"
+"  command = cp $in $out\n"
+"build dd: cp dd-in\n"
+"build tmp: touch || dd\n"
+"  dyndep = dd\n"
+"build out: touch tmp || dd\n"
+"  dyndep = dd\n"
+));
+  // tmp and out both exist and look up to date before dyndep is loaded.
+  fs_.Create("tmp", "");
+  fs_.Create("out", "");
+  // dd reveals tmp.imp as an output of "tmp"; it doesn't exist, so the
+  // tmp edge (and thus out) becomes wanted.
+  fs_.Create("dd-in",
+"ninja_dyndep_version = 1\n"
+"build out: dyndep\n"
+"build tmp | tmp.imp: dyndep\n"
+);
+
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("out", &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ("", err);
+  ASSERT_EQ(3u, command_runner_.commands_ran_.size());
+  EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]);
+  EXPECT_EQ("touch tmp tmp.imp", command_runner_.commands_ran_[1]);
+  EXPECT_EQ("touch out out.imp", command_runner_.commands_ran_[2]);
+}
+
+TEST_F(BuildTest, DyndepBuildDiscoverNowWantEdgeAndDependent) {
+  // Verify that a dyndep file can be built and loaded to discover
+  // that an edge and a dependent are actually wanted.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+"  command = touch $out $out.imp\n"
+"rule cp\n"
+"  command = cp $in $out\n"
+"build dd: cp dd-in\n"
+"build tmp: touch || dd\n"
+"  dyndep = dd\n"
+"build out: touch tmp\n"
+));
+  // Both look clean until dd reveals the missing tmp.imp output.
+  fs_.Create("tmp", "");
+  fs_.Create("out", "");
+  fs_.Create("dd-in",
+"ninja_dyndep_version = 1\n"
+"build tmp | tmp.imp: dyndep\n"
+);
+
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("out", &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ("", err);
+  // Rebuilding tmp makes its dependent "out" dirty as well.
+  ASSERT_EQ(3u, command_runner_.commands_ran_.size());
+  EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]);
+  EXPECT_EQ("touch tmp tmp.imp", command_runner_.commands_ran_[1]);
+  EXPECT_EQ("touch out out.imp", command_runner_.commands_ran_[2]);
+}
+
+TEST_F(BuildTest, DyndepBuildDiscoverCircular) {
+  // Verify that a dyndep file can be built and loaded to discover
+  // and reject a circular dependency.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule r\n"
+"  command = unused\n"
+"rule cp\n"
+"  command = cp $in $out\n"
+"build dd: cp dd-in\n"
+"build out: r in || dd\n"
+"  depfile = out.d\n"
+"  dyndep = dd\n"
+"build in: r || dd\n"
+"  dyndep = dd\n"
+  ));
+  fs_.Create("out.d", "out: inimp\n");
+  // dd makes "circ" an output of "out" and an input of "in",
+  // while "out" already depends on "in" -> cycle.
+  fs_.Create("dd-in",
+"ninja_dyndep_version = 1\n"
+"build out | circ: dyndep\n"
+"build in: dyndep | circ\n"
+  );
+  fs_.Create("out", "");
+
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("out", &err));
+  EXPECT_EQ("", err);
+
+  EXPECT_FALSE(builder_.Build(&err));
+  // Depending on how the pointers in Plan::ready_ work out, we could have
+  // discovered the cycle from either starting point.
+  EXPECT_TRUE(err == "dependency cycle: circ -> in -> circ" ||
+              err == "dependency cycle: in -> circ -> in");
+}
+
+TEST_F(BuildWithLogTest, DyndepBuildDiscoverRestat) {
+  // Verify that a dyndep file can be built and loaded to discover
+  // that an edge has a restat binding.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule true\n"
+"  command = true\n"
+"rule cp\n"
+"  command = cp $in $out\n"
+"build dd: cp dd-in\n"
+"build out1: true in || dd\n"
+"  dyndep = dd\n"
+"build out2: cat out1\n"));
+
+  fs_.Create("out1", "");
+  fs_.Create("out2", "");
+  // The restat binding comes from the dyndep file, not the manifest.
+  fs_.Create("dd-in",
+"ninja_dyndep_version = 1\n"
+"build out1: dyndep\n"
+"  restat = 1\n"
+);
+  fs_.Tick();
+  fs_.Create("in", "");
+
+  // Do a pre-build so that there's commands in the log for the outputs,
+  // otherwise, the lack of an entry in the build log will cause "out2" to
+  // rebuild regardless of restat.
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("out2", &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(builder_.Build(&err));
+  ASSERT_EQ("", err);
+  ASSERT_EQ(3u, command_runner_.commands_ran_.size());
+  EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]);
+  EXPECT_EQ("true", command_runner_.commands_ran_[1]);
+  EXPECT_EQ("cat out1 > out2", command_runner_.commands_ran_[2]);
+
+  command_runner_.commands_ran_.clear();
+  state_.Reset();
+  fs_.Tick();
+  fs_.Create("in", "");
+
+  // We touched "in", so we should build "out1". But because "true" does not
+  // touch "out1", we should cancel the build of "out2".
+  EXPECT_TRUE(builder_.AddTarget("out2", &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(builder_.Build(&err));
+  ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+  EXPECT_EQ("true", command_runner_.commands_ran_[0]);
+}
+
+TEST_F(BuildTest, DyndepBuildDiscoverScheduledEdge) {
+  // Verify that a dyndep file can be built and loaded to discover a
+  // new input that itself is an output from an edge that has already
+  // been scheduled but not finished.  We should not re-schedule it.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+"  command = touch $out $out.imp\n"
+"rule cp\n"
+"  command = cp $in $out\n"
+"build out1 | out1.imp: touch\n"
+"build zdd: cp zdd-in\n"
+"  verify_active_edge = out1\n"  // verify out1 is active when zdd is finished
+"build out2: cp out1 || zdd\n"
+"  dyndep = zdd\n"
+));
+  fs_.Create("zdd-in",
+"ninja_dyndep_version = 1\n"
+"build out2: dyndep | out1.imp\n"
+);
+
+  // Enable concurrent builds so that we can load the dyndep file
+  // while another edge is still active.
+  command_runner_.max_active_edges_ = 2;
+
+  // During the build "out1" and "zdd" should be built concurrently.
+  // The fake command runner will finish these in reverse order
+  // of the names of the first outputs, so "zdd" will finish first
+  // and we will load the dyndep file while the edge for "out1" is
+  // still active.  This will add a new dependency on "out1.imp",
+  // also produced by the active edge.  The builder should not
+  // re-schedule the already-active edge.
+
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("out1", &err));
+  EXPECT_TRUE(builder_.AddTarget("out2", &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ("", err);
+  ASSERT_EQ(3u, command_runner_.commands_ran_.size());
+  // Depending on how the pointers in Plan::ready_ work out, the first
+  // two commands may have run in either order.
+  EXPECT_TRUE((command_runner_.commands_ran_[0] == "touch out1 out1.imp" &&
+               command_runner_.commands_ran_[1] == "cp zdd-in zdd") ||
+              (command_runner_.commands_ran_[1] == "touch out1 out1.imp" &&
+               command_runner_.commands_ran_[0] == "cp zdd-in zdd"));
+  EXPECT_EQ("cp out1 out2", command_runner_.commands_ran_[2]);
+}
+
+TEST_F(BuildTest, DyndepTwoLevelDirect) {
+  // Verify that a clean dyndep file can depend on a dirty dyndep file
+  // and be loaded properly after the dirty one is built and loaded.
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+"  command = touch $out $out.imp\n"
+"rule cp\n"
+"  command = cp $in $out\n"
+"build dd1: cp dd1-in\n"
+"build out1 | out1.imp: touch || dd1\n"
+"  dyndep = dd1\n"
+"build dd2: cp dd2-in || dd1\n"  // direct order-only dep on dd1
+"build out2: touch || dd2\n"
+"  dyndep = dd2\n"
+));
+  fs_.Create("out1.imp", "");
+  fs_.Create("out2", "");
+  fs_.Create("out2.imp", "");
+  fs_.Create("dd1-in",
+"ninja_dyndep_version = 1\n"
+"build out1: dyndep\n"
+);
+  fs_.Create("dd2-in", "");
+  // dd2 is already up to date on disk ("clean"); dd1 must be built.
+  fs_.Create("dd2",
+"ninja_dyndep_version = 1\n"
+"build out2 | out2.imp: dyndep | out1.imp\n"
+);
+
+  // During the build dd1 should be built and loaded.  The RecomputeDirty
+  // called as a result of loading dd1 should not cause dd2 to be loaded
+  // because the builder will never get a chance to update the build plan
+  // to account for dd2.  Instead dd2 should only be later loaded once the
+  // builder recognizes that it is now ready (as its order-only dependency
+  // on dd1 has been satisfied).  This test case verifies that each dyndep
+  // file is loaded to update the build graph independently.
+
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("out2", &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_EQ("", err);
+  ASSERT_EQ(3u, command_runner_.commands_ran_.size());
+  EXPECT_EQ("cp dd1-in dd1", command_runner_.commands_ran_[0]);
+  EXPECT_EQ("touch out1 out1.imp", command_runner_.commands_ran_[1]);
+  EXPECT_EQ("touch out2 out2.imp", command_runner_.commands_ran_[2]);
+}
+
+TEST_F(BuildTest, DyndepTwoLevelIndirect) {
+ // Verify that dyndep files can add to an edge new implicit inputs that
+ // correspond to implicit outputs added to other edges by other dyndep
+ // files on which they (order-only) depend.
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+" command = touch $out $out.imp\n"
+"rule cp\n"
+" command = cp $in $out\n"
+"build dd1: cp dd1-in\n"
+"build out1: touch || dd1\n"
+" dyndep = dd1\n"
+"build dd2: cp dd2-in || out1\n" // indirect order-only dep on dd1
+"build out2: touch || dd2\n"
+" dyndep = dd2\n"
+));
+ // Unlike the Direct variant, out1.imp is only declared as an output by
+ // dd1's dyndep info; dd2 then names it as an implicit input of out2.
+ fs_.Create("out1.imp", "");
+ fs_.Create("out2", "");
+ fs_.Create("out2.imp", "");
+ fs_.Create("dd1-in",
+"ninja_dyndep_version = 1\n"
+"build out1 | out1.imp: dyndep\n"
+);
+ fs_.Create("dd2-in", "");
+ fs_.Create("dd2",
+"ninja_dyndep_version = 1\n"
+"build out2 | out2.imp: dyndep | out1.imp\n"
+);
+
+ // During the build dd1 should be built and loaded. Then dd2 should
+ // be built and loaded. Loading dd2 should cause the builder to
+ // recognize that out2 needs to be built even though it was originally
+ // clean without dyndep info.
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out2", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_EQ("", err);
+ ASSERT_EQ(3u, command_runner_.commands_ran_.size());
+ EXPECT_EQ("cp dd1-in dd1", command_runner_.commands_ran_[0]);
+ EXPECT_EQ("touch out1 out1.imp", command_runner_.commands_ran_[1]);
+ EXPECT_EQ("touch out2 out2.imp", command_runner_.commands_ran_[2]);
+}
+
+TEST_F(BuildTest, DyndepTwoLevelDiscoveredReady) {
+ // Verify that a dyndep file can discover a new input whose
+ // edge also has a dyndep file that is ready to load immediately.
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+" command = touch $out\n"
+"rule cp\n"
+" command = cp $in $out\n"
+"build dd0: cp dd0-in\n"
+"build dd1: cp dd1-in\n"
+"build in: touch\n"
+"build tmp: touch || dd0\n"
+" dyndep = dd0\n"
+"build out: touch || dd1\n"
+" dyndep = dd1\n"
+ ));
+ // dd0 already exists on disk ("ready to load immediately"); only dd1
+ // has to be generated during the build.
+ fs_.Create("dd1-in",
+"ninja_dyndep_version = 1\n"
+"build out: dyndep | tmp\n"
+);
+ fs_.Create("dd0-in", "");
+ fs_.Create("dd0",
+"ninja_dyndep_version = 1\n"
+"build tmp: dyndep | in\n"
+);
+ // "out" exists and is newer than everything, but dd1 adds a dependency
+ // on "tmp" which is missing, forcing the whole chain to build.
+ fs_.Tick();
+ fs_.Create("out", "");
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out", &err));
+ EXPECT_EQ("", err);
+
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_EQ("", err);
+ ASSERT_EQ(4u, command_runner_.commands_ran_.size());
+ EXPECT_EQ("cp dd1-in dd1", command_runner_.commands_ran_[0]);
+ EXPECT_EQ("touch in", command_runner_.commands_ran_[1]);
+ EXPECT_EQ("touch tmp", command_runner_.commands_ran_[2]);
+ EXPECT_EQ("touch out", command_runner_.commands_ran_[3]);
+}
+
+TEST_F(BuildTest, DyndepTwoLevelDiscoveredDirty) {
+ // Verify that a dyndep file can discover a new input whose
+ // edge also has a dyndep file that needs to be built.
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+" command = touch $out\n"
+"rule cp\n"
+" command = cp $in $out\n"
+"build dd0: cp dd0-in\n"
+"build dd1: cp dd1-in\n"
+"build in: touch\n"
+"build tmp: touch || dd0\n"
+" dyndep = dd0\n"
+"build out: touch || dd1\n"
+" dyndep = dd1\n"
+ ));
+ // Unlike DiscoveredReady, dd0 does not exist yet — it must itself be
+ // generated from dd0-in before tmp's dyndep info can be loaded.
+ fs_.Create("dd1-in",
+"ninja_dyndep_version = 1\n"
+"build out: dyndep | tmp\n"
+);
+ fs_.Create("dd0-in",
+"ninja_dyndep_version = 1\n"
+"build tmp: dyndep | in\n"
+);
+ fs_.Tick();
+ fs_.Create("out", "");
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out", &err));
+ EXPECT_EQ("", err);
+
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_EQ("", err);
+ // One extra command compared to DiscoveredReady: building dd0 itself.
+ ASSERT_EQ(5u, command_runner_.commands_ran_.size());
+ EXPECT_EQ("cp dd1-in dd1", command_runner_.commands_ran_[0]);
+ EXPECT_EQ("cp dd0-in dd0", command_runner_.commands_ran_[1]);
+ EXPECT_EQ("touch in", command_runner_.commands_ran_[2]);
+ EXPECT_EQ("touch tmp", command_runner_.commands_ran_[3]);
+ EXPECT_EQ("touch out", command_runner_.commands_ran_[4]);
+}
diff --git a/src/canon_perftest.cc b/src/canon_perftest.cc
new file mode 100644
index 0000000..088bd45
--- /dev/null
+++ b/src/canon_perftest.cc
@@ -0,0 +1,59 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdio.h>
+#include <string.h>
+
+#include "util.h"
+#include "metrics.h"
+
+using namespace std;
+
+// Sample input: a representative long relative path to canonicalize.
+const char kPath[] =
+ "../../third_party/WebKit/Source/WebCore/"
+ "platform/leveldb/LevelDBWriteBatch.cpp";
+
+// Micro-benchmark for CanonicalizePath(): runs 5 batches of 2M calls and
+// reports min/max/avg batch time in milliseconds.
+int main() {
+ vector<int> times;
+ string err;
+
+ // kPath fits comfortably in buf (it is ~81 chars incl. NUL vs. 200).
+ // CanonicalizePath mutates buf/len in place, so after the first call
+ // the remaining repetitions operate on an already-canonical path.
+ char buf[200];
+ size_t len = strlen(kPath);
+ strcpy(buf, kPath);
+
+ for (int j = 0; j < 5; ++j) {
+ const int kNumRepetitions = 2000000;
+ int64_t start = GetTimeMillis();
+ uint64_t slash_bits;
+ for (int i = 0; i < kNumRepetitions; ++i) {
+ CanonicalizePath(buf, &len, &slash_bits, &err);
+ }
+ int delta = (int)(GetTimeMillis() - start);
+ times.push_back(delta);
+ }
+
+ // Reduce the batch timings to min/max/avg for the report.
+ int min = times[0];
+ int max = times[0];
+ float total = 0;
+ for (size_t i = 0; i < times.size(); ++i) {
+ total += times[i];
+ if (times[i] < min)
+ min = times[i];
+ else if (times[i] > max)
+ max = times[i];
+ }
+
+ printf("min %dms max %dms avg %.1fms\n",
+ min, max, total / times.size());
+}
diff --git a/src/clean.cc b/src/clean.cc
new file mode 100644
index 0000000..3e57437
--- /dev/null
+++ b/src/clean.cc
@@ -0,0 +1,293 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "clean.h"
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "disk_interface.h"
+#include "graph.h"
+#include "state.h"
+#include "util.h"
+
+using namespace std;
+
+// Construct a Cleaner over the parsed build graph in @a state, using
+// @a disk_interface for all stat/remove operations (allows test fakes).
+Cleaner::Cleaner(State* state,
+ const BuildConfig& config,
+ DiskInterface* disk_interface)
+ : state_(state),
+ config_(config),
+ dyndep_loader_(state, disk_interface),
+ cleaned_files_count_(0),
+ disk_interface_(disk_interface),
+ status_(0) {
+}
+
+// Thin wrapper over the disk interface; returns its result code directly.
+int Cleaner::RemoveFile(const string& path) {
+ return disk_interface_->RemoveFile(path);
+}
+
+// Return whether @a path exists on disk. Stat() returns -1 on error,
+// 0 for a missing file, and a positive mtime otherwise.
+bool Cleaner::FileExists(const string& path) {
+ string err;
+ TimeStamp mtime = disk_interface_->Stat(path, &err);
+ if (mtime == -1)
+ Error("%s", err.c_str());
+ return mtime > 0; // Treat Stat() errors as "file does not exist".
+}
+
+// Count @a path as cleaned and, in verbose mode, print it.
+void Cleaner::Report(const string& path) {
+ ++cleaned_files_count_;
+ if (IsVerbose())
+ printf("Remove %s\n", path.c_str());
+}
+
+// Remove @a path at most once per Reset(). In dry-run mode only report
+// files that actually exist; otherwise delete and report on success.
+void Cleaner::Remove(const string& path) {
+ if (!IsAlreadyRemoved(path)) {
+ removed_.insert(path);
+ if (config_.dry_run) {
+ if (FileExists(path))
+ Report(path);
+ } else {
+ int ret = RemoveFile(path);
+ if (ret == 0)
+ Report(path);
+ else if (ret == -1)
+ status_ = 1;
+ // Any other return value (e.g. file already absent) is silently
+ // ignored: nothing was cleaned and nothing failed.
+ }
+ }
+}
+
+// Return whether @a path was already handled by Remove() since Reset().
+bool Cleaner::IsAlreadyRemoved(const string& path) {
+ set<string>::iterator i = removed_.find(path);
+ return (i != removed_.end());
+}
+
+// Remove an edge's auxiliary files (depfile and response file), if any.
+// Paths are unescaped first since they may contain '$ ' escapes.
+void Cleaner::RemoveEdgeFiles(Edge* edge) {
+ string depfile = edge->GetUnescapedDepfile();
+ if (!depfile.empty())
+ Remove(depfile);
+
+ string rspfile = edge->GetUnescapedRspfile();
+ if (!rspfile.empty())
+ Remove(rspfile);
+}
+
+// Print "Cleaning..." unless quiet. In verbose mode the per-file output
+// follows on its own lines; otherwise the footer continues this line.
+void Cleaner::PrintHeader() {
+ if (config_.verbosity == BuildConfig::QUIET)
+ return;
+ printf("Cleaning...");
+ if (IsVerbose())
+ printf("\n");
+ else
+ printf(" ");
+ fflush(stdout);
+}
+
+// Print the number of files cleaned, unless quiet.
+void Cleaner::PrintFooter() {
+ if (config_.verbosity == BuildConfig::QUIET)
+ return;
+ printf("%d files.\n", cleaned_files_count_);
+}
+
+// Remove every declared output (plus depfiles/rspfiles) in the graph,
+// skipping phony edges and — unless @a generator is set — edges marked
+// with the "generator" binding. Returns the accumulated error status.
+int Cleaner::CleanAll(bool generator) {
+ Reset();
+ PrintHeader();
+ LoadDyndeps();
+ for (vector<Edge*>::iterator e = state_->edges_.begin();
+ e != state_->edges_.end(); ++e) {
+ // Do not try to remove phony targets
+ if ((*e)->is_phony())
+ continue;
+ // Do not remove generator's files unless generator specified.
+ if (!generator && (*e)->GetBindingBool("generator"))
+ continue;
+ for (vector<Node*>::iterator out_node = (*e)->outputs_.begin();
+ out_node != (*e)->outputs_.end(); ++out_node) {
+ Remove((*out_node)->path());
+ }
+
+ RemoveEdgeFiles(*e);
+ }
+ PrintFooter();
+ return status_;
+}
+
+// Remove outputs recorded in the build log that the current manifest no
+// longer knows how to build (no node, or node without a producing edge).
+// NOTE(review): unlike the other Clean* entry points this does not call
+// LoadDyndeps() — presumably dead entries cannot gain dyndep info; confirm.
+int Cleaner::CleanDead(const BuildLog::Entries& entries) {
+ Reset();
+ PrintHeader();
+ for (BuildLog::Entries::const_iterator i = entries.begin(); i != entries.end(); ++i) {
+ Node* n = state_->LookupNode(i->first);
+ if (!n || !n->in_edge()) {
+ Remove(i->first.AsString());
+ }
+ }
+ PrintFooter();
+ return status_;
+}
+
+// Depth-first walk from @a target over producing edges' inputs, removing
+// each non-phony intermediate output. cleaned_ is the visited set that
+// bounds the recursion on diamond-shaped graphs.
+void Cleaner::DoCleanTarget(Node* target) {
+ if (Edge* e = target->in_edge()) {
+ // Do not try to remove phony targets
+ if (!e->is_phony()) {
+ Remove(target->path());
+ RemoveEdgeFiles(e);
+ }
+ for (vector<Node*>::iterator n = e->inputs_.begin(); n != e->inputs_.end();
+ ++n) {
+ Node* next = *n;
+ // call DoCleanTarget recursively if this node has not been visited
+ if (cleaned_.count(next) == 0) {
+ DoCleanTarget(next);
+ }
+ }
+ }
+
+ // mark this target to be cleaned already
+ cleaned_.insert(target);
+}
+
+// Clean @a target and everything built for it (recursive over inputs).
+int Cleaner::CleanTarget(Node* target) {
+ assert(target);
+
+ Reset();
+ PrintHeader();
+ LoadDyndeps();
+ DoCleanTarget(target);
+ PrintFooter();
+ return status_;
+}
+
+// Name-based overload: look up the node and delegate to CleanTarget(Node*).
+// An unknown target is an error (status 1), not a silent no-op.
+int Cleaner::CleanTarget(const char* target) {
+ assert(target);
+
+ Reset();
+ Node* node = state_->LookupNode(target);
+ if (node) {
+ CleanTarget(node);
+ } else {
+ Error("unknown target '%s'", target);
+ status_ = 1;
+ }
+ return status_;
+}
+
+// Clean several named targets in one pass (single header/footer/Reset).
+// Each name is canonicalized before lookup; failures set status but do
+// not stop processing of the remaining targets.
+int Cleaner::CleanTargets(int target_count, char* targets[]) {
+ Reset();
+ PrintHeader();
+ LoadDyndeps();
+ for (int i = 0; i < target_count; ++i) {
+ string target_name = targets[i];
+ uint64_t slash_bits;
+ string err;
+ if (!CanonicalizePath(&target_name, &slash_bits, &err)) {
+ Error("failed to canonicalize '%s': %s", target_name.c_str(), err.c_str());
+ status_ = 1;
+ } else {
+ Node* target = state_->LookupNode(target_name);
+ if (target) {
+ if (IsVerbose())
+ printf("Target %s\n", target_name.c_str());
+ DoCleanTarget(target);
+ } else {
+ Error("unknown target '%s'", target_name.c_str());
+ status_ = 1;
+ }
+ }
+ }
+ PrintFooter();
+ return status_;
+}
+
+// Remove the outputs of every edge using @a rule (matched by rule name).
+// NOTE(review): RemoveEdgeFiles is invoked once per output rather than
+// once per edge; harmless since Remove() deduplicates via removed_, but
+// the call could be hoisted out of the inner loop.
+void Cleaner::DoCleanRule(const Rule* rule) {
+ assert(rule);
+
+ for (vector<Edge*>::iterator e = state_->edges_.begin();
+ e != state_->edges_.end(); ++e) {
+ if ((*e)->rule().name() == rule->name()) {
+ for (vector<Node*>::iterator out_node = (*e)->outputs_.begin();
+ out_node != (*e)->outputs_.end(); ++out_node) {
+ Remove((*out_node)->path());
+ RemoveEdgeFiles(*e);
+ }
+ }
+ }
+}
+
+// Clean all files produced by edges using @a rule.
+int Cleaner::CleanRule(const Rule* rule) {
+ assert(rule);
+
+ Reset();
+ PrintHeader();
+ LoadDyndeps();
+ DoCleanRule(rule);
+ PrintFooter();
+ return status_;
+}
+
+// Name-based overload: resolve the rule and delegate to CleanRule(Rule*).
+// An unknown rule name is an error (status 1).
+int Cleaner::CleanRule(const char* rule) {
+ assert(rule);
+
+ Reset();
+ const Rule* r = state_->bindings_.LookupRule(rule);
+ if (r) {
+ CleanRule(r);
+ } else {
+ Error("unknown rule '%s'", rule);
+ status_ = 1;
+ }
+ return status_;
+}
+
+// Clean files produced by several named rules in one pass; unknown rule
+// names set status but processing continues with the remaining rules.
+int Cleaner::CleanRules(int rule_count, char* rules[]) {
+ assert(rules);
+
+ Reset();
+ PrintHeader();
+ LoadDyndeps();
+ for (int i = 0; i < rule_count; ++i) {
+ const char* rule_name = rules[i];
+ const Rule* rule = state_->bindings_.LookupRule(rule_name);
+ if (rule) {
+ if (IsVerbose())
+ printf("Rule %s\n", rule_name);
+ DoCleanRule(rule);
+ } else {
+ Error("unknown rule '%s'", rule_name);
+ status_ = 1;
+ }
+ }
+ PrintFooter();
+ return status_;
+}
+
+// Reset per-run state so the same Cleaner can serve multiple Clean* calls.
+void Cleaner::Reset() {
+ status_ = 0;
+ cleaned_files_count_ = 0;
+ removed_.clear();
+ cleaned_.clear();
+}
+
+// Augment the graph with implicit outputs declared by existing dyndep
+// files, so those outputs get cleaned too. Missing/broken dyndep files
+// are tolerated: cleaning proceeds with whatever graph is known.
+void Cleaner::LoadDyndeps() {
+ // Load dyndep files that exist, before they are cleaned.
+ for (vector<Edge*>::iterator e = state_->edges_.begin();
+ e != state_->edges_.end(); ++e) {
+ if (Node* dyndep = (*e)->dyndep_) {
+ // Capture and ignore errors loading the dyndep file.
+ // We clean as much of the graph as we know.
+ std::string err;
+ dyndep_loader_.LoadDyndeps(dyndep, &err);
+ }
+ }
+}
diff --git a/src/clean.h b/src/clean.h
new file mode 100644
index 0000000..cf3f1c3
--- /dev/null
+++ b/src/clean.h
@@ -0,0 +1,111 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_CLEAN_H_
+#define NINJA_CLEAN_H_
+
+#include <set>
+#include <string>
+
+#include "build.h"
+#include "dyndep.h"
+#include "build_log.h"
+
+struct State;
+struct Node;
+struct Rule;
+struct DiskInterface;
+
+struct Cleaner {
+ /// Build a cleaner object with the given @a disk_interface
+ Cleaner(State* state,
+ const BuildConfig& config,
+ DiskInterface* disk_interface);
+
+ /// Clean the given @a target and all the file built for it.
+ /// @return non-zero if an error occurs.
+ int CleanTarget(Node* target);
+ /// Clean the given target @a target.
+ /// @return non-zero if an error occurs.
+ int CleanTarget(const char* target);
+ /// Clean the given target @a targets.
+ /// @return non-zero if an error occurs.
+ int CleanTargets(int target_count, char* targets[]);
+
+ /// Clean all built files, except for files created by generator rules.
+ /// @param generator If set, also clean files created by generator rules.
+ /// @return non-zero if an error occurs.
+ int CleanAll(bool generator = false);
+
+ /// Clean all the file built with the given rule @a rule.
+ /// @return non-zero if an error occurs.
+ int CleanRule(const Rule* rule);
+ /// Clean the file produced by the given @a rule.
+ /// @return non-zero if an error occurs.
+ int CleanRule(const char* rule);
+ /// Clean the file produced by the given @a rules.
+ /// @return non-zero if an error occurs.
+ int CleanRules(int rule_count, char* rules[]);
+ /// Clean the files produced by previous builds that are no longer in the
+ /// manifest.
+ /// @return non-zero if an error occurs.
+ int CleanDead(const BuildLog::Entries& entries);
+
+ /// @return the number of file cleaned.
+ int cleaned_files_count() const {
+ return cleaned_files_count_;
+ }
+
+ /// @return whether the cleaner is in verbose mode.
+ bool IsVerbose() const {
+ return (config_.verbosity != BuildConfig::QUIET
+ && (config_.verbosity == BuildConfig::VERBOSE || config_.dry_run));
+ }
+
+ private:
+ /// Remove the file @a path.
+ /// @return whether the file has been removed.
+ int RemoveFile(const std::string& path);
+ /// @returns whether the file @a path exists.
+ bool FileExists(const std::string& path);
+ void Report(const std::string& path);
+
+ /// Remove the given @a path file only if it has not been already removed.
+ void Remove(const std::string& path);
+ /// @return whether the given @a path has already been removed.
+ bool IsAlreadyRemoved(const std::string& path);
+ /// Remove the depfile and rspfile for an Edge.
+ void RemoveEdgeFiles(Edge* edge);
+
+ /// Helper recursive method for CleanTarget().
+ void DoCleanTarget(Node* target);
+ void PrintHeader();
+ void PrintFooter();
+ void DoCleanRule(const Rule* rule);
+ void Reset();
+
+ /// Load dependencies from dyndep bindings.
+ void LoadDyndeps();
+
+ State* state_;
+ const BuildConfig& config_;
+ DyndepLoader dyndep_loader_;
+ /// Paths already handled by Remove() in the current run (dedup set).
+ std::set<std::string> removed_;
+ /// Nodes already visited by DoCleanTarget() in the current run.
+ std::set<Node*> cleaned_;
+ int cleaned_files_count_;
+ /// Not owned; used for all stat/remove operations.
+ DiskInterface* disk_interface_;
+ /// 0 on success, 1 once any error occurred; returned by the Clean* APIs.
+ int status_;
+};
+
+#endif // NINJA_CLEAN_H_
diff --git a/src/clean_test.cc b/src/clean_test.cc
new file mode 100644
index 0000000..1b843a2
--- /dev/null
+++ b/src/clean_test.cc
@@ -0,0 +1,540 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "clean.h"
+#include "build.h"
+
+#include "util.h"
+#include "test.h"
+
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+
+using namespace std;
+
+namespace {
+
+const char kTestFilename[] = "CleanTest-tempfile";
+
+// Fixture: parsed state plus a virtual filesystem; quiet verbosity keeps
+// the Cleaner's header/footer output out of the test logs.
+struct CleanTest : public StateTestWithBuiltinRules {
+ VirtualFileSystem fs_;
+ BuildConfig config_;
+ virtual void SetUp() {
+ config_.verbosity = BuildConfig::QUIET;
+ }
+};
+
+TEST_F(CleanTest, CleanAll) {
+ // CleanAll removes every declared output once; a second run is a no-op.
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build in1: cat src1\n"
+"build out1: cat in1\n"
+"build in2: cat src2\n"
+"build out2: cat in2\n"));
+ fs_.Create("in1", "");
+ fs_.Create("out1", "");
+ fs_.Create("in2", "");
+ fs_.Create("out2", "");
+
+ Cleaner cleaner(&state_, config_, &fs_);
+
+ ASSERT_EQ(0, cleaner.cleaned_files_count());
+ EXPECT_EQ(0, cleaner.CleanAll());
+ EXPECT_EQ(4, cleaner.cleaned_files_count());
+ EXPECT_EQ(4u, fs_.files_removed_.size());
+
+ // Check they are removed.
+ string err;
+ EXPECT_EQ(0, fs_.Stat("in1", &err));
+ EXPECT_EQ(0, fs_.Stat("out1", &err));
+ EXPECT_EQ(0, fs_.Stat("in2", &err));
+ EXPECT_EQ(0, fs_.Stat("out2", &err));
+ fs_.files_removed_.clear();
+
+ EXPECT_EQ(0, cleaner.CleanAll());
+ EXPECT_EQ(0, cleaner.cleaned_files_count());
+ EXPECT_EQ(0u, fs_.files_removed_.size());
+}
+
+TEST_F(CleanTest, CleanAllDryRun) {
+ // With dry_run set, CleanAll counts files but never touches the disk.
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build in1: cat src1\n"
+"build out1: cat in1\n"
+"build in2: cat src2\n"
+"build out2: cat in2\n"));
+ fs_.Create("in1", "");
+ fs_.Create("out1", "");
+ fs_.Create("in2", "");
+ fs_.Create("out2", "");
+
+ config_.dry_run = true;
+ Cleaner cleaner(&state_, config_, &fs_);
+
+ ASSERT_EQ(0, cleaner.cleaned_files_count());
+ EXPECT_EQ(0, cleaner.CleanAll());
+ EXPECT_EQ(4, cleaner.cleaned_files_count());
+ EXPECT_EQ(0u, fs_.files_removed_.size());
+
+ // Check they are not removed.
+ string err;
+ EXPECT_LT(0, fs_.Stat("in1", &err));
+ EXPECT_LT(0, fs_.Stat("out1", &err));
+ EXPECT_LT(0, fs_.Stat("in2", &err));
+ EXPECT_LT(0, fs_.Stat("out2", &err));
+ fs_.files_removed_.clear();
+
+ EXPECT_EQ(0, cleaner.CleanAll());
+ EXPECT_EQ(4, cleaner.cleaned_files_count());
+ EXPECT_EQ(0u, fs_.files_removed_.size());
+}
+
+TEST_F(CleanTest, CleanTarget) {
+ // CleanTarget removes the target and its intermediates, but leaves the
+ // unrelated in2/out2 chain untouched.
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build in1: cat src1\n"
+"build out1: cat in1\n"
+"build in2: cat src2\n"
+"build out2: cat in2\n"));
+ fs_.Create("in1", "");
+ fs_.Create("out1", "");
+ fs_.Create("in2", "");
+ fs_.Create("out2", "");
+
+ Cleaner cleaner(&state_, config_, &fs_);
+
+ ASSERT_EQ(0, cleaner.cleaned_files_count());
+ ASSERT_EQ(0, cleaner.CleanTarget("out1"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_EQ(2u, fs_.files_removed_.size());
+
+ // Check they are removed.
+ string err;
+ EXPECT_EQ(0, fs_.Stat("in1", &err));
+ EXPECT_EQ(0, fs_.Stat("out1", &err));
+ EXPECT_LT(0, fs_.Stat("in2", &err));
+ EXPECT_LT(0, fs_.Stat("out2", &err));
+ fs_.files_removed_.clear();
+
+ ASSERT_EQ(0, cleaner.CleanTarget("out1"));
+ EXPECT_EQ(0, cleaner.cleaned_files_count());
+ EXPECT_EQ(0u, fs_.files_removed_.size());
+}
+
+TEST_F(CleanTest, CleanTargetDryRun) {
+ // Dry-run variant of CleanTarget: counts but does not delete.
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build in1: cat src1\n"
+"build out1: cat in1\n"
+"build in2: cat src2\n"
+"build out2: cat in2\n"));
+ fs_.Create("in1", "");
+ fs_.Create("out1", "");
+ fs_.Create("in2", "");
+ fs_.Create("out2", "");
+
+ config_.dry_run = true;
+ Cleaner cleaner(&state_, config_, &fs_);
+
+ ASSERT_EQ(0, cleaner.cleaned_files_count());
+ ASSERT_EQ(0, cleaner.CleanTarget("out1"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_EQ(0u, fs_.files_removed_.size());
+
+ // Check they are not removed.
+ string err;
+ EXPECT_LT(0, fs_.Stat("in1", &err));
+ EXPECT_LT(0, fs_.Stat("out1", &err));
+ EXPECT_LT(0, fs_.Stat("in2", &err));
+ EXPECT_LT(0, fs_.Stat("out2", &err));
+ fs_.files_removed_.clear();
+
+ ASSERT_EQ(0, cleaner.CleanTarget("out1"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_EQ(0u, fs_.files_removed_.size());
+}
+
+TEST_F(CleanTest, CleanRule) {
+ // CleanRule removes only outputs of edges built with the named rule.
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cat_e\n"
+" command = cat -e $in > $out\n"
+"build in1: cat_e src1\n"
+"build out1: cat in1\n"
+"build in2: cat_e src2\n"
+"build out2: cat in2\n"));
+ fs_.Create("in1", "");
+ fs_.Create("out1", "");
+ fs_.Create("in2", "");
+ fs_.Create("out2", "");
+
+ Cleaner cleaner(&state_, config_, &fs_);
+
+ ASSERT_EQ(0, cleaner.cleaned_files_count());
+ ASSERT_EQ(0, cleaner.CleanRule("cat_e"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_EQ(2u, fs_.files_removed_.size());
+
+ // Check they are removed.
+ string err;
+ EXPECT_EQ(0, fs_.Stat("in1", &err));
+ EXPECT_LT(0, fs_.Stat("out1", &err));
+ EXPECT_EQ(0, fs_.Stat("in2", &err));
+ EXPECT_LT(0, fs_.Stat("out2", &err));
+ fs_.files_removed_.clear();
+
+ ASSERT_EQ(0, cleaner.CleanRule("cat_e"));
+ EXPECT_EQ(0, cleaner.cleaned_files_count());
+ EXPECT_EQ(0u, fs_.files_removed_.size());
+}
+
+TEST_F(CleanTest, CleanRuleDryRun) {
+ // Dry-run variant of CleanRule: counts but does not delete.
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cat_e\n"
+" command = cat -e $in > $out\n"
+"build in1: cat_e src1\n"
+"build out1: cat in1\n"
+"build in2: cat_e src2\n"
+"build out2: cat in2\n"));
+ fs_.Create("in1", "");
+ fs_.Create("out1", "");
+ fs_.Create("in2", "");
+ fs_.Create("out2", "");
+
+ config_.dry_run = true;
+ Cleaner cleaner(&state_, config_, &fs_);
+
+ ASSERT_EQ(0, cleaner.cleaned_files_count());
+ ASSERT_EQ(0, cleaner.CleanRule("cat_e"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_EQ(0u, fs_.files_removed_.size());
+
+ // Check they are not removed.
+ string err;
+ EXPECT_LT(0, fs_.Stat("in1", &err));
+ EXPECT_LT(0, fs_.Stat("out1", &err));
+ EXPECT_LT(0, fs_.Stat("in2", &err));
+ EXPECT_LT(0, fs_.Stat("out2", &err));
+ fs_.files_removed_.clear();
+
+ ASSERT_EQ(0, cleaner.CleanRule("cat_e"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_EQ(0u, fs_.files_removed_.size());
+}
+
+TEST_F(CleanTest, CleanRuleGenerator) {
+ // Generator-rule outputs are kept by default and removed only when
+ // CleanAll is passed generator=true.
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule regen\n"
+" command = cat $in > $out\n"
+" generator = 1\n"
+"build out1: cat in1\n"
+"build out2: regen in2\n"));
+ fs_.Create("out1", "");
+ fs_.Create("out2", "");
+
+ Cleaner cleaner(&state_, config_, &fs_);
+ EXPECT_EQ(0, cleaner.CleanAll());
+ EXPECT_EQ(1, cleaner.cleaned_files_count());
+ EXPECT_EQ(1u, fs_.files_removed_.size());
+
+ fs_.Create("out1", "");
+
+ EXPECT_EQ(0, cleaner.CleanAll(/*generator=*/true));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_EQ(2u, fs_.files_removed_.size());
+}
+
+TEST_F(CleanTest, CleanDepFile) {
+ // A depfile declared on the edge is removed along with the output.
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc\n"
+" command = cc $in > $out\n"
+" depfile = $out.d\n"
+"build out1: cc in1\n"));
+ fs_.Create("out1", "");
+ fs_.Create("out1.d", "");
+
+ Cleaner cleaner(&state_, config_, &fs_);
+ EXPECT_EQ(0, cleaner.CleanAll());
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_EQ(2u, fs_.files_removed_.size());
+}
+
+TEST_F(CleanTest, CleanDepFileOnCleanTarget) {
+ // Same as CleanDepFile but via the CleanTarget entry point.
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc\n"
+" command = cc $in > $out\n"
+" depfile = $out.d\n"
+"build out1: cc in1\n"));
+ fs_.Create("out1", "");
+ fs_.Create("out1.d", "");
+
+ Cleaner cleaner(&state_, config_, &fs_);
+ EXPECT_EQ(0, cleaner.CleanTarget("out1"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_EQ(2u, fs_.files_removed_.size());
+}
+
+TEST_F(CleanTest, CleanDepFileOnCleanRule) {
+ // Same as CleanDepFile but via the CleanRule entry point.
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc\n"
+" command = cc $in > $out\n"
+" depfile = $out.d\n"
+"build out1: cc in1\n"));
+ fs_.Create("out1", "");
+ fs_.Create("out1.d", "");
+
+ Cleaner cleaner(&state_, config_, &fs_);
+ EXPECT_EQ(0, cleaner.CleanRule("cc"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_EQ(2u, fs_.files_removed_.size());
+}
+
+TEST_F(CleanTest, CleanDyndep) {
+ // Verify that a dyndep file can be loaded to discover a new output
+ // to be cleaned. (out.imp is only known via the dyndep file "dd".)
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out: cat in || dd\n"
+" dyndep = dd\n"
+ ));
+ fs_.Create("in", "");
+ fs_.Create("dd",
+"ninja_dyndep_version = 1\n"
+"build out | out.imp: dyndep\n"
+);
+ fs_.Create("out", "");
+ fs_.Create("out.imp", "");
+
+ Cleaner cleaner(&state_, config_, &fs_);
+
+ ASSERT_EQ(0, cleaner.cleaned_files_count());
+ EXPECT_EQ(0, cleaner.CleanAll());
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_EQ(2u, fs_.files_removed_.size());
+
+ string err;
+ EXPECT_EQ(0, fs_.Stat("out", &err));
+ EXPECT_EQ(0, fs_.Stat("out.imp", &err));
+}
+
+TEST_F(CleanTest, CleanDyndepMissing) {
+ // Verify that a missing dyndep file is tolerated: only "out" is
+ // cleaned; out.imp is unknown without the dyndep info and survives.
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out: cat in || dd\n"
+" dyndep = dd\n"
+ ));
+ fs_.Create("in", "");
+ fs_.Create("out", "");
+ fs_.Create("out.imp", "");
+
+ Cleaner cleaner(&state_, config_, &fs_);
+
+ ASSERT_EQ(0, cleaner.cleaned_files_count());
+ EXPECT_EQ(0, cleaner.CleanAll());
+ EXPECT_EQ(1, cleaner.cleaned_files_count());
+ EXPECT_EQ(1u, fs_.files_removed_.size());
+
+ string err;
+ EXPECT_EQ(0, fs_.Stat("out", &err));
+ EXPECT_EQ(1, fs_.Stat("out.imp", &err));
+}
+
+TEST_F(CleanTest, CleanRspFile) {
+ // A response file declared on the edge is removed along with the output.
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc\n"
+" command = cc $in > $out\n"
+" rspfile = $rspfile\n"
+" rspfile_content=$in\n"
+"build out1: cc in1\n"
+" rspfile = cc1.rsp\n"));
+ fs_.Create("out1", "");
+ fs_.Create("cc1.rsp", "");
+
+ Cleaner cleaner(&state_, config_, &fs_);
+ EXPECT_EQ(0, cleaner.CleanAll());
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_EQ(2u, fs_.files_removed_.size());
+}
+
+TEST_F(CleanTest, CleanRsp) {
+ // Response files are cleaned through all three entry points
+ // (CleanTarget x2, CleanRule); six files removed in total.
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cat_rsp \n"
+" command = cat $rspfile > $out\n"
+" rspfile = $rspfile\n"
+" rspfile_content = $in\n"
+"build in1: cat src1\n"
+"build out1: cat in1\n"
+"build in2: cat_rsp src2\n"
+" rspfile=in2.rsp\n"
+"build out2: cat_rsp in2\n"
+" rspfile=out2.rsp\n"
+));
+ fs_.Create("in1", "");
+ fs_.Create("out1", "");
+ fs_.Create("in2.rsp", "");
+ fs_.Create("out2.rsp", "");
+ fs_.Create("in2", "");
+ fs_.Create("out2", "");
+
+ Cleaner cleaner(&state_, config_, &fs_);
+ ASSERT_EQ(0, cleaner.cleaned_files_count());
+ ASSERT_EQ(0, cleaner.CleanTarget("out1"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ ASSERT_EQ(0, cleaner.CleanTarget("in2"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ ASSERT_EQ(0, cleaner.CleanRule("cat_rsp"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+
+ EXPECT_EQ(6u, fs_.files_removed_.size());
+
+ // Check they are removed.
+ string err;
+ EXPECT_EQ(0, fs_.Stat("in1", &err));
+ EXPECT_EQ(0, fs_.Stat("out1", &err));
+ EXPECT_EQ(0, fs_.Stat("in2", &err));
+ EXPECT_EQ(0, fs_.Stat("out2", &err));
+ EXPECT_EQ(0, fs_.Stat("in2.rsp", &err));
+ EXPECT_EQ(0, fs_.Stat("out2.rsp", &err));
+}
+
+TEST_F(CleanTest, CleanFailure) {
+ // Removing a directory as if it were a file must fail (non-zero status).
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+ "build dir: cat src1\n"));
+ fs_.MakeDir("dir");
+ Cleaner cleaner(&state_, config_, &fs_);
+ EXPECT_NE(0, cleaner.CleanAll());
+}
+
+TEST_F(CleanTest, CleanPhony) {
+ // Phony outputs are never deleted, even if a real file shares the name.
+ string err;
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build phony: phony t1 t2\n"
+"build t1: cat\n"
+"build t2: cat\n"));
+
+ fs_.Create("phony", "");
+ fs_.Create("t1", "");
+ fs_.Create("t2", "");
+
+ // Check that CleanAll does not remove "phony".
+ Cleaner cleaner(&state_, config_, &fs_);
+ EXPECT_EQ(0, cleaner.CleanAll());
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_LT(0, fs_.Stat("phony", &err));
+
+ fs_.Create("t1", "");
+ fs_.Create("t2", "");
+
+ // Check that CleanTarget does not remove "phony".
+ EXPECT_EQ(0, cleaner.CleanTarget("phony"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_LT(0, fs_.Stat("phony", &err));
+}
+
+TEST_F(CleanTest, CleanDepFileAndRspFileWithSpaces) {
+ // '$ '-escaped paths must be unescaped before removal, so the on-disk
+ // names with literal spaces ("out 1", "out 1.d", ...) get deleted.
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc_dep\n"
+" command = cc $in > $out\n"
+" depfile = $out.d\n"
+"rule cc_rsp\n"
+" command = cc $in > $out\n"
+" rspfile = $out.rsp\n"
+" rspfile_content = $in\n"
+"build out$ 1: cc_dep in$ 1\n"
+"build out$ 2: cc_rsp in$ 1\n"
+));
+ fs_.Create("out 1", "");
+ fs_.Create("out 2", "");
+ fs_.Create("out 1.d", "");
+ fs_.Create("out 2.rsp", "");
+
+ Cleaner cleaner(&state_, config_, &fs_);
+ EXPECT_EQ(0, cleaner.CleanAll());
+ EXPECT_EQ(4, cleaner.cleaned_files_count());
+ EXPECT_EQ(4u, fs_.files_removed_.size());
+
+ string err;
+ EXPECT_EQ(0, fs_.Stat("out 1", &err));
+ EXPECT_EQ(0, fs_.Stat("out 2", &err));
+ EXPECT_EQ(0, fs_.Stat("out 1.d", &err));
+ EXPECT_EQ(0, fs_.Stat("out 2.rsp", &err));
+}
+
+// Fixture for CleanDead: adds BuildLogUser so a real on-disk build log
+// (kTestFilename) can be written and reloaded; nothing is considered dead
+// by the log itself (IsPathDead always false).
+struct CleanDeadTest : public CleanTest, public BuildLogUser{
+ virtual void SetUp() {
+ // In case a crashing test left a stale file behind.
+ unlink(kTestFilename);
+ CleanTest::SetUp();
+ }
+ virtual void TearDown() {
+ unlink(kTestFilename);
+ }
+ virtual bool IsPathDead(StringPiece) const { return false; }
+};
+
+TEST_F(CleanDeadTest, CleanDead) {
+ // CleanDead removes log-recorded outputs only when the active manifest
+ // no longer builds them: "state" builds out1+out2 (nothing dead);
+ // "state_" builds only out2, so out1 becomes dead and is removed.
+ State state;
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state,
+"rule cat\n"
+" command = cat $in > $out\n"
+"build out1: cat in\n"
+"build out2: cat in\n"
+));
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out2: cat in\n"
+));
+ fs_.Create("in", "");
+ fs_.Create("out1", "");
+ fs_.Create("out2", "");
+
+ // Record both outputs in a real build log, then reload it.
+ BuildLog log1;
+ string err;
+ EXPECT_TRUE(log1.OpenForWrite(kTestFilename, *this, &err));
+ ASSERT_EQ("", err);
+ log1.RecordCommand(state.edges_[0], 15, 18);
+ log1.RecordCommand(state.edges_[1], 20, 25);
+ log1.Close();
+
+ BuildLog log2;
+ EXPECT_TRUE(log2.Load(kTestFilename, &err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(2u, log2.entries().size());
+ ASSERT_TRUE(log2.LookupByOutput("out1"));
+ ASSERT_TRUE(log2.LookupByOutput("out2"));
+
+ // First use the manifest that describe how to build out1.
+ Cleaner cleaner1(&state, config_, &fs_);
+ EXPECT_EQ(0, cleaner1.CleanDead(log2.entries()));
+ EXPECT_EQ(0, cleaner1.cleaned_files_count());
+ EXPECT_EQ(0u, fs_.files_removed_.size());
+ EXPECT_NE(0, fs_.Stat("in", &err));
+ EXPECT_NE(0, fs_.Stat("out1", &err));
+ EXPECT_NE(0, fs_.Stat("out2", &err));
+
+ // Then use the manifest that does not build out1 anymore.
+ Cleaner cleaner2(&state_, config_, &fs_);
+ EXPECT_EQ(0, cleaner2.CleanDead(log2.entries()));
+ EXPECT_EQ(1, cleaner2.cleaned_files_count());
+ EXPECT_EQ(1u, fs_.files_removed_.size());
+ EXPECT_EQ("out1", *(fs_.files_removed_.begin()));
+ EXPECT_NE(0, fs_.Stat("in", &err));
+ EXPECT_EQ(0, fs_.Stat("out1", &err));
+ EXPECT_NE(0, fs_.Stat("out2", &err));
+
+ // Nothing to do now.
+ EXPECT_EQ(0, cleaner2.CleanDead(log2.entries()));
+ EXPECT_EQ(0, cleaner2.cleaned_files_count());
+ EXPECT_EQ(1u, fs_.files_removed_.size());
+ EXPECT_EQ("out1", *(fs_.files_removed_.begin()));
+ EXPECT_NE(0, fs_.Stat("in", &err));
+ EXPECT_EQ(0, fs_.Stat("out1", &err));
+ EXPECT_NE(0, fs_.Stat("out2", &err));
+ log2.Close();
+}
+} // anonymous namespace
diff --git a/src/clparser.cc b/src/clparser.cc
new file mode 100644
index 0000000..275641e
--- /dev/null
+++ b/src/clparser.cc
@@ -0,0 +1,128 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "clparser.h"
+
+#include <algorithm>
+#include <assert.h>
+#include <string.h>
+
+#include "metrics.h"
+#include "string_piece_util.h"
+
+#ifdef _WIN32
+#include "includes_normalize.h"
+#include "string_piece.h"
+#else
+#include "util.h"
+#endif
+
+using namespace std;
+
namespace {

/// Return true if \a input ends with \a needle.
/// Implemented with std::string::compare so no temporary substring is
/// allocated on each call (the original used substr(), which copies).
bool EndsWith(const std::string& input, const std::string& needle) {
  return input.size() >= needle.size() &&
         input.compare(input.size() - needle.size(), needle.size(),
                       needle) == 0;
}

}  // anonymous namespace
+
+// static
+string CLParser::FilterShowIncludes(const string& line,
+ const string& deps_prefix) {
+ const string kDepsPrefixEnglish = "Note: including file: ";
+ const char* in = line.c_str();
+ const char* end = in + line.size();
+ const string& prefix = deps_prefix.empty() ? kDepsPrefixEnglish : deps_prefix;
+ if (end - in > (int)prefix.size() &&
+ memcmp(in, prefix.c_str(), (int)prefix.size()) == 0) {
+ in += prefix.size();
+ while (*in == ' ')
+ ++in;
+ return line.substr(in - line.c_str());
+ }
+ return "";
+}
+
+// static
+bool CLParser::IsSystemInclude(string path) {
+ transform(path.begin(), path.end(), path.begin(), ToLowerASCII);
+ // TODO: this is a heuristic, perhaps there's a better way?
+ return (path.find("program files") != string::npos ||
+ path.find("microsoft visual studio") != string::npos);
+}
+
+// static
+bool CLParser::FilterInputFilename(string line) {
+ transform(line.begin(), line.end(), line.begin(), ToLowerASCII);
+ // TODO: other extensions, like .asm?
+ return EndsWith(line, ".c") ||
+ EndsWith(line, ".cc") ||
+ EndsWith(line, ".cxx") ||
+ EndsWith(line, ".cpp");
+}
+
// NOTE: unlike the static helpers above, this is a member function: it
// accumulates the discovered headers into includes_.
bool CLParser::Parse(const string& output, const string& deps_prefix,
                     string* filtered_output, string* err) {
  METRIC_RECORD("CLParser::Parse");

  // Loop over all lines in the output to process them.
  assert(&output != filtered_output);
  size_t start = 0;
#ifdef _WIN32
  IncludesNormalize normalizer(".");
#endif

  while (start < output.size()) {
    // Find the end of the current line; tolerates \r\n, \r, and \n endings.
    size_t end = output.find_first_of("\r\n", start);
    if (end == string::npos)
      end = output.size();
    string line = output.substr(start, end - start);

    string include = FilterShowIncludes(line, deps_prefix);
    if (!include.empty()) {
      string normalized;
#ifdef _WIN32
      if (!normalizer.Normalize(include, &normalized, err))
        return false;
#else
      // TODO: should this make the path relative to cwd?
      normalized = include;
      uint64_t slash_bits;
      if (!CanonicalizePath(&normalized, &slash_bits, err))
        return false;
#endif
      // Drop system headers; they bloat the dependency set with paths
      // that rarely change (see IsSystemInclude).
      if (!IsSystemInclude(normalized))
        includes_.insert(normalized);
    } else if (FilterInputFilename(line)) {
      // Drop it.
      // TODO: if we support compiling multiple output files in a single
      // cl.exe invocation, we should stash the filename.
    } else {
      // Not a /showIncludes line: pass it through for the user to see.
      filtered_output->append(line);
      filtered_output->append("\n");
    }

    // Step over the line terminator (handles \r\n as a single terminator).
    if (end < output.size() && output[end] == '\r')
      ++end;
    if (end < output.size() && output[end] == '\n')
      ++end;
    start = end;
  }

  return true;
}
diff --git a/src/clparser.h b/src/clparser.h
new file mode 100644
index 0000000..2a33628
--- /dev/null
+++ b/src/clparser.h
@@ -0,0 +1,51 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
#ifndef NINJA_CLPARSER_H_
#define NINJA_CLPARSER_H_

#include <set>
#include <string>

/// Visual Studio's cl.exe requires some massaging to work with Ninja;
/// for example, it emits include information on stderr in a funny
/// format when building with /showIncludes. This class parses this
/// output.
struct CLParser {
  /// Parse a line of cl.exe output and extract /showIncludes info.
  /// If a dependency is extracted, returns a nonempty string.
  /// Exposed for testing.
  static std::string FilterShowIncludes(const std::string& line,
                                        const std::string& deps_prefix);

  /// Return true if a mentioned include file is a system path.
  /// Filtering these out reduces dependency information considerably.
  static bool IsSystemInclude(std::string path);

  /// Parse a line of cl.exe output and return true if it looks like
  /// it's printing an input filename. This is a heuristic but it appears
  /// to be the best we can do.
  /// Exposed for testing.
  static bool FilterInputFilename(std::string line);

  /// Parse the full output of cl, filling filtered_output with the text that
  /// should be printed (if any). Returns true on success, or false with err
  /// filled. output must not be the same object as filtered_output.
  bool Parse(const std::string& output, const std::string& deps_prefix,
             std::string* filtered_output, std::string* err);

  /// Non-system headers seen so far, accumulated across calls to Parse().
  std::set<std::string> includes_;
};

#endif  // NINJA_CLPARSER_H_
diff --git a/src/clparser_perftest.cc b/src/clparser_perftest.cc
new file mode 100644
index 0000000..008ac46
--- /dev/null
+++ b/src/clparser_perftest.cc
@@ -0,0 +1,159 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "clparser.h"
+#include "metrics.h"
+
+using namespace std;
+
// Micro-benchmark for CLParser::Parse: repeatedly parses a captured
// /showIncludes transcript and reports the average time per parse.
// Takes no command-line arguments (argc/argv are unused).
int main(int argc, char* argv[]) {
  // Output of /showIncludes from #include <iostream>
  string perf_testdata =
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\iostream\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\istream\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\ostream\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\ios\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xlocnum\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\climits\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\yvals.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xkeycheck.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\crtdefs.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\sal.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\ConcurrencySal.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vadefs.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\use_ansi.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\limits.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cmath\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\math.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xtgmath.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xtr1common\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cstdlib\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\stdlib.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_malloc.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_search.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\stddef.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wstdlib.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cstdio\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\stdio.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wstdio.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_stdio_config.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\streambuf\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xiosbase\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xlocale\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cstring\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\string.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_memory.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_memcpy_s.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\errno.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime_string.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wstring.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\stdexcept\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\exception\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\type_traits\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xstddef\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cstddef\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\initializer_list\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\malloc.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime_exception.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\eh.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_terminate.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xstring\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xmemory0\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cstdint\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\stdint.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\limits\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\ymath.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cfloat\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\float.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cwchar\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\wchar.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wconio.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wctype.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wdirect.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wio.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_share.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wprocess.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wtime.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\sys/stat.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\sys/types.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\new\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime_new.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xutility\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\utility\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\iosfwd\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\crtdbg.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime_new_debug.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xatomic0.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\intrin.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\setjmp.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\immintrin.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\wmmintrin.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\nmmintrin.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\smmintrin.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\tmmintrin.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\pmmintrin.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\emmintrin.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xmmintrin.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\mmintrin.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\ammintrin.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\mm3dnow.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\typeinfo\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime_typeinfo.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xlocinfo\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xlocinfo.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\ctype.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\locale.h\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xfacet\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\system_error\r\n"
    "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cerrno\r\n"
    "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\share.h\r\n";

  // Double the repetition count until one batch takes long enough (> 2s)
  // to give a stable average, then report time per parse and stop.
  for (int limit = 1 << 10; limit < (1<<20); limit *= 2) {
    int64_t start = GetTimeMillis();
    for (int rep = 0; rep < limit; ++rep) {
      string output;
      string err;

      // A fresh parser per iteration so includes_ doesn't keep growing.
      CLParser parser;
      if (!parser.Parse(perf_testdata, "", &output, &err)) {
        printf("%s\n", err.c_str());
        return 1;
      }
    }
    int64_t end = GetTimeMillis();

    if (end - start > 2000) {
      int delta_ms = (int)(end - start);
      printf("Parse %d times in %dms avg %.1fus\n",
             limit, delta_ms, float(delta_ms * 1000) / limit);
      break;
    }
  }

  return 0;
}
diff --git a/src/clparser_test.cc b/src/clparser_test.cc
new file mode 100644
index 0000000..0b829c1
--- /dev/null
+++ b/src/clparser_test.cc
@@ -0,0 +1,119 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "clparser.h"
+
+#include "test.h"
+#include "util.h"
+
+using namespace std;
+
// FilterShowIncludes extracts the path from a /showIncludes line (default
// English prefix or a caller-supplied localized prefix, with any spaces
// after the prefix skipped) and returns "" for any other line.
TEST(CLParserTest, ShowIncludes) {
  ASSERT_EQ("", CLParser::FilterShowIncludes("", ""));

  ASSERT_EQ("", CLParser::FilterShowIncludes("Sample compiler output", ""));
  ASSERT_EQ("c:\\Some Files\\foobar.h",
            CLParser::FilterShowIncludes("Note: including file: "
                                         "c:\\Some Files\\foobar.h", ""));
  ASSERT_EQ("c:\\initspaces.h",
            CLParser::FilterShowIncludes("Note: including file: "
                                         "c:\\initspaces.h", ""));
  ASSERT_EQ("c:\\initspaces.h",
            CLParser::FilterShowIncludes("Non-default prefix: inc file: "
                                         "c:\\initspaces.h",
                                         "Non-default prefix: inc file:"));
}
+
// FilterInputFilename matches bare C/C++ source file names
// (case-insensitively) but not diagnostics that merely mention one.
TEST(CLParserTest, FilterInputFilename) {
  ASSERT_TRUE(CLParser::FilterInputFilename("foobar.cc"));
  ASSERT_TRUE(CLParser::FilterInputFilename("foo bar.cc"));
  ASSERT_TRUE(CLParser::FilterInputFilename("baz.c"));
  ASSERT_TRUE(CLParser::FilterInputFilename("FOOBAR.CC"));

  ASSERT_FALSE(CLParser::FilterInputFilename(
      "src\\cl_helper.cc(166) : fatal error C1075: end "
      "of file found ..."));
}
+
// Parse with a custom deps prefix: prefixed lines become includes, all
// other lines pass through to filtered output with \r\n normalized to \n.
TEST(CLParserTest, ParseSimple) {
  CLParser parser;
  string output, err;
  ASSERT_TRUE(parser.Parse(
      "foo\r\n"
      "Note: inc file prefix: foo.h\r\n"
      "bar\r\n",
      "Note: inc file prefix:", &output, &err));

  ASSERT_EQ("foo\nbar\n", output);
  ASSERT_EQ(1u, parser.includes_.size());
  ASSERT_EQ("foo.h", *parser.includes_.begin());
}
+
// Lines that look like input filenames are swallowed; other compiler
// chatter still passes through.
TEST(CLParserTest, ParseFilenameFilter) {
  CLParser parser;
  string output, err;
  ASSERT_TRUE(parser.Parse(
      "foo.cc\r\n"
      "cl: warning\r\n",
      "", &output, &err));
  ASSERT_EQ("cl: warning\n", output);
}
+
// Includes under system-looking paths are filtered out of includes_.
TEST(CLParserTest, ParseSystemInclude) {
  CLParser parser;
  string output, err;
  ASSERT_TRUE(parser.Parse(
      "Note: including file: c:\\Program Files\\foo.h\r\n"
      "Note: including file: d:\\Microsoft Visual Studio\\bar.h\r\n"
      "Note: including file: path.h\r\n",
      "", &output, &err));
  // We should have dropped the first two includes because they look like
  // system headers.
  ASSERT_EQ("", output);
  ASSERT_EQ(1u, parser.includes_.size());
  ASSERT_EQ("path.h", *parser.includes_.begin());
}
+
// includes_ is a set: the same header reported twice is stored once.
TEST(CLParserTest, DuplicatedHeader) {
  CLParser parser;
  string output, err;
  ASSERT_TRUE(parser.Parse(
      "Note: including file: foo.h\r\n"
      "Note: including file: bar.h\r\n"
      "Note: including file: foo.h\r\n",
      "", &output, &err));
  // We should have dropped one copy of foo.h.
  ASSERT_EQ("", output);
  ASSERT_EQ(2u, parser.includes_.size());
}
+
// Paths are canonicalized/normalized before insertion, so "sub/./foo.h"
// and "sub/foo.h" (or "sub\foo.h" on Windows) collapse to one entry.
TEST(CLParserTest, DuplicatedHeaderPathConverted) {
  CLParser parser;
  string output, err;

  // This isn't inline in the Parse() call below because the #ifdef in
  // a macro expansion would confuse MSVC2013's preprocessor.
  const char kInput[] =
      "Note: including file: sub/./foo.h\r\n"
      "Note: including file: bar.h\r\n"
#ifdef _WIN32
      "Note: including file: sub\\foo.h\r\n";
#else
      "Note: including file: sub/foo.h\r\n";
#endif
  ASSERT_TRUE(parser.Parse(kInput, "", &output, &err));
  // We should have dropped one copy of foo.h.
  ASSERT_EQ("", output);
  ASSERT_EQ(2u, parser.includes_.size());
}
diff --git a/src/debug_flags.cc b/src/debug_flags.cc
new file mode 100644
index 0000000..44b14c4
--- /dev/null
+++ b/src/debug_flags.cc
@@ -0,0 +1,21 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
// Definitions of the debugging flags declared in debug_flags.h.

// When true, the EXPLAIN macro prints "ninja explain: ..." diagnostics
// to stderr (see debug_flags.h).
bool g_explaining = false;

// Presumably keeps depfiles on disk after they are read, as a debugging
// aid -- confirm against the usage sites.
bool g_keep_depfile = false;

// Presumably keeps response files after the command finishes -- confirm
// against the usage sites.
bool g_keep_rsp = false;

// Experimental stat() caching toggle; enabled by default.
bool g_experimental_statcache = true;
diff --git a/src/debug_flags.h b/src/debug_flags.h
new file mode 100644
index 0000000..e08a43b
--- /dev/null
+++ b/src/debug_flags.h
@@ -0,0 +1,33 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
// Guard renamed from NINJA_EXPLAIN_H_ to match the file name.
#ifndef NINJA_DEBUG_FLAGS_H_
#define NINJA_DEBUG_FLAGS_H_

#include <stdio.h>

/// Print a "ninja explain: ..." diagnostic to stderr when g_explaining
/// is set.  Wrapped in do { } while (0) so the macro expands to a single
/// statement and is safe inside an unbraced if/else.
/// Note: requires at least one argument after fmt (__VA_ARGS__ must be
/// non-empty).
#define EXPLAIN(fmt, ...) do {                                  \
  if (g_explaining)                                             \
    fprintf(stderr, "ninja explain: " fmt "\n", __VA_ARGS__);   \
} while (0)

/// Enables the EXPLAIN diagnostics above.
extern bool g_explaining;

/// Debugging flag; presumably keeps depfiles after reading -- see the
/// usage sites for the exact effect.
extern bool g_keep_depfile;

/// Debugging flag; presumably keeps response files after commands run --
/// see the usage sites for the exact effect.
extern bool g_keep_rsp;

/// Toggles the experimental stat() cache (on by default).
extern bool g_experimental_statcache;

#endif  // NINJA_DEBUG_FLAGS_H_
diff --git a/src/depfile_parser.cc b/src/depfile_parser.cc
new file mode 100644
index 0000000..bffeb76
--- /dev/null
+++ b/src/depfile_parser.cc
@@ -0,0 +1,371 @@
+/* Generated by re2c 1.3 */
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "depfile_parser.h"
+#include "util.h"
+
+#include <algorithm>
+
+using namespace std;
+
// Store the parsing options; DepfileParserOptions currently has no
// fields, so this only keeps the object for future use.
DepfileParser::DepfileParser(DepfileParserOptions options)
  : options_(options)
{
}
+
+// A note on backslashes in Makefiles, from reading the docs:
+// Backslash-newline is the line continuation character.
+// Backslash-# escapes a # (otherwise meaningful as a comment start).
+// Backslash-% escapes a % (otherwise meaningful as a special).
+// Finally, quoting the GNU manual, "Backslashes that are not in danger
+// of quoting ‘%’ characters go unmolested."
+// How do you end a line with a backslash? The netbsd Make docs suggest
+// reading the result of a shell command echoing a backslash!
+//
+// Rather than implement all of above, we follow what GCC/Clang produces:
+// Backslashes escape a space or hash sign.
+// When a space is preceded by 2N+1 backslashes, it represents N backslashes
+// followed by space.
+// When a space is preceded by 2N backslashes, it represents 2N backslashes at
+// the end of a filename.
+// A hash sign is escaped by a single backslash. All other backslashes remain
+// unchanged.
+//
+// If anyone actually has depfiles that rely on the more complicated
+// behavior we can adjust this.
// Parse *content in place.  On success, the StringPieces stored in
// outs_/ins_ point into *content, which must therefore outlive any use
// of those results (see the warning in depfile_parser.h).
bool DepfileParser::Parse(string* content, string* err) {
  // in: current parser input point.
  // end: end of input.
  // parsing_targets: whether we are parsing targets or dependencies.
  char* in = &(*content)[0];
  char* end = in + content->size();
  bool have_target = false;
  bool parsing_targets = true;
  bool poisoned_input = false;
  while (in < end) {
    bool have_newline = false;
    // out: current output point (typically same as in, but can fall behind
    // as we de-escape backslashes).
    char* out = in;
    // filename: start of the current parsed filename.
    char* filename = out;
    for (;;) {
      // start: beginning of the current parsed span.
      const char* start = in;
      char* yymarker = NULL;

      // --- Begin re2c-generated scanner (source grammar lives in
      // depfile_parser.in.cc; regenerate rather than hand-editing). ---
      {
        unsigned char yych;
        static const unsigned char yybm[] = {
          0,   0,   0,   0,   0,   0,   0,   0,
          0,   0,   0,   0,   0,   0,   0,   0,
          0,   0,   0,   0,   0,   0,   0,   0,
          0,   0,   0,   0,   0,   0,   0,   0,
          0, 128,   0,   0,   0, 128,   0,   0,
        128, 128,   0, 128, 128, 128, 128, 128,
        128, 128, 128, 128, 128, 128, 128, 128,
        128, 128, 128,   0,   0, 128,   0,   0,
        128, 128, 128, 128, 128, 128, 128, 128,
        128, 128, 128, 128, 128, 128, 128, 128,
        128, 128, 128, 128, 128, 128, 128, 128,
        128, 128, 128, 128,   0, 128,   0, 128,
          0, 128, 128, 128, 128, 128, 128, 128,
        128, 128, 128, 128, 128, 128, 128, 128,
        128, 128, 128, 128, 128, 128, 128, 128,
        128, 128, 128, 128,   0, 128, 128,   0,
        128, 128, 128, 128, 128, 128, 128, 128,
        128, 128, 128, 128, 128, 128, 128, 128,
        128, 128, 128, 128, 128, 128, 128, 128,
        128, 128, 128, 128, 128, 128, 128, 128,
        128, 128, 128, 128, 128, 128, 128, 128,
        128, 128, 128, 128, 128, 128, 128, 128,
        128, 128, 128, 128, 128, 128, 128, 128,
        128, 128, 128, 128, 128, 128, 128, 128,
        128, 128, 128, 128, 128, 128, 128, 128,
        128, 128, 128, 128, 128, 128, 128, 128,
        128, 128, 128, 128, 128, 128, 128, 128,
        128, 128, 128, 128, 128, 128, 128, 128,
        128, 128, 128, 128, 128, 128, 128, 128,
        128, 128, 128, 128, 128, 128, 128, 128,
        128, 128, 128, 128, 128, 128, 128, 128,
        128, 128, 128, 128, 128, 128, 128, 128,
        };
        yych = *in;
        if (yybm[0+yych] & 128) {
          goto yy9;
        }
        if (yych <= '\r') {
          if (yych <= '\t') {
            if (yych >= 0x01) goto yy4;
          } else {
            if (yych <= '\n') goto yy6;
            if (yych <= '\f') goto yy4;
            goto yy8;
          }
        } else {
          if (yych <= '$') {
            if (yych <= '#') goto yy4;
            goto yy12;
          } else {
            if (yych <= '?') goto yy4;
            if (yych <= '\\') goto yy13;
            goto yy4;
          }
        }
        ++in;
        {
          break;
        }
yy4:
        ++in;
yy5:
        {
          // For any other character (e.g. whitespace), swallow it here,
          // allowing the outer logic to loop around again.
          break;
        }
yy6:
        ++in;
        {
          // A newline ends the current file name and the current rule.
          have_newline = true;
          break;
        }
yy8:
        yych = *++in;
        if (yych == '\n') goto yy6;
        goto yy5;
yy9:
        yych = *++in;
        if (yybm[0+yych] & 128) {
          goto yy9;
        }
yy11:
        {
          // Got a span of plain text.
          int len = (int)(in - start);
          // Need to shift it over if we're overwriting backslashes.
          if (out < start)
            memmove(out, start, len);
          out += len;
          continue;
        }
yy12:
        yych = *++in;
        if (yych == '$') goto yy14;
        goto yy5;
yy13:
        yych = *(yymarker = ++in);
        if (yych <= ' ') {
          if (yych <= '\n') {
            if (yych <= 0x00) goto yy5;
            if (yych <= '\t') goto yy16;
            goto yy17;
          } else {
            if (yych == '\r') goto yy19;
            if (yych <= 0x1F) goto yy16;
            goto yy21;
          }
        } else {
          if (yych <= '9') {
            if (yych == '#') goto yy23;
            goto yy16;
          } else {
            if (yych <= ':') goto yy25;
            if (yych == '\\') goto yy27;
            goto yy16;
          }
        }
yy14:
        ++in;
        {
          // De-escape dollar character.
          *out++ = '$';
          continue;
        }
yy16:
        ++in;
        goto yy11;
yy17:
        ++in;
        {
          // A line continuation ends the current file name.
          break;
        }
yy19:
        yych = *++in;
        if (yych == '\n') goto yy17;
        in = yymarker;
        goto yy5;
yy21:
        ++in;
        {
          // 2N+1 backslashes plus space -> N backslashes plus space.
          int len = (int)(in - start);
          int n = len / 2 - 1;
          if (out < start)
            memset(out, '\\', n);
          out += n;
          *out++ = ' ';
          continue;
        }
yy23:
        ++in;
        {
          // De-escape hash sign, but preserve other leading backslashes.
          int len = (int)(in - start);
          if (len > 2 && out < start)
            memset(out, '\\', len - 2);
          out += len - 2;
          *out++ = '#';
          continue;
        }
yy25:
        yych = *++in;
        if (yych <= '\f') {
          if (yych <= 0x00) goto yy28;
          if (yych <= 0x08) goto yy26;
          if (yych <= '\n') goto yy28;
        } else {
          if (yych <= '\r') goto yy28;
          if (yych == ' ') goto yy28;
        }
yy26:
        {
          // De-escape colon sign, but preserve other leading backslashes.
          // Regular expression uses lookahead to make sure that no whitespace
          // nor EOF follows. In that case it'd be the : at the end of a target
          int len = (int)(in - start);
          if (len > 2 && out < start)
            memset(out, '\\', len - 2);
          out += len - 2;
          *out++ = ':';
          continue;
        }
yy27:
        yych = *++in;
        if (yych <= ' ') {
          if (yych <= '\n') {
            if (yych <= 0x00) goto yy11;
            if (yych <= '\t') goto yy16;
            goto yy11;
          } else {
            if (yych == '\r') goto yy11;
            if (yych <= 0x1F) goto yy16;
            goto yy30;
          }
        } else {
          if (yych <= '9') {
            if (yych == '#') goto yy23;
            goto yy16;
          } else {
            if (yych <= ':') goto yy25;
            if (yych == '\\') goto yy32;
            goto yy16;
          }
        }
yy28:
        ++in;
        {
          // Backslash followed by : and whitespace.
          // It is therefore normal text and not an escaped colon
          int len = (int)(in - start - 1);
          // Need to shift it over if we're overwriting backslashes.
          if (out < start)
            memmove(out, start, len);
          out += len;
          if (*(in - 1) == '\n')
            have_newline = true;
          break;
        }
yy30:
        ++in;
        {
          // 2N backslashes plus space -> 2N backslashes, end of filename.
          int len = (int)(in - start);
          if (out < start)
            memset(out, '\\', len - 1);
          out += len - 1;
          break;
        }
yy32:
        yych = *++in;
        if (yych <= ' ') {
          if (yych <= '\n') {
            if (yych <= 0x00) goto yy11;
            if (yych <= '\t') goto yy16;
            goto yy11;
          } else {
            if (yych == '\r') goto yy11;
            if (yych <= 0x1F) goto yy16;
            goto yy21;
          }
        } else {
          if (yych <= '9') {
            if (yych == '#') goto yy23;
            goto yy16;
          } else {
            if (yych <= ':') goto yy25;
            if (yych == '\\') goto yy27;
            goto yy16;
          }
        }
      }
      // --- End re2c-generated scanner. ---

    }

    // The scanner produced one de-escaped filename in [filename, out).
    int len = (int)(out - filename);
    const bool is_dependency = !parsing_targets;
    if (len > 0 && filename[len - 1] == ':') {
      len--;  // Strip off trailing colon, if any.
      parsing_targets = false;
      have_target = true;
    }

    if (len > 0) {
      StringPiece piece = StringPiece(filename, len);
      // If we've seen this as an input before, skip it.
      std::vector<StringPiece>::iterator pos = std::find(ins_.begin(), ins_.end(), piece);
      if (pos == ins_.end()) {
        if (is_dependency) {
          if (poisoned_input) {
            *err = "inputs may not also have inputs";
            return false;
          }
          // New input.
          ins_.push_back(piece);
        } else {
          // Check for a new output.
          if (std::find(outs_.begin(), outs_.end(), piece) == outs_.end())
            outs_.push_back(piece);
        }
      } else if (!is_dependency) {
        // We've passed an input on the left side; reject new inputs.
        poisoned_input = true;
      }
    }

    if (have_newline) {
      // A newline ends a rule so the next filename will be a new target.
      parsing_targets = true;
      poisoned_input = false;
    }
  }
  if (!have_target) {
    *err = "expected ':' in depfile";
    return false;
  }
  return true;
}
diff --git a/src/depfile_parser.h b/src/depfile_parser.h
new file mode 100644
index 0000000..0e8db81
--- /dev/null
+++ b/src/depfile_parser.h
@@ -0,0 +1,42 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_DEPFILE_PARSER_H_
+#define NINJA_DEPFILE_PARSER_H_
+
+#include <string>
+#include <vector>
+
+#include "string_piece.h"
+
+struct DepfileParserOptions {
+ DepfileParserOptions() {}
+};
+
+/// Parser for the dependency information emitted by gcc's -M flags.
+struct DepfileParser {
+ explicit DepfileParser(DepfileParserOptions options =
+ DepfileParserOptions());
+
+ /// Parse an input file. Input must be NUL-terminated.
+ /// Warning: may mutate the content in-place and parsed StringPieces are
+ /// pointers within it.
+ bool Parse(std::string* content, std::string* err);
+
+ std::vector<StringPiece> outs_;
+ std::vector<StringPiece> ins_;
+ DepfileParserOptions options_;
+};
+
+#endif // NINJA_DEPFILE_PARSER_H_
diff --git a/src/depfile_parser.in.cc b/src/depfile_parser.in.cc
new file mode 100644
index 0000000..75ba982
--- /dev/null
+++ b/src/depfile_parser.in.cc
@@ -0,0 +1,207 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "depfile_parser.h"
+#include "util.h"
+
+#include <algorithm>
+
+using namespace std;
+
+DepfileParser::DepfileParser(DepfileParserOptions options)
+ : options_(options)
+{
+}
+
+// A note on backslashes in Makefiles, from reading the docs:
+// Backslash-newline is the line continuation character.
+// Backslash-# escapes a # (otherwise meaningful as a comment start).
+// Backslash-% escapes a % (otherwise meaningful as a special).
+// Finally, quoting the GNU manual, "Backslashes that are not in danger
+// of quoting ‘%’ characters go unmolested."
+// How do you end a line with a backslash? The netbsd Make docs suggest
+// reading the result of a shell command echoing a backslash!
+//
+// Rather than implement all of above, we follow what GCC/Clang produces:
+// Backslashes escape a space or hash sign.
+// When a space is preceded by 2N+1 backslashes, it represents N backslashes
+// followed by space.
+// When a space is preceded by 2N backslashes, it represents 2N backslashes at
+// the end of a filename.
+// A hash sign is escaped by a single backslash. All other backslashes remain
+// unchanged.
+//
+// If anyone actually has depfiles that rely on the more complicated
+// behavior we can adjust this.
+bool DepfileParser::Parse(string* content, string* err) {
+ // in: current parser input point.
+ // end: end of input.
+ // parsing_targets: whether we are parsing targets or dependencies.
+ char* in = &(*content)[0];
+ char* end = in + content->size();
+ bool have_target = false;
+ bool parsing_targets = true;
+ bool poisoned_input = false;
+ while (in < end) {
+ bool have_newline = false;
+ // out: current output point (typically same as in, but can fall behind
+ // as we de-escape backslashes).
+ char* out = in;
+ // filename: start of the current parsed filename.
+ char* filename = out;
+ for (;;) {
+ // start: beginning of the current parsed span.
+ const char* start = in;
+ char* yymarker = NULL;
+ /*!re2c
+ re2c:define:YYCTYPE = "unsigned char";
+ re2c:define:YYCURSOR = in;
+ re2c:define:YYLIMIT = end;
+ re2c:define:YYMARKER = yymarker;
+
+ re2c:yyfill:enable = 0;
+
+ re2c:indent:top = 2;
+ re2c:indent:string = " ";
+
+ nul = "\000";
+ newline = '\r'?'\n';
+
+ '\\\\'* '\\ ' {
+ // 2N+1 backslashes plus space -> N backslashes plus space.
+ int len = (int)(in - start);
+ int n = len / 2 - 1;
+ if (out < start)
+ memset(out, '\\', n);
+ out += n;
+ *out++ = ' ';
+ continue;
+ }
+ '\\\\'+ ' ' {
+ // 2N backslashes plus space -> 2N backslashes, end of filename.
+ int len = (int)(in - start);
+ if (out < start)
+ memset(out, '\\', len - 1);
+ out += len - 1;
+ break;
+ }
+ '\\'+ '#' {
+ // De-escape hash sign, but preserve other leading backslashes.
+ int len = (int)(in - start);
+ if (len > 2 && out < start)
+ memset(out, '\\', len - 2);
+ out += len - 2;
+ *out++ = '#';
+ continue;
+ }
+ '\\'+ ':' [\x00\x20\r\n\t] {
+ // Backslash followed by : and whitespace.
+ // It is therefore normal text and not an escaped colon
+ int len = (int)(in - start - 1);
+ // Need to shift it over if we're overwriting backslashes.
+ if (out < start)
+ memmove(out, start, len);
+ out += len;
+ if (*(in - 1) == '\n')
+ have_newline = true;
+ break;
+ }
+ '\\'+ ':' {
+ // De-escape colon sign, but preserve other leading backslashes.
+ // Regular expression uses lookahead to make sure that no whitespace
+ // nor EOF follows. In that case it'd be the : at the end of a target
+ int len = (int)(in - start);
+ if (len > 2 && out < start)
+ memset(out, '\\', len - 2);
+ out += len - 2;
+ *out++ = ':';
+ continue;
+ }
+ '$$' {
+ // De-escape dollar character.
+ *out++ = '$';
+ continue;
+ }
+ '\\'+ [^\000\r\n] | [a-zA-Z0-9+,/_:.~()}{%=@\x5B\x5D!\x80-\xFF-]+ {
+ // Got a span of plain text.
+ int len = (int)(in - start);
+ // Need to shift it over if we're overwriting backslashes.
+ if (out < start)
+ memmove(out, start, len);
+ out += len;
+ continue;
+ }
+ nul {
+ break;
+ }
+ '\\' newline {
+ // A line continuation ends the current file name.
+ break;
+ }
+ newline {
+ // A newline ends the current file name and the current rule.
+ have_newline = true;
+ break;
+ }
+ [^] {
+ // For any other character (e.g. whitespace), swallow it here,
+ // allowing the outer logic to loop around again.
+ break;
+ }
+ */
+ }
+
+ int len = (int)(out - filename);
+ const bool is_dependency = !parsing_targets;
+ if (len > 0 && filename[len - 1] == ':') {
+ len--; // Strip off trailing colon, if any.
+ parsing_targets = false;
+ have_target = true;
+ }
+
+ if (len > 0) {
+ StringPiece piece = StringPiece(filename, len);
+ // If we've seen this as an input before, skip it.
+ std::vector<StringPiece>::iterator pos = std::find(ins_.begin(), ins_.end(), piece);
+ if (pos == ins_.end()) {
+ if (is_dependency) {
+ if (poisoned_input) {
+ *err = "inputs may not also have inputs";
+ return false;
+ }
+ // New input.
+ ins_.push_back(piece);
+ } else {
+ // Check for a new output.
+ if (std::find(outs_.begin(), outs_.end(), piece) == outs_.end())
+ outs_.push_back(piece);
+ }
+ } else if (!is_dependency) {
+ // We've passed an input on the left side; reject new inputs.
+ poisoned_input = true;
+ }
+ }
+
+ if (have_newline) {
+ // A newline ends a rule so the next filename will be a new target.
+ parsing_targets = true;
+ poisoned_input = false;
+ }
+ }
+ if (!have_target) {
+ *err = "expected ':' in depfile";
+ return false;
+ }
+ return true;
+}
diff --git a/src/depfile_parser_perftest.cc b/src/depfile_parser_perftest.cc
new file mode 100644
index 0000000..52555e6
--- /dev/null
+++ b/src/depfile_parser_perftest.cc
@@ -0,0 +1,79 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "depfile_parser.h"
+#include "util.h"
+#include "metrics.h"
+
+using namespace std;
+
+int main(int argc, char* argv[]) {
+ if (argc < 2) {
+ printf("usage: %s <file1> <file2...>\n", argv[0]);
+ return 1;
+ }
+
+ vector<float> times;
+ for (int i = 1; i < argc; ++i) {
+ const char* filename = argv[i];
+
+ for (int limit = 1 << 10; limit < (1<<20); limit *= 2) {
+ int64_t start = GetTimeMillis();
+ for (int rep = 0; rep < limit; ++rep) {
+ string buf;
+ string err;
+ if (ReadFile(filename, &buf, &err) < 0) {
+ printf("%s: %s\n", filename, err.c_str());
+ return 1;
+ }
+
+ DepfileParser parser;
+ if (!parser.Parse(&buf, &err)) {
+ printf("%s: %s\n", filename, err.c_str());
+ return 1;
+ }
+ }
+ int64_t end = GetTimeMillis();
+
+ if (end - start > 100) {
+ int delta = (int)(end - start);
+ float time = delta*1000 / (float)limit;
+ printf("%s: %.1fus\n", filename, time);
+ times.push_back(time);
+ break;
+ }
+ }
+ }
+
+ if (!times.empty()) {
+ float min = times[0];
+ float max = times[0];
+ float total = 0;
+ for (size_t i = 0; i < times.size(); ++i) {
+ total += times[i];
+ if (times[i] < min)
+ min = times[i];
+ else if (times[i] > max)
+ max = times[i];
+ }
+
+ printf("min %.1fus max %.1fus avg %.1fus\n",
+ min, max, total / times.size());
+ }
+
+ return 0;
+}
diff --git a/src/depfile_parser_test.cc b/src/depfile_parser_test.cc
new file mode 100644
index 0000000..8886258
--- /dev/null
+++ b/src/depfile_parser_test.cc
@@ -0,0 +1,380 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "depfile_parser.h"
+
+#include "test.h"
+
+using namespace std;
+
+struct DepfileParserTest : public testing::Test {
+ bool Parse(const char* input, string* err);
+
+ DepfileParser parser_;
+ string input_;
+};
+
+bool DepfileParserTest::Parse(const char* input, string* err) {
+ input_ = input;
+ return parser_.Parse(&input_, err);
+}
+
+TEST_F(DepfileParserTest, Basic) {
+ string err;
+ EXPECT_TRUE(Parse(
+"build/ninja.o: ninja.cc ninja.h eval_env.h manifest_parser.h\n",
+ &err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(1u, parser_.outs_.size());
+ EXPECT_EQ("build/ninja.o", parser_.outs_[0].AsString());
+ EXPECT_EQ(4u, parser_.ins_.size());
+}
+
+TEST_F(DepfileParserTest, EarlyNewlineAndWhitespace) {
+ string err;
+ EXPECT_TRUE(Parse(
+" \\\n"
+" out: in\n",
+ &err));
+ ASSERT_EQ("", err);
+}
+
+TEST_F(DepfileParserTest, Continuation) {
+ string err;
+ EXPECT_TRUE(Parse(
+"foo.o: \\\n"
+" bar.h baz.h\n",
+ &err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(1u, parser_.outs_.size());
+ EXPECT_EQ("foo.o", parser_.outs_[0].AsString());
+ EXPECT_EQ(2u, parser_.ins_.size());
+}
+
+TEST_F(DepfileParserTest, CarriageReturnContinuation) {
+ string err;
+ EXPECT_TRUE(Parse(
+"foo.o: \\\r\n"
+" bar.h baz.h\r\n",
+ &err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(1u, parser_.outs_.size());
+ EXPECT_EQ("foo.o", parser_.outs_[0].AsString());
+ EXPECT_EQ(2u, parser_.ins_.size());
+}
+
+TEST_F(DepfileParserTest, BackSlashes) {
+ string err;
+ EXPECT_TRUE(Parse(
+"Project\\Dir\\Build\\Release8\\Foo\\Foo.res : \\\n"
+" Dir\\Library\\Foo.rc \\\n"
+" Dir\\Library\\Version\\Bar.h \\\n"
+" Dir\\Library\\Foo.ico \\\n"
+" Project\\Thing\\Bar.tlb \\\n",
+ &err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(1u, parser_.outs_.size());
+ EXPECT_EQ("Project\\Dir\\Build\\Release8\\Foo\\Foo.res",
+ parser_.outs_[0].AsString());
+ EXPECT_EQ(4u, parser_.ins_.size());
+}
+
+TEST_F(DepfileParserTest, Spaces) {
+ string err;
+ EXPECT_TRUE(Parse(
+"a\\ bc\\ def: a\\ b c d",
+ &err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(1u, parser_.outs_.size());
+ EXPECT_EQ("a bc def",
+ parser_.outs_[0].AsString());
+ ASSERT_EQ(3u, parser_.ins_.size());
+ EXPECT_EQ("a b",
+ parser_.ins_[0].AsString());
+ EXPECT_EQ("c",
+ parser_.ins_[1].AsString());
+ EXPECT_EQ("d",
+ parser_.ins_[2].AsString());
+}
+
+TEST_F(DepfileParserTest, MultipleBackslashes) {
+ // Successive 2N+1 backslashes followed by space (' ') are replaced by N >= 0
+ // backslashes and the space. A single backslash before hash sign is removed.
+ // Other backslashes remain untouched (including 2N backslashes followed by
+ // space).
+ string err;
+ EXPECT_TRUE(Parse(
+"a\\ b\\#c.h: \\\\\\\\\\ \\\\\\\\ \\\\share\\info\\\\#1",
+ &err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(1u, parser_.outs_.size());
+ EXPECT_EQ("a b#c.h",
+ parser_.outs_[0].AsString());
+ ASSERT_EQ(3u, parser_.ins_.size());
+ EXPECT_EQ("\\\\ ",
+ parser_.ins_[0].AsString());
+ EXPECT_EQ("\\\\\\\\",
+ parser_.ins_[1].AsString());
+ EXPECT_EQ("\\\\share\\info\\#1",
+ parser_.ins_[2].AsString());
+}
+
+TEST_F(DepfileParserTest, Escapes) {
+ // Put backslashes before a variety of characters, see which ones make
+ // it through.
+ string err;
+ EXPECT_TRUE(Parse(
+"\\!\\@\\#$$\\%\\^\\&\\[\\]\\\\:",
+ &err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(1u, parser_.outs_.size());
+ EXPECT_EQ("\\!\\@#$\\%\\^\\&\\[\\]\\\\",
+ parser_.outs_[0].AsString());
+ ASSERT_EQ(0u, parser_.ins_.size());
+}
+
+TEST_F(DepfileParserTest, EscapedColons)
+{
+ std::string err;
+ // Tests for correct parsing of depfiles produced on Windows
+ // by both Clang, GCC pre 10 and GCC 10
+ EXPECT_TRUE(Parse(
+"c\\:\\gcc\\x86_64-w64-mingw32\\include\\stddef.o: \\\n"
+" c:\\gcc\\x86_64-w64-mingw32\\include\\stddef.h \n",
+ &err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(1u, parser_.outs_.size());
+ EXPECT_EQ("c:\\gcc\\x86_64-w64-mingw32\\include\\stddef.o",
+ parser_.outs_[0].AsString());
+ ASSERT_EQ(1u, parser_.ins_.size());
+ EXPECT_EQ("c:\\gcc\\x86_64-w64-mingw32\\include\\stddef.h",
+ parser_.ins_[0].AsString());
+}
+
+TEST_F(DepfileParserTest, EscapedTargetColon)
+{
+ std::string err;
+ EXPECT_TRUE(Parse(
+"foo1\\: x\n"
+"foo1\\:\n"
+"foo1\\:\r\n"
+"foo1\\:\t\n"
+"foo1\\:",
+ &err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(1u, parser_.outs_.size());
+ EXPECT_EQ("foo1\\", parser_.outs_[0].AsString());
+ ASSERT_EQ(1u, parser_.ins_.size());
+ EXPECT_EQ("x", parser_.ins_[0].AsString());
+}
+
+TEST_F(DepfileParserTest, SpecialChars) {
+ // See filenames like istreambuf.iterator_op!= in
+ // https://github.com/google/libcxx/tree/master/test/iterators/stream.iterators/istreambuf.iterator/
+ string err;
+ EXPECT_TRUE(Parse(
+"C:/Program\\ Files\\ (x86)/Microsoft\\ crtdefs.h: \\\n"
+" en@quot.header~ t+t-x!=1 \\\n"
+" openldap/slapd.d/cn=config/cn=schema/cn={0}core.ldif\\\n"
+" Fu\303\244ball\\\n"
+" a[1]b@2%c",
+ &err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(1u, parser_.outs_.size());
+ EXPECT_EQ("C:/Program Files (x86)/Microsoft crtdefs.h",
+ parser_.outs_[0].AsString());
+ ASSERT_EQ(5u, parser_.ins_.size());
+ EXPECT_EQ("en@quot.header~",
+ parser_.ins_[0].AsString());
+ EXPECT_EQ("t+t-x!=1",
+ parser_.ins_[1].AsString());
+ EXPECT_EQ("openldap/slapd.d/cn=config/cn=schema/cn={0}core.ldif",
+ parser_.ins_[2].AsString());
+ EXPECT_EQ("Fu\303\244ball",
+ parser_.ins_[3].AsString());
+ EXPECT_EQ("a[1]b@2%c",
+ parser_.ins_[4].AsString());
+}
+
+TEST_F(DepfileParserTest, UnifyMultipleOutputs) {
+ // check that multiple duplicate targets are properly unified
+ string err;
+ EXPECT_TRUE(Parse("foo foo: x y z", &err));
+ ASSERT_EQ(1u, parser_.outs_.size());
+ ASSERT_EQ("foo", parser_.outs_[0].AsString());
+ ASSERT_EQ(3u, parser_.ins_.size());
+ EXPECT_EQ("x", parser_.ins_[0].AsString());
+ EXPECT_EQ("y", parser_.ins_[1].AsString());
+ EXPECT_EQ("z", parser_.ins_[2].AsString());
+}
+
+TEST_F(DepfileParserTest, MultipleDifferentOutputs) {
+ // check that multiple different outputs are accepted by the parser
+ string err;
+ EXPECT_TRUE(Parse("foo bar: x y z", &err));
+ ASSERT_EQ(2u, parser_.outs_.size());
+ ASSERT_EQ("foo", parser_.outs_[0].AsString());
+ ASSERT_EQ("bar", parser_.outs_[1].AsString());
+ ASSERT_EQ(3u, parser_.ins_.size());
+ EXPECT_EQ("x", parser_.ins_[0].AsString());
+ EXPECT_EQ("y", parser_.ins_[1].AsString());
+ EXPECT_EQ("z", parser_.ins_[2].AsString());
+}
+
+TEST_F(DepfileParserTest, MultipleEmptyRules) {
+ string err;
+ EXPECT_TRUE(Parse("foo: x\n"
+ "foo: \n"
+ "foo:\n", &err));
+ ASSERT_EQ(1u, parser_.outs_.size());
+ ASSERT_EQ("foo", parser_.outs_[0].AsString());
+ ASSERT_EQ(1u, parser_.ins_.size());
+ EXPECT_EQ("x", parser_.ins_[0].AsString());
+}
+
+TEST_F(DepfileParserTest, UnifyMultipleRulesLF) {
+ string err;
+ EXPECT_TRUE(Parse("foo: x\n"
+ "foo: y\n"
+ "foo \\\n"
+ "foo: z\n", &err));
+ ASSERT_EQ(1u, parser_.outs_.size());
+ ASSERT_EQ("foo", parser_.outs_[0].AsString());
+ ASSERT_EQ(3u, parser_.ins_.size());
+ EXPECT_EQ("x", parser_.ins_[0].AsString());
+ EXPECT_EQ("y", parser_.ins_[1].AsString());
+ EXPECT_EQ("z", parser_.ins_[2].AsString());
+}
+
+TEST_F(DepfileParserTest, UnifyMultipleRulesCRLF) {
+ string err;
+ EXPECT_TRUE(Parse("foo: x\r\n"
+ "foo: y\r\n"
+ "foo \\\r\n"
+ "foo: z\r\n", &err));
+ ASSERT_EQ(1u, parser_.outs_.size());
+ ASSERT_EQ("foo", parser_.outs_[0].AsString());
+ ASSERT_EQ(3u, parser_.ins_.size());
+ EXPECT_EQ("x", parser_.ins_[0].AsString());
+ EXPECT_EQ("y", parser_.ins_[1].AsString());
+ EXPECT_EQ("z", parser_.ins_[2].AsString());
+}
+
+TEST_F(DepfileParserTest, UnifyMixedRulesLF) {
+ string err;
+ EXPECT_TRUE(Parse("foo: x\\\n"
+ " y\n"
+ "foo \\\n"
+ "foo: z\n", &err));
+ ASSERT_EQ(1u, parser_.outs_.size());
+ ASSERT_EQ("foo", parser_.outs_[0].AsString());
+ ASSERT_EQ(3u, parser_.ins_.size());
+ EXPECT_EQ("x", parser_.ins_[0].AsString());
+ EXPECT_EQ("y", parser_.ins_[1].AsString());
+ EXPECT_EQ("z", parser_.ins_[2].AsString());
+}
+
+TEST_F(DepfileParserTest, UnifyMixedRulesCRLF) {
+ string err;
+ EXPECT_TRUE(Parse("foo: x\\\r\n"
+ " y\r\n"
+ "foo \\\r\n"
+ "foo: z\r\n", &err));
+ ASSERT_EQ(1u, parser_.outs_.size());
+ ASSERT_EQ("foo", parser_.outs_[0].AsString());
+ ASSERT_EQ(3u, parser_.ins_.size());
+ EXPECT_EQ("x", parser_.ins_[0].AsString());
+ EXPECT_EQ("y", parser_.ins_[1].AsString());
+ EXPECT_EQ("z", parser_.ins_[2].AsString());
+}
+
+TEST_F(DepfileParserTest, IndentedRulesLF) {
+ string err;
+ EXPECT_TRUE(Parse(" foo: x\n"
+ " foo: y\n"
+ " foo: z\n", &err));
+ ASSERT_EQ(1u, parser_.outs_.size());
+ ASSERT_EQ("foo", parser_.outs_[0].AsString());
+ ASSERT_EQ(3u, parser_.ins_.size());
+ EXPECT_EQ("x", parser_.ins_[0].AsString());
+ EXPECT_EQ("y", parser_.ins_[1].AsString());
+ EXPECT_EQ("z", parser_.ins_[2].AsString());
+}
+
+TEST_F(DepfileParserTest, IndentedRulesCRLF) {
+ string err;
+ EXPECT_TRUE(Parse(" foo: x\r\n"
+ " foo: y\r\n"
+ " foo: z\r\n", &err));
+ ASSERT_EQ(1u, parser_.outs_.size());
+ ASSERT_EQ("foo", parser_.outs_[0].AsString());
+ ASSERT_EQ(3u, parser_.ins_.size());
+ EXPECT_EQ("x", parser_.ins_[0].AsString());
+ EXPECT_EQ("y", parser_.ins_[1].AsString());
+ EXPECT_EQ("z", parser_.ins_[2].AsString());
+}
+
+TEST_F(DepfileParserTest, TolerateMP) {
+ string err;
+ EXPECT_TRUE(Parse("foo: x y z\n"
+ "x:\n"
+ "y:\n"
+ "z:\n", &err));
+ ASSERT_EQ(1u, parser_.outs_.size());
+ ASSERT_EQ("foo", parser_.outs_[0].AsString());
+ ASSERT_EQ(3u, parser_.ins_.size());
+ EXPECT_EQ("x", parser_.ins_[0].AsString());
+ EXPECT_EQ("y", parser_.ins_[1].AsString());
+ EXPECT_EQ("z", parser_.ins_[2].AsString());
+}
+
+TEST_F(DepfileParserTest, MultipleRulesTolerateMP) {
+ string err;
+ EXPECT_TRUE(Parse("foo: x\n"
+ "x:\n"
+ "foo: y\n"
+ "y:\n"
+ "foo: z\n"
+ "z:\n", &err));
+ ASSERT_EQ(1u, parser_.outs_.size());
+ ASSERT_EQ("foo", parser_.outs_[0].AsString());
+ ASSERT_EQ(3u, parser_.ins_.size());
+ EXPECT_EQ("x", parser_.ins_[0].AsString());
+ EXPECT_EQ("y", parser_.ins_[1].AsString());
+ EXPECT_EQ("z", parser_.ins_[2].AsString());
+}
+
+TEST_F(DepfileParserTest, MultipleRulesDifferentOutputs) {
+ // check that multiple different outputs are accepted by the parser
+ // when spread across multiple rules
+ string err;
+ EXPECT_TRUE(Parse("foo: x y\n"
+ "bar: y z\n", &err));
+ ASSERT_EQ(2u, parser_.outs_.size());
+ ASSERT_EQ("foo", parser_.outs_[0].AsString());
+ ASSERT_EQ("bar", parser_.outs_[1].AsString());
+ ASSERT_EQ(3u, parser_.ins_.size());
+ EXPECT_EQ("x", parser_.ins_[0].AsString());
+ EXPECT_EQ("y", parser_.ins_[1].AsString());
+ EXPECT_EQ("z", parser_.ins_[2].AsString());
+}
+
+TEST_F(DepfileParserTest, BuggyMP) {
+ std::string err;
+ EXPECT_FALSE(Parse("foo: x y z\n"
+ "x: alsoin\n"
+ "y:\n"
+ "z:\n", &err));
+ ASSERT_EQ("inputs may not also have inputs", err);
+}
diff --git a/src/deps_log.cc b/src/deps_log.cc
new file mode 100644
index 0000000..191f300
--- /dev/null
+++ b/src/deps_log.cc
@@ -0,0 +1,438 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "deps_log.h"
+
+#include <assert.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#ifndef _WIN32
+#include <unistd.h>
+#elif defined(_MSC_VER) && (_MSC_VER < 1900)
+typedef __int32 int32_t;
+typedef unsigned __int32 uint32_t;
+#endif
+
+#include "graph.h"
+#include "metrics.h"
+#include "state.h"
+#include "util.h"
+
+using namespace std;
+
+// The version is stored as 4 bytes after the signature and also serves as a
+// byte order mark. Signature and version combined are 16 bytes long.
+const char kFileSignature[] = "# ninjadeps\n";
+const int kCurrentVersion = 4;
+
+// Record size is currently limited to less than the full 32 bit, due to
+// internal buffers having to have this size.
+const unsigned kMaxRecordSize = (1 << 19) - 1;
+
+DepsLog::~DepsLog() {
+ Close();
+}
+
+bool DepsLog::OpenForWrite(const string& path, string* err) {
+ if (needs_recompaction_) {
+ if (!Recompact(path, err))
+ return false;
+ }
+
+ assert(!file_);
+ file_path_ = path; // we don't actually open the file right now, but will do
+ // so on the first write attempt
+ return true;
+}
+
+bool DepsLog::RecordDeps(Node* node, TimeStamp mtime,
+ const vector<Node*>& nodes) {
+ return RecordDeps(node, mtime, nodes.size(),
+ nodes.empty() ? NULL : (Node**)&nodes.front());
+}
+
+bool DepsLog::RecordDeps(Node* node, TimeStamp mtime,
+ int node_count, Node** nodes) {
+ // Track whether there's any new data to be recorded.
+ bool made_change = false;
+
+ // Assign ids to all nodes that are missing one.
+ if (node->id() < 0) {
+ if (!RecordId(node))
+ return false;
+ made_change = true;
+ }
+ for (int i = 0; i < node_count; ++i) {
+ if (nodes[i]->id() < 0) {
+ if (!RecordId(nodes[i]))
+ return false;
+ made_change = true;
+ }
+ }
+
+ // See if the new data is different than the existing data, if any.
+ if (!made_change) {
+ Deps* deps = GetDeps(node);
+ if (!deps ||
+ deps->mtime != mtime ||
+ deps->node_count != node_count) {
+ made_change = true;
+ } else {
+ for (int i = 0; i < node_count; ++i) {
+ if (deps->nodes[i] != nodes[i]) {
+ made_change = true;
+ break;
+ }
+ }
+ }
+ }
+
+ // Don't write anything if there's no new info.
+ if (!made_change)
+ return true;
+
+ // Update on-disk representation.
+ unsigned size = 4 * (1 + 2 + node_count);
+ if (size > kMaxRecordSize) {
+ errno = ERANGE;
+ return false;
+ }
+
+ if (!OpenForWriteIfNeeded()) {
+ return false;
+ }
+ size |= 0x80000000; // Deps record: set high bit.
+ if (fwrite(&size, 4, 1, file_) < 1)
+ return false;
+ int id = node->id();
+ if (fwrite(&id, 4, 1, file_) < 1)
+ return false;
+ uint32_t mtime_part = static_cast<uint32_t>(mtime & 0xffffffff);
+ if (fwrite(&mtime_part, 4, 1, file_) < 1)
+ return false;
+ mtime_part = static_cast<uint32_t>((mtime >> 32) & 0xffffffff);
+ if (fwrite(&mtime_part, 4, 1, file_) < 1)
+ return false;
+ for (int i = 0; i < node_count; ++i) {
+ id = nodes[i]->id();
+ if (fwrite(&id, 4, 1, file_) < 1)
+ return false;
+ }
+ if (fflush(file_) != 0)
+ return false;
+
+ // Update in-memory representation.
+ Deps* deps = new Deps(mtime, node_count);
+ for (int i = 0; i < node_count; ++i)
+ deps->nodes[i] = nodes[i];
+ UpdateDeps(node->id(), deps);
+
+ return true;
+}
+
+void DepsLog::Close() {
+ OpenForWriteIfNeeded(); // create the file even if nothing has been recorded
+ if (file_)
+ fclose(file_);
+ file_ = NULL;
+}
+
+LoadStatus DepsLog::Load(const string& path, State* state, string* err) {
+ METRIC_RECORD(".ninja_deps load");
+ char buf[kMaxRecordSize + 1];
+ FILE* f = fopen(path.c_str(), "rb");
+ if (!f) {
+ if (errno == ENOENT)
+ return LOAD_NOT_FOUND;
+ *err = strerror(errno);
+ return LOAD_ERROR;
+ }
+
+ bool valid_header = true;
+ int version = 0;
+ if (!fgets(buf, sizeof(buf), f) || fread(&version, 4, 1, f) < 1)
+ valid_header = false;
+ // Note: For version differences, this should migrate to the new format.
+ // But the v1 format could sometimes (rarely) end up with invalid data, so
+ // don't migrate v1 to v3 to force a rebuild. (v2 only existed for a few days,
+ // and there was no release with it, so pretend that it never happened.)
+ if (!valid_header || strcmp(buf, kFileSignature) != 0 ||
+ version != kCurrentVersion) {
+ if (version == 1)
+ *err = "deps log version change; rebuilding";
+ else
+ *err = "bad deps log signature or version; starting over";
+ fclose(f);
+ unlink(path.c_str());
+ // Don't report this as a failure. An empty deps log will cause
+ // us to rebuild the outputs anyway.
+ return LOAD_SUCCESS;
+ }
+
+ long offset;
+ bool read_failed = false;
+ int unique_dep_record_count = 0;
+ int total_dep_record_count = 0;
+ for (;;) {
+ offset = ftell(f);
+
+ unsigned size;
+ if (fread(&size, 4, 1, f) < 1) {
+ if (!feof(f))
+ read_failed = true;
+ break;
+ }
+ bool is_deps = (size >> 31) != 0;
+ size = size & 0x7FFFFFFF;
+
+ if (size > kMaxRecordSize || fread(buf, size, 1, f) < 1) {
+ read_failed = true;
+ break;
+ }
+
+ if (is_deps) {
+ assert(size % 4 == 0);
+ int* deps_data = reinterpret_cast<int*>(buf);
+ int out_id = deps_data[0];
+ TimeStamp mtime;
+ mtime = (TimeStamp)(((uint64_t)(unsigned int)deps_data[2] << 32) |
+ (uint64_t)(unsigned int)deps_data[1]);
+ deps_data += 3;
+ int deps_count = (size / 4) - 3;
+
+ Deps* deps = new Deps(mtime, deps_count);
+ for (int i = 0; i < deps_count; ++i) {
+ assert(deps_data[i] < (int)nodes_.size());
+ assert(nodes_[deps_data[i]]);
+ deps->nodes[i] = nodes_[deps_data[i]];
+ }
+
+ total_dep_record_count++;
+ if (!UpdateDeps(out_id, deps))
+ ++unique_dep_record_count;
+ } else {
+ int path_size = size - 4;
+ assert(path_size > 0); // CanonicalizePath() rejects empty paths.
+ // There can be up to 3 bytes of padding.
+ if (buf[path_size - 1] == '\0') --path_size;
+ if (buf[path_size - 1] == '\0') --path_size;
+ if (buf[path_size - 1] == '\0') --path_size;
+ StringPiece subpath(buf, path_size);
+ // It is not necessary to pass in a correct slash_bits here. It will
+ // either be a Node that's in the manifest (in which case it will already
+ // have a correct slash_bits that GetNode will look up), or it is an
+ // implicit dependency from a .d which does not affect the build command
+ // (and so need not have its slashes maintained).
+ Node* node = state->GetNode(subpath, 0);
+
+ // Check that the expected index matches the actual index. This can only
+ // happen if two ninja processes write to the same deps log concurrently.
+ // (This uses unary complement to make the checksum look less like a
+ // dependency record entry.)
+ unsigned checksum = *reinterpret_cast<unsigned*>(buf + size - 4);
+ int expected_id = ~checksum;
+ int id = nodes_.size();
+ if (id != expected_id) {
+ read_failed = true;
+ break;
+ }
+
+ assert(node->id() < 0);
+ node->set_id(id);
+ nodes_.push_back(node);
+ }
+ }
+
+ if (read_failed) {
+ // An error occurred while loading; try to recover by truncating the
+ // file to the last fully-read record.
+ if (ferror(f)) {
+ *err = strerror(ferror(f));
+ } else {
+ *err = "premature end of file";
+ }
+ fclose(f);
+
+ if (!Truncate(path, offset, err))
+ return LOAD_ERROR;
+
+ // The truncate succeeded; we'll just report the load error as a
+ // warning because the build can proceed.
+ *err += "; recovering";
+ return LOAD_SUCCESS;
+ }
+
+ fclose(f);
+
+ // Rebuild the log if there are too many dead records.
+ int kMinCompactionEntryCount = 1000;
+ int kCompactionRatio = 3;
+ if (total_dep_record_count > kMinCompactionEntryCount &&
+ total_dep_record_count > unique_dep_record_count * kCompactionRatio) {
+ needs_recompaction_ = true;
+ }
+
+ return LOAD_SUCCESS;
+}
+
+DepsLog::Deps* DepsLog::GetDeps(Node* node) {
+ // Abort if the node has no id (never referenced in the deps) or if
+ // there's no deps recorded for the node.
+ if (node->id() < 0 || node->id() >= (int)deps_.size())
+ return NULL;
+ return deps_[node->id()];
+}
+
+bool DepsLog::Recompact(const string& path, string* err) {
+ METRIC_RECORD(".ninja_deps recompact");
+
+ Close();
+ string temp_path = path + ".recompact";
+
+ // OpenForWrite() opens for append. Make sure it's not appending to a
+ // left-over file from a previous recompaction attempt that crashed somehow.
+ unlink(temp_path.c_str());
+
+ DepsLog new_log;
+ if (!new_log.OpenForWrite(temp_path, err))
+ return false;
+
+ // Clear all known ids so that new ones can be reassigned. The new indices
+ // will refer to the ordering in new_log, not in the current log.
+ for (vector<Node*>::iterator i = nodes_.begin(); i != nodes_.end(); ++i)
+ (*i)->set_id(-1);
+
+ // Write out all deps again.
+ for (int old_id = 0; old_id < (int)deps_.size(); ++old_id) {
+ Deps* deps = deps_[old_id];
+ if (!deps) continue; // If nodes_[old_id] is a leaf, it has no deps.
+
+ if (!IsDepsEntryLiveFor(nodes_[old_id]))
+ continue;
+
+ if (!new_log.RecordDeps(nodes_[old_id], deps->mtime,
+ deps->node_count, deps->nodes)) {
+ new_log.Close();
+ return false;
+ }
+ }
+
+ new_log.Close();
+
+ // All nodes now have ids that refer to new_log, so steal its data.
+ deps_.swap(new_log.deps_);
+ nodes_.swap(new_log.nodes_);
+
+ if (unlink(path.c_str()) < 0) {
+ *err = strerror(errno);
+ return false;
+ }
+
+ if (rename(temp_path.c_str(), path.c_str()) < 0) {
+ *err = strerror(errno);
+ return false;
+ }
+
+ return true;
+}
+
+bool DepsLog::IsDepsEntryLiveFor(Node* node) {
+ // Skip entries that don't have in-edges or whose edges don't have a
+ // "deps" attribute. They were in the deps log from previous builds, but
+// the files they were for were removed from the build and their deps
+ // entries are no longer needed.
+ // (Without the check for "deps", a chain of two or more nodes that each
+ // had deps wouldn't be collected in a single recompaction.)
+ return node->in_edge() && !node->in_edge()->GetBinding("deps").empty();
+}
+
+bool DepsLog::UpdateDeps(int out_id, Deps* deps) {
+ if (out_id >= (int)deps_.size())
+ deps_.resize(out_id + 1);
+
+ bool delete_old = deps_[out_id] != NULL;
+ if (delete_old)
+ delete deps_[out_id];
+ deps_[out_id] = deps;
+ return delete_old;
+}
+
+bool DepsLog::RecordId(Node* node) {
+ int path_size = node->path().size();
+ int padding = (4 - path_size % 4) % 4; // Pad path to 4 byte boundary.
+
+ unsigned size = path_size + padding + 4;
+ if (size > kMaxRecordSize) {
+ errno = ERANGE;
+ return false;
+ }
+
+ if (!OpenForWriteIfNeeded()) {
+ return false;
+ }
+ if (fwrite(&size, 4, 1, file_) < 1)
+ return false;
+ if (fwrite(node->path().data(), path_size, 1, file_) < 1) {
+ assert(!node->path().empty());
+ return false;
+ }
+ if (padding && fwrite("\0\0", padding, 1, file_) < 1)
+ return false;
+ int id = nodes_.size();
+ unsigned checksum = ~(unsigned)id;
+ if (fwrite(&checksum, 4, 1, file_) < 1)
+ return false;
+ if (fflush(file_) != 0)
+ return false;
+
+ node->set_id(id);
+ nodes_.push_back(node);
+
+ return true;
+}
+
+bool DepsLog::OpenForWriteIfNeeded() {
+ if (file_path_.empty()) {
+ return true;
+ }
+ file_ = fopen(file_path_.c_str(), "ab");
+ if (!file_) {
+ return false;
+ }
+ // Set the buffer size to this and flush the file buffer after every record
+ // to make sure records aren't written partially.
+ if (setvbuf(file_, NULL, _IOFBF, kMaxRecordSize + 1) != 0) {
+ return false;
+ }
+ SetCloseOnExec(fileno(file_));
+
+ // Opening a file in append mode doesn't set the file pointer to the file's
+ // end on Windows. Do that explicitly.
+ fseek(file_, 0, SEEK_END);
+
+ if (ftell(file_) == 0) {
+ if (fwrite(kFileSignature, sizeof(kFileSignature) - 1, 1, file_) < 1) {
+ return false;
+ }
+ if (fwrite(&kCurrentVersion, 4, 1, file_) < 1) {
+ return false;
+ }
+ }
+ if (fflush(file_) != 0) {
+ return false;
+ }
+ file_path_.clear();
+ return true;
+}
diff --git a/src/deps_log.h b/src/deps_log.h
new file mode 100644
index 0000000..cc44b41
--- /dev/null
+++ b/src/deps_log.h
@@ -0,0 +1,128 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_DEPS_LOG_H_
+#define NINJA_DEPS_LOG_H_
+
+#include <string>
+#include <vector>
+
+#include <stdio.h>
+
+#include "load_status.h"
+#include "timestamp.h"
+
+struct Node;
+struct State;
+
+/// As build commands run they can output extra dependency information
+/// (e.g. header dependencies for C source) dynamically. DepsLog collects
+/// that information at build time and uses it for subsequent builds.
+///
+/// The on-disk format is based on two primary design constraints:
+/// - it must be written to as a stream (during the build, which may be
+/// interrupted);
+/// - it can be read all at once on startup. (Alternative designs, where
+/// it contains indexing information, were considered and discarded as
+/// too complicated to implement; if the file is small then reading it
+/// fully on startup is acceptable.)
+/// Here are some stats from the Windows Chrome dependency files, to
+/// help guide the design space. The total text in the files sums to
+/// 90mb so some compression is warranted to keep load-time fast.
+/// There's about 10k files worth of dependencies that reference about
+/// 40k total paths totalling 2mb of unique strings.
+///
+/// Based on these stats, here's the current design.
+/// The file is structured as version header followed by a sequence of records.
+/// Each record is either a path string or a dependency list.
+/// Numbering the path strings in file order gives them dense integer ids.
+/// A dependency list maps an output id to a list of input ids.
+///
+/// Concretely, a record is:
+/// four bytes record length, high bit indicates record type
+/// (but max record sizes are capped at 512kB)
+/// path records contain the string name of the path, followed by up to 3
+/// padding bytes to align on 4 byte boundaries, followed by the
+/// one's complement of the expected index of the record (to detect
+/// concurrent writes of multiple ninja processes to the log).
+/// dependency records are an array of 4-byte integers
+/// [output path id,
+/// output path mtime (lower 4 bytes), output path mtime (upper 4 bytes),
+/// input path id, input path id...]
+/// (The mtime is compared against the on-disk output path mtime
+/// to verify the stored data is up-to-date.)
+/// If two records reference the same output the latter one in the file
+/// wins, allowing updates to just be appended to the file. A separate
+/// repacking step can run occasionally to remove dead records.
+struct DepsLog {
+ DepsLog() : needs_recompaction_(false), file_(NULL) {}
+ ~DepsLog();
+
+ // Writing (build-time) interface.
+ bool OpenForWrite(const std::string& path, std::string* err);
+ bool RecordDeps(Node* node, TimeStamp mtime, const std::vector<Node*>& nodes);
+ bool RecordDeps(Node* node, TimeStamp mtime, int node_count, Node** nodes);
+ void Close();
+
+ // Reading (startup-time) interface.
+ struct Deps {
+ Deps(int64_t mtime, int node_count)
+ : mtime(mtime), node_count(node_count), nodes(new Node*[node_count]) {}
+ ~Deps() { delete [] nodes; }
+ TimeStamp mtime;
+ int node_count;
+ Node** nodes;
+ };
+ LoadStatus Load(const std::string& path, State* state, std::string* err);
+ Deps* GetDeps(Node* node);
+
+ /// Rewrite the known log entries, throwing away old data.
+ bool Recompact(const std::string& path, std::string* err);
+
+ /// Returns whether the deps entry for a node is still reachable from the manifest.
+ ///
+ /// The deps log can contain deps entries for files that were built in the
+ /// past but are no longer part of the manifest. This function returns if
+ /// this is the case for a given node. This function is slow, don't call
+ /// it from code that runs on every build.
+ bool IsDepsEntryLiveFor(Node* node);
+
+ /// Used for tests.
+ const std::vector<Node*>& nodes() const { return nodes_; }
+ const std::vector<Deps*>& deps() const { return deps_; }
+
+ private:
+ // Updates the in-memory representation. Takes ownership of |deps|.
+ // Returns true if a prior deps record was deleted.
+ bool UpdateDeps(int out_id, Deps* deps);
+ // Write a node name record, assigning it an id.
+ bool RecordId(Node* node);
+
+ /// Should be called before using file_. When false is returned, errno will
+ /// be set.
+ bool OpenForWriteIfNeeded();
+
+ bool needs_recompaction_;
+ FILE* file_;
+ std::string file_path_;
+
+ /// Maps id -> Node.
+ std::vector<Node*> nodes_;
+ /// Maps id -> deps of that id.
+ std::vector<Deps*> deps_;
+
+ friend struct DepsLogTest;
+};
+
+#endif // NINJA_DEPS_LOG_H_
diff --git a/src/deps_log_test.cc b/src/deps_log_test.cc
new file mode 100644
index 0000000..4055941
--- /dev/null
+++ b/src/deps_log_test.cc
@@ -0,0 +1,481 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "deps_log.h"
+
+#include <sys/stat.h>
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+
+#include "graph.h"
+#include "util.h"
+#include "test.h"
+
+using namespace std;
+
+namespace {
+
+const char kTestFilename[] = "DepsLogTest-tempfile";
+
+struct DepsLogTest : public testing::Test {
+ virtual void SetUp() {
+ // In case a crashing test left a stale file behind.
+ unlink(kTestFilename);
+ }
+ virtual void TearDown() {
+ unlink(kTestFilename);
+ }
+};
+
+TEST_F(DepsLogTest, WriteRead) {
+ State state1;
+ DepsLog log1;
+ string err;
+ EXPECT_TRUE(log1.OpenForWrite(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ {
+ vector<Node*> deps;
+ deps.push_back(state1.GetNode("foo.h", 0));
+ deps.push_back(state1.GetNode("bar.h", 0));
+ log1.RecordDeps(state1.GetNode("out.o", 0), 1, deps);
+
+ deps.clear();
+ deps.push_back(state1.GetNode("foo.h", 0));
+ deps.push_back(state1.GetNode("bar2.h", 0));
+ log1.RecordDeps(state1.GetNode("out2.o", 0), 2, deps);
+
+ DepsLog::Deps* log_deps = log1.GetDeps(state1.GetNode("out.o", 0));
+ ASSERT_TRUE(log_deps);
+ ASSERT_EQ(1, log_deps->mtime);
+ ASSERT_EQ(2, log_deps->node_count);
+ ASSERT_EQ("foo.h", log_deps->nodes[0]->path());
+ ASSERT_EQ("bar.h", log_deps->nodes[1]->path());
+ }
+
+ log1.Close();
+
+ State state2;
+ DepsLog log2;
+ EXPECT_TRUE(log2.Load(kTestFilename, &state2, &err));
+ ASSERT_EQ("", err);
+
+ ASSERT_EQ(log1.nodes().size(), log2.nodes().size());
+ for (int i = 0; i < (int)log1.nodes().size(); ++i) {
+ Node* node1 = log1.nodes()[i];
+ Node* node2 = log2.nodes()[i];
+ ASSERT_EQ(i, node1->id());
+ ASSERT_EQ(node1->id(), node2->id());
+ }
+
+ // Spot-check the entries in log2.
+ DepsLog::Deps* log_deps = log2.GetDeps(state2.GetNode("out2.o", 0));
+ ASSERT_TRUE(log_deps);
+ ASSERT_EQ(2, log_deps->mtime);
+ ASSERT_EQ(2, log_deps->node_count);
+ ASSERT_EQ("foo.h", log_deps->nodes[0]->path());
+ ASSERT_EQ("bar2.h", log_deps->nodes[1]->path());
+}
+
+TEST_F(DepsLogTest, LotsOfDeps) {
+ const int kNumDeps = 100000; // More than 64k.
+
+ State state1;
+ DepsLog log1;
+ string err;
+ EXPECT_TRUE(log1.OpenForWrite(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ {
+ vector<Node*> deps;
+ for (int i = 0; i < kNumDeps; ++i) {
+ char buf[32];
+ sprintf(buf, "file%d.h", i);
+ deps.push_back(state1.GetNode(buf, 0));
+ }
+ log1.RecordDeps(state1.GetNode("out.o", 0), 1, deps);
+
+ DepsLog::Deps* log_deps = log1.GetDeps(state1.GetNode("out.o", 0));
+ ASSERT_EQ(kNumDeps, log_deps->node_count);
+ }
+
+ log1.Close();
+
+ State state2;
+ DepsLog log2;
+ EXPECT_TRUE(log2.Load(kTestFilename, &state2, &err));
+ ASSERT_EQ("", err);
+
+ DepsLog::Deps* log_deps = log2.GetDeps(state2.GetNode("out.o", 0));
+ ASSERT_EQ(kNumDeps, log_deps->node_count);
+}
+
+// Verify that adding the same deps twice doesn't grow the file.
+TEST_F(DepsLogTest, DoubleEntry) {
+ // Write some deps to the file and grab its size.
+ int file_size;
+ {
+ State state;
+ DepsLog log;
+ string err;
+ EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ vector<Node*> deps;
+ deps.push_back(state.GetNode("foo.h", 0));
+ deps.push_back(state.GetNode("bar.h", 0));
+ log.RecordDeps(state.GetNode("out.o", 0), 1, deps);
+ log.Close();
+
+ struct stat st;
+ ASSERT_EQ(0, stat(kTestFilename, &st));
+ file_size = (int)st.st_size;
+ ASSERT_GT(file_size, 0);
+ }
+
+ // Now reload the file, and read the same deps.
+ {
+ State state;
+ DepsLog log;
+ string err;
+ EXPECT_TRUE(log.Load(kTestFilename, &state, &err));
+
+ EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ vector<Node*> deps;
+ deps.push_back(state.GetNode("foo.h", 0));
+ deps.push_back(state.GetNode("bar.h", 0));
+ log.RecordDeps(state.GetNode("out.o", 0), 1, deps);
+ log.Close();
+
+ struct stat st;
+ ASSERT_EQ(0, stat(kTestFilename, &st));
+ int file_size_2 = (int)st.st_size;
+ ASSERT_EQ(file_size, file_size_2);
+ }
+}
+
+// Verify that adding the new deps works and can be compacted away.
+TEST_F(DepsLogTest, Recompact) {
+ const char kManifest[] =
+"rule cc\n"
+" command = cc\n"
+" deps = gcc\n"
+"build out.o: cc\n"
+"build other_out.o: cc\n";
+
+ // Write some deps to the file and grab its size.
+ int file_size;
+ {
+ State state;
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state, kManifest));
+ DepsLog log;
+ string err;
+ ASSERT_TRUE(log.OpenForWrite(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ vector<Node*> deps;
+ deps.push_back(state.GetNode("foo.h", 0));
+ deps.push_back(state.GetNode("bar.h", 0));
+ log.RecordDeps(state.GetNode("out.o", 0), 1, deps);
+
+ deps.clear();
+ deps.push_back(state.GetNode("foo.h", 0));
+ deps.push_back(state.GetNode("baz.h", 0));
+ log.RecordDeps(state.GetNode("other_out.o", 0), 1, deps);
+
+ log.Close();
+
+ struct stat st;
+ ASSERT_EQ(0, stat(kTestFilename, &st));
+ file_size = (int)st.st_size;
+ ASSERT_GT(file_size, 0);
+ }
+
+ // Now reload the file, and add slightly different deps.
+ int file_size_2;
+ {
+ State state;
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state, kManifest));
+ DepsLog log;
+ string err;
+ ASSERT_TRUE(log.Load(kTestFilename, &state, &err));
+
+ ASSERT_TRUE(log.OpenForWrite(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ vector<Node*> deps;
+ deps.push_back(state.GetNode("foo.h", 0));
+ log.RecordDeps(state.GetNode("out.o", 0), 1, deps);
+ log.Close();
+
+ struct stat st;
+ ASSERT_EQ(0, stat(kTestFilename, &st));
+ file_size_2 = (int)st.st_size;
+ // The file should grow to record the new deps.
+ ASSERT_GT(file_size_2, file_size);
+ }
+
+ // Now reload the file, verify the new deps have replaced the old, then
+ // recompact.
+ int file_size_3;
+ {
+ State state;
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state, kManifest));
+ DepsLog log;
+ string err;
+ ASSERT_TRUE(log.Load(kTestFilename, &state, &err));
+
+ Node* out = state.GetNode("out.o", 0);
+ DepsLog::Deps* deps = log.GetDeps(out);
+ ASSERT_TRUE(deps);
+ ASSERT_EQ(1, deps->mtime);
+ ASSERT_EQ(1, deps->node_count);
+ ASSERT_EQ("foo.h", deps->nodes[0]->path());
+
+ Node* other_out = state.GetNode("other_out.o", 0);
+ deps = log.GetDeps(other_out);
+ ASSERT_TRUE(deps);
+ ASSERT_EQ(1, deps->mtime);
+ ASSERT_EQ(2, deps->node_count);
+ ASSERT_EQ("foo.h", deps->nodes[0]->path());
+ ASSERT_EQ("baz.h", deps->nodes[1]->path());
+
+ ASSERT_TRUE(log.Recompact(kTestFilename, &err));
+
+ // The in-memory deps graph should still be valid after recompaction.
+ deps = log.GetDeps(out);
+ ASSERT_TRUE(deps);
+ ASSERT_EQ(1, deps->mtime);
+ ASSERT_EQ(1, deps->node_count);
+ ASSERT_EQ("foo.h", deps->nodes[0]->path());
+ ASSERT_EQ(out, log.nodes()[out->id()]);
+
+ deps = log.GetDeps(other_out);
+ ASSERT_TRUE(deps);
+ ASSERT_EQ(1, deps->mtime);
+ ASSERT_EQ(2, deps->node_count);
+ ASSERT_EQ("foo.h", deps->nodes[0]->path());
+ ASSERT_EQ("baz.h", deps->nodes[1]->path());
+ ASSERT_EQ(other_out, log.nodes()[other_out->id()]);
+
+ // The file should have shrunk a bit for the smaller deps.
+ struct stat st;
+ ASSERT_EQ(0, stat(kTestFilename, &st));
+ file_size_3 = (int)st.st_size;
+ ASSERT_LT(file_size_3, file_size_2);
+ }
+
+ // Now reload the file and recompact with an empty manifest. The previous
+ // entries should be removed.
+ {
+ State state;
+ // Intentionally not parsing kManifest here.
+ DepsLog log;
+ string err;
+ ASSERT_TRUE(log.Load(kTestFilename, &state, &err));
+
+ Node* out = state.GetNode("out.o", 0);
+ DepsLog::Deps* deps = log.GetDeps(out);
+ ASSERT_TRUE(deps);
+ ASSERT_EQ(1, deps->mtime);
+ ASSERT_EQ(1, deps->node_count);
+ ASSERT_EQ("foo.h", deps->nodes[0]->path());
+
+ Node* other_out = state.GetNode("other_out.o", 0);
+ deps = log.GetDeps(other_out);
+ ASSERT_TRUE(deps);
+ ASSERT_EQ(1, deps->mtime);
+ ASSERT_EQ(2, deps->node_count);
+ ASSERT_EQ("foo.h", deps->nodes[0]->path());
+ ASSERT_EQ("baz.h", deps->nodes[1]->path());
+
+ ASSERT_TRUE(log.Recompact(kTestFilename, &err));
+
+ // The previous entries should have been removed.
+ deps = log.GetDeps(out);
+ ASSERT_FALSE(deps);
+
+ deps = log.GetDeps(other_out);
+ ASSERT_FALSE(deps);
+
+ // The .h files pulled in via deps should no longer have ids either.
+ ASSERT_EQ(-1, state.LookupNode("foo.h")->id());
+ ASSERT_EQ(-1, state.LookupNode("baz.h")->id());
+
+ // The file should have shrunk more.
+ struct stat st;
+ ASSERT_EQ(0, stat(kTestFilename, &st));
+ int file_size_4 = (int)st.st_size;
+ ASSERT_LT(file_size_4, file_size_3);
+ }
+}
+
+// Verify that invalid file headers cause a new build.
+TEST_F(DepsLogTest, InvalidHeader) {
+ const char *kInvalidHeaders[] = {
+ "", // Empty file.
+ "# ninjad", // Truncated first line.
+ "# ninjadeps\n", // No version int.
+ "# ninjadeps\n\001\002", // Truncated version int.
+ "# ninjadeps\n\001\002\003\004" // Invalid version int.
+ };
+ for (size_t i = 0; i < sizeof(kInvalidHeaders) / sizeof(kInvalidHeaders[0]);
+ ++i) {
+ FILE* deps_log = fopen(kTestFilename, "wb");
+ ASSERT_TRUE(deps_log != NULL);
+ ASSERT_EQ(
+ strlen(kInvalidHeaders[i]),
+ fwrite(kInvalidHeaders[i], 1, strlen(kInvalidHeaders[i]), deps_log));
+ ASSERT_EQ(0 ,fclose(deps_log));
+
+ string err;
+ DepsLog log;
+ State state;
+ ASSERT_TRUE(log.Load(kTestFilename, &state, &err));
+ EXPECT_EQ("bad deps log signature or version; starting over", err);
+ }
+}
+
+// Simulate what happens when loading a truncated log file.
+TEST_F(DepsLogTest, Truncated) {
+ // Create a file with some entries.
+ {
+ State state;
+ DepsLog log;
+ string err;
+ EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ vector<Node*> deps;
+ deps.push_back(state.GetNode("foo.h", 0));
+ deps.push_back(state.GetNode("bar.h", 0));
+ log.RecordDeps(state.GetNode("out.o", 0), 1, deps);
+
+ deps.clear();
+ deps.push_back(state.GetNode("foo.h", 0));
+ deps.push_back(state.GetNode("bar2.h", 0));
+ log.RecordDeps(state.GetNode("out2.o", 0), 2, deps);
+
+ log.Close();
+ }
+
+ // Get the file size.
+ struct stat st;
+ ASSERT_EQ(0, stat(kTestFilename, &st));
+
+ // Try reloading at truncated sizes.
+ // Track how many nodes/deps were found; they should decrease with
+ // smaller sizes.
+ int node_count = 5;
+ int deps_count = 2;
+ for (int size = (int)st.st_size; size > 0; --size) {
+ string err;
+ ASSERT_TRUE(Truncate(kTestFilename, size, &err));
+
+ State state;
+ DepsLog log;
+ EXPECT_TRUE(log.Load(kTestFilename, &state, &err));
+ if (!err.empty()) {
+ // At some point the log will be so short as to be unparseable.
+ break;
+ }
+
+ ASSERT_GE(node_count, (int)log.nodes().size());
+ node_count = log.nodes().size();
+
+ // Count how many non-NULL deps entries there are.
+ int new_deps_count = 0;
+ for (vector<DepsLog::Deps*>::const_iterator i = log.deps().begin();
+ i != log.deps().end(); ++i) {
+ if (*i)
+ ++new_deps_count;
+ }
+ ASSERT_GE(deps_count, new_deps_count);
+ deps_count = new_deps_count;
+ }
+}
+
+// Run the truncation-recovery logic.
+TEST_F(DepsLogTest, TruncatedRecovery) {
+ // Create a file with some entries.
+ {
+ State state;
+ DepsLog log;
+ string err;
+ EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ vector<Node*> deps;
+ deps.push_back(state.GetNode("foo.h", 0));
+ deps.push_back(state.GetNode("bar.h", 0));
+ log.RecordDeps(state.GetNode("out.o", 0), 1, deps);
+
+ deps.clear();
+ deps.push_back(state.GetNode("foo.h", 0));
+ deps.push_back(state.GetNode("bar2.h", 0));
+ log.RecordDeps(state.GetNode("out2.o", 0), 2, deps);
+
+ log.Close();
+ }
+
+ // Shorten the file, corrupting the last record.
+ {
+ struct stat st;
+ ASSERT_EQ(0, stat(kTestFilename, &st));
+ string err;
+ ASSERT_TRUE(Truncate(kTestFilename, st.st_size - 2, &err));
+ }
+
+ // Load the file again, add an entry.
+ {
+ State state;
+ DepsLog log;
+ string err;
+ EXPECT_TRUE(log.Load(kTestFilename, &state, &err));
+ ASSERT_EQ("premature end of file; recovering", err);
+ err.clear();
+
+ // The truncated entry should've been discarded.
+ EXPECT_EQ(NULL, log.GetDeps(state.GetNode("out2.o", 0)));
+
+ EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ // Add a new entry.
+ vector<Node*> deps;
+ deps.push_back(state.GetNode("foo.h", 0));
+ deps.push_back(state.GetNode("bar2.h", 0));
+ log.RecordDeps(state.GetNode("out2.o", 0), 3, deps);
+
+ log.Close();
+ }
+
+ // Load the file a third time to verify appending after a mangled
+ // entry doesn't break things.
+ {
+ State state;
+ DepsLog log;
+ string err;
+ EXPECT_TRUE(log.Load(kTestFilename, &state, &err));
+
+ // The truncated entry should exist.
+ DepsLog::Deps* deps = log.GetDeps(state.GetNode("out2.o", 0));
+ ASSERT_TRUE(deps);
+ }
+}
+
+} // anonymous namespace
diff --git a/src/disk_interface.cc b/src/disk_interface.cc
new file mode 100644
index 0000000..49af001
--- /dev/null
+++ b/src/disk_interface.cc
@@ -0,0 +1,287 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "disk_interface.h"
+
+#include <algorithm>
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#ifdef _WIN32
+#include <sstream>
+#include <windows.h>
+#include <direct.h> // _mkdir
+#else
+#include <unistd.h>
+#endif
+
+#include "metrics.h"
+#include "util.h"
+
+using namespace std;
+
+namespace {
+
+string DirName(const string& path) {
+#ifdef _WIN32
+ static const char kPathSeparators[] = "\\/";
+#else
+ static const char kPathSeparators[] = "/";
+#endif
+ static const char* const kEnd = kPathSeparators + sizeof(kPathSeparators) - 1;
+
+ string::size_type slash_pos = path.find_last_of(kPathSeparators);
+ if (slash_pos == string::npos)
+ return string(); // Nothing to do.
+ while (slash_pos > 0 &&
+ std::find(kPathSeparators, kEnd, path[slash_pos - 1]) != kEnd)
+ --slash_pos;
+ return path.substr(0, slash_pos);
+}
+
+int MakeDir(const string& path) {
+#ifdef _WIN32
+ return _mkdir(path.c_str());
+#else
+ return mkdir(path.c_str(), 0777);
+#endif
+}
+
+#ifdef _WIN32
+TimeStamp TimeStampFromFileTime(const FILETIME& filetime) {
+ // FILETIME is in 100-nanosecond increments since the Windows epoch.
+ // We don't much care about epoch correctness but we do want the
+ // resulting value to fit in a 64-bit integer.
+ uint64_t mtime = ((uint64_t)filetime.dwHighDateTime << 32) |
+ ((uint64_t)filetime.dwLowDateTime);
+ // 1600 epoch -> 2000 epoch (subtract 400 years).
+ return (TimeStamp)mtime - 12622770400LL * (1000000000LL / 100);
+}
+
+TimeStamp StatSingleFile(const string& path, string* err) {
+ WIN32_FILE_ATTRIBUTE_DATA attrs;
+ if (!GetFileAttributesExA(path.c_str(), GetFileExInfoStandard, &attrs)) {
+ DWORD win_err = GetLastError();
+ if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND)
+ return 0;
+ *err = "GetFileAttributesEx(" + path + "): " + GetLastErrorString();
+ return -1;
+ }
+ return TimeStampFromFileTime(attrs.ftLastWriteTime);
+}
+
+bool IsWindows7OrLater() {
+ OSVERSIONINFOEX version_info =
+ { sizeof(OSVERSIONINFOEX), 6, 1, 0, 0, {0}, 0, 0, 0, 0, 0};
+ DWORDLONG comparison = 0;
+ VER_SET_CONDITION(comparison, VER_MAJORVERSION, VER_GREATER_EQUAL);
+ VER_SET_CONDITION(comparison, VER_MINORVERSION, VER_GREATER_EQUAL);
+ return VerifyVersionInfo(
+ &version_info, VER_MAJORVERSION | VER_MINORVERSION, comparison);
+}
+
+bool StatAllFilesInDir(const string& dir, map<string, TimeStamp>* stamps,
+ string* err) {
+ // FindExInfoBasic is 30% faster than FindExInfoStandard.
+ static bool can_use_basic_info = IsWindows7OrLater();
+ // This is not in earlier SDKs.
+ const FINDEX_INFO_LEVELS kFindExInfoBasic =
+ static_cast<FINDEX_INFO_LEVELS>(1);
+ FINDEX_INFO_LEVELS level =
+ can_use_basic_info ? kFindExInfoBasic : FindExInfoStandard;
+ WIN32_FIND_DATAA ffd;
+ HANDLE find_handle = FindFirstFileExA((dir + "\\*").c_str(), level, &ffd,
+ FindExSearchNameMatch, NULL, 0);
+
+ if (find_handle == INVALID_HANDLE_VALUE) {
+ DWORD win_err = GetLastError();
+ if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND)
+ return true;
+ *err = "FindFirstFileExA(" + dir + "): " + GetLastErrorString();
+ return false;
+ }
+ do {
+ string lowername = ffd.cFileName;
+ if (lowername == "..") {
+ // Seems to just copy the timestamp for ".." from ".", which is wrong.
+ // This is the case at least on NTFS under Windows 7.
+ continue;
+ }
+ transform(lowername.begin(), lowername.end(), lowername.begin(), ::tolower);
+ stamps->insert(make_pair(lowername,
+ TimeStampFromFileTime(ffd.ftLastWriteTime)));
+ } while (FindNextFileA(find_handle, &ffd));
+ FindClose(find_handle);
+ return true;
+}
+#endif // _WIN32
+
+} // namespace
+
+// DiskInterface ---------------------------------------------------------------
+
+bool DiskInterface::MakeDirs(const string& path) {
+ string dir = DirName(path);
+ if (dir.empty())
+ return true; // Reached root; assume it's there.
+ string err;
+ TimeStamp mtime = Stat(dir, &err);
+ if (mtime < 0) {
+ Error("%s", err.c_str());
+ return false;
+ }
+ if (mtime > 0)
+ return true; // Exists already; we're done.
+
+ // Directory doesn't exist. Try creating its parent first.
+ bool success = MakeDirs(dir);
+ if (!success)
+ return false;
+ return MakeDir(dir);
+}
+
+// RealDiskInterface -----------------------------------------------------------
+
+TimeStamp RealDiskInterface::Stat(const string& path, string* err) const {
+ METRIC_RECORD("node stat");
+#ifdef _WIN32
+ // MSDN: "Naming Files, Paths, and Namespaces"
+ // http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
+ if (!path.empty() && path[0] != '\\' && path.size() > MAX_PATH) {
+ ostringstream err_stream;
+ err_stream << "Stat(" << path << "): Filename longer than " << MAX_PATH
+ << " characters";
+ *err = err_stream.str();
+ return -1;
+ }
+ if (!use_cache_)
+ return StatSingleFile(path, err);
+
+ string dir = DirName(path);
+ string base(path.substr(dir.size() ? dir.size() + 1 : 0));
+ if (base == "..") {
+ // StatAllFilesInDir does not report any information for base = "..".
+ base = ".";
+ dir = path;
+ }
+
+ transform(dir.begin(), dir.end(), dir.begin(), ::tolower);
+ transform(base.begin(), base.end(), base.begin(), ::tolower);
+
+ Cache::iterator ci = cache_.find(dir);
+ if (ci == cache_.end()) {
+ ci = cache_.insert(make_pair(dir, DirCache())).first;
+ if (!StatAllFilesInDir(dir.empty() ? "." : dir, &ci->second, err)) {
+ cache_.erase(ci);
+ return -1;
+ }
+ }
+ DirCache::iterator di = ci->second.find(base);
+ return di != ci->second.end() ? di->second : 0;
+#else
+ struct stat st;
+ if (stat(path.c_str(), &st) < 0) {
+ if (errno == ENOENT || errno == ENOTDIR)
+ return 0;
+ *err = "stat(" + path + "): " + strerror(errno);
+ return -1;
+ }
+ // Some users (Flatpak) set mtime to 0, this should be harmless
+ // and avoids conflicting with our return value of 0 meaning
+ // that it doesn't exist.
+ if (st.st_mtime == 0)
+ return 1;
+#if defined(_AIX)
+ return (int64_t)st.st_mtime * 1000000000LL + st.st_mtime_n;
+#elif defined(__APPLE__)
+ return ((int64_t)st.st_mtimespec.tv_sec * 1000000000LL +
+ st.st_mtimespec.tv_nsec);
+#elif defined(st_mtime) // A macro, so we're likely on modern POSIX.
+ return (int64_t)st.st_mtim.tv_sec * 1000000000LL + st.st_mtim.tv_nsec;
+#else
+ return (int64_t)st.st_mtime * 1000000000LL + st.st_mtimensec;
+#endif
+#endif
+}
+
+bool RealDiskInterface::WriteFile(const string& path, const string& contents) {
+ FILE* fp = fopen(path.c_str(), "w");
+ if (fp == NULL) {
+ Error("WriteFile(%s): Unable to create file. %s",
+ path.c_str(), strerror(errno));
+ return false;
+ }
+
+ if (fwrite(contents.data(), 1, contents.length(), fp) < contents.length()) {
+ Error("WriteFile(%s): Unable to write to the file. %s",
+ path.c_str(), strerror(errno));
+ fclose(fp);
+ return false;
+ }
+
+ if (fclose(fp) == EOF) {
+ Error("WriteFile(%s): Unable to close the file. %s",
+ path.c_str(), strerror(errno));
+ return false;
+ }
+
+ return true;
+}
+
+bool RealDiskInterface::MakeDir(const string& path) {
+ if (::MakeDir(path) < 0) {
+ if (errno == EEXIST) {
+ return true;
+ }
+ Error("mkdir(%s): %s", path.c_str(), strerror(errno));
+ return false;
+ }
+ return true;
+}
+
+FileReader::Status RealDiskInterface::ReadFile(const string& path,
+ string* contents,
+ string* err) {
+ switch (::ReadFile(path, contents, err)) {
+ case 0: return Okay;
+ case -ENOENT: return NotFound;
+ default: return OtherError;
+ }
+}
+
+int RealDiskInterface::RemoveFile(const string& path) {
+ if (remove(path.c_str()) < 0) {
+ switch (errno) {
+ case ENOENT:
+ return 1;
+ default:
+ Error("remove(%s): %s", path.c_str(), strerror(errno));
+ return -1;
+ }
+ } else {
+ return 0;
+ }
+}
+
+void RealDiskInterface::AllowStatCache(bool allow) {
+#ifdef _WIN32
+ use_cache_ = allow;
+ if (!use_cache_)
+ cache_.clear();
+#endif
+}
diff --git a/src/disk_interface.h b/src/disk_interface.h
new file mode 100644
index 0000000..bc29ab7
--- /dev/null
+++ b/src/disk_interface.h
@@ -0,0 +1,101 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_DISK_INTERFACE_H_
+#define NINJA_DISK_INTERFACE_H_
+
+#include <map>
+#include <string>
+
+#include "timestamp.h"
+
+/// Interface for reading files from disk. See DiskInterface for details.
+/// This base offers the minimum interface needed just to read files.
+struct FileReader {
+ virtual ~FileReader() {}
+
+ /// Result of ReadFile.
+ enum Status {
+ Okay,
+ NotFound,
+ OtherError
+ };
+
+ /// Read and store in given string. On success, return Okay.
+ /// On error, return another Status and fill |err|.
+ virtual Status ReadFile(const std::string& path, std::string* contents,
+ std::string* err) = 0;
+};
+
+/// Interface for accessing the disk.
+///
+/// Abstract so it can be mocked out for tests. The real implementation
+/// is RealDiskInterface.
+struct DiskInterface: public FileReader {
+ /// stat() a file, returning the mtime, or 0 if missing and -1 on
+ /// other errors.
+ virtual TimeStamp Stat(const std::string& path, std::string* err) const = 0;
+
+ /// Create a directory, returning false on failure.
+ virtual bool MakeDir(const std::string& path) = 0;
+
+ /// Create a file, with the specified name and contents
+ /// Returns true on success, false on failure
+ virtual bool WriteFile(const std::string& path,
+ const std::string& contents) = 0;
+
+ /// Remove the file named @a path. It behaves like 'rm -f path' so no errors
+ /// are reported if it does not exist.
+ /// @returns 0 if the file has been removed,
+ /// 1 if the file does not exist, and
+ /// -1 if an error occurs.
+ virtual int RemoveFile(const std::string& path) = 0;
+
+ /// Create all the parent directories for path; like mkdir -p
+ /// `basename path`.
+ bool MakeDirs(const std::string& path);
+};
+
+/// Implementation of DiskInterface that actually hits the disk.
+struct RealDiskInterface : public DiskInterface {
+ RealDiskInterface()
+#ifdef _WIN32
+ : use_cache_(false)
+#endif
+ {}
+ virtual ~RealDiskInterface() {}
+ virtual TimeStamp Stat(const std::string& path, std::string* err) const;
+ virtual bool MakeDir(const std::string& path);
+ virtual bool WriteFile(const std::string& path, const std::string& contents);
+ virtual Status ReadFile(const std::string& path, std::string* contents,
+ std::string* err);
+ virtual int RemoveFile(const std::string& path);
+
+ /// Whether stat information can be cached. Only has an effect on Windows.
+ void AllowStatCache(bool allow);
+
+ private:
+#ifdef _WIN32
+ /// Whether stat information can be cached.
+ bool use_cache_;
+
+ typedef std::map<std::string, TimeStamp> DirCache;
+ // TODO: Neither a map nor a hashmap seems ideal here. If the statcache
+ // works out, come up with a better data structure.
+ typedef std::map<std::string, DirCache> Cache;
+ mutable Cache cache_;
+#endif
+};
+
+#endif // NINJA_DISK_INTERFACE_H_
diff --git a/src/disk_interface_test.cc b/src/disk_interface_test.cc
new file mode 100644
index 0000000..066c770
--- /dev/null
+++ b/src/disk_interface_test.cc
@@ -0,0 +1,324 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <assert.h>
+#include <stdio.h>
+#ifdef _WIN32
+#include <io.h>
+#include <windows.h>
+#endif
+
+#include "disk_interface.h"
+#include "graph.h"
+#include "test.h"
+
+using namespace std;
+
+namespace {
+
+struct DiskInterfaceTest : public testing::Test {
+ virtual void SetUp() {
+ // These tests do real disk accesses, so create a temp dir.
+ temp_dir_.CreateAndEnter("Ninja-DiskInterfaceTest");
+ }
+
+ virtual void TearDown() {
+ temp_dir_.Cleanup();
+ }
+
+ bool Touch(const char* path) {
+ FILE *f = fopen(path, "w");
+ if (!f)
+ return false;
+ return fclose(f) == 0;
+ }
+
+ ScopedTempDir temp_dir_;
+ RealDiskInterface disk_;
+};
+
+TEST_F(DiskInterfaceTest, StatMissingFile) {
+ string err;
+ EXPECT_EQ(0, disk_.Stat("nosuchfile", &err));
+ EXPECT_EQ("", err);
+
+ // On Windows, the errno for a file in a nonexistent directory
+ // is different.
+ EXPECT_EQ(0, disk_.Stat("nosuchdir/nosuchfile", &err));
+ EXPECT_EQ("", err);
+
+ // On POSIX systems, the errno is different if a component of the
+ // path prefix is not a directory.
+ ASSERT_TRUE(Touch("notadir"));
+ EXPECT_EQ(0, disk_.Stat("notadir/nosuchfile", &err));
+ EXPECT_EQ("", err);
+}
+
+TEST_F(DiskInterfaceTest, StatBadPath) {
+ string err;
+#ifdef _WIN32
+ string bad_path("cc:\\foo");
+ EXPECT_EQ(-1, disk_.Stat(bad_path, &err));
+ EXPECT_NE("", err);
+#else
+ string too_long_name(512, 'x');
+ EXPECT_EQ(-1, disk_.Stat(too_long_name, &err));
+ EXPECT_NE("", err);
+#endif
+}
+
+TEST_F(DiskInterfaceTest, StatExistingFile) {
+ string err;
+ ASSERT_TRUE(Touch("file"));
+ EXPECT_GT(disk_.Stat("file", &err), 1);
+ EXPECT_EQ("", err);
+}
+
+TEST_F(DiskInterfaceTest, StatExistingDir) {
+ string err;
+ ASSERT_TRUE(disk_.MakeDir("subdir"));
+ ASSERT_TRUE(disk_.MakeDir("subdir/subsubdir"));
+ EXPECT_GT(disk_.Stat("..", &err), 1);
+ EXPECT_EQ("", err);
+ EXPECT_GT(disk_.Stat(".", &err), 1);
+ EXPECT_EQ("", err);
+ EXPECT_GT(disk_.Stat("subdir", &err), 1);
+ EXPECT_EQ("", err);
+ EXPECT_GT(disk_.Stat("subdir/subsubdir", &err), 1);
+ EXPECT_EQ("", err);
+
+ EXPECT_EQ(disk_.Stat("subdir", &err),
+ disk_.Stat("subdir/.", &err));
+ EXPECT_EQ(disk_.Stat("subdir", &err),
+ disk_.Stat("subdir/subsubdir/..", &err));
+ EXPECT_EQ(disk_.Stat("subdir/subsubdir", &err),
+ disk_.Stat("subdir/subsubdir/.", &err));
+}
+
+#ifdef _WIN32
+TEST_F(DiskInterfaceTest, StatCache) {
+ string err;
+
+ ASSERT_TRUE(Touch("file1"));
+ ASSERT_TRUE(Touch("fiLE2"));
+ ASSERT_TRUE(disk_.MakeDir("subdir"));
+ ASSERT_TRUE(disk_.MakeDir("subdir/subsubdir"));
+ ASSERT_TRUE(Touch("subdir\\subfile1"));
+ ASSERT_TRUE(Touch("subdir\\SUBFILE2"));
+ ASSERT_TRUE(Touch("subdir\\SUBFILE3"));
+
+ disk_.AllowStatCache(false);
+ TimeStamp parent_stat_uncached = disk_.Stat("..", &err);
+ disk_.AllowStatCache(true);
+
+ EXPECT_GT(disk_.Stat("FIle1", &err), 1);
+ EXPECT_EQ("", err);
+ EXPECT_GT(disk_.Stat("file1", &err), 1);
+ EXPECT_EQ("", err);
+
+ EXPECT_GT(disk_.Stat("subdir/subfile2", &err), 1);
+ EXPECT_EQ("", err);
+ EXPECT_GT(disk_.Stat("sUbdir\\suBFile1", &err), 1);
+ EXPECT_EQ("", err);
+
+ EXPECT_GT(disk_.Stat("..", &err), 1);
+ EXPECT_EQ("", err);
+ EXPECT_GT(disk_.Stat(".", &err), 1);
+ EXPECT_EQ("", err);
+ EXPECT_GT(disk_.Stat("subdir", &err), 1);
+ EXPECT_EQ("", err);
+ EXPECT_GT(disk_.Stat("subdir/subsubdir", &err), 1);
+ EXPECT_EQ("", err);
+
+#ifndef _MSC_VER // TODO: Investigate why. Also see https://github.com/ninja-build/ninja/pull/1423
+ EXPECT_EQ(disk_.Stat("subdir", &err),
+ disk_.Stat("subdir/.", &err));
+ EXPECT_EQ("", err);
+ EXPECT_EQ(disk_.Stat("subdir", &err),
+ disk_.Stat("subdir/subsubdir/..", &err));
+#endif
+ EXPECT_EQ("", err);
+ EXPECT_EQ(disk_.Stat("..", &err), parent_stat_uncached);
+ EXPECT_EQ("", err);
+ EXPECT_EQ(disk_.Stat("subdir/subsubdir", &err),
+ disk_.Stat("subdir/subsubdir/.", &err));
+ EXPECT_EQ("", err);
+
+ // Test error cases.
+ string bad_path("cc:\\foo");
+ EXPECT_EQ(-1, disk_.Stat(bad_path, &err));
+ EXPECT_NE("", err); err.clear();
+ EXPECT_EQ(-1, disk_.Stat(bad_path, &err));
+ EXPECT_NE("", err); err.clear();
+ EXPECT_EQ(0, disk_.Stat("nosuchfile", &err));
+ EXPECT_EQ("", err);
+ EXPECT_EQ(0, disk_.Stat("nosuchdir/nosuchfile", &err));
+ EXPECT_EQ("", err);
+}
+#endif
+
+TEST_F(DiskInterfaceTest, ReadFile) {
+ string err;
+ std::string content;
+ ASSERT_EQ(DiskInterface::NotFound,
+ disk_.ReadFile("foobar", &content, &err));
+ EXPECT_EQ("", content);
+ EXPECT_NE("", err); // actual value is platform-specific
+ err.clear();
+
+ const char* kTestFile = "testfile";
+ FILE* f = fopen(kTestFile, "wb");
+ ASSERT_TRUE(f);
+ const char* kTestContent = "test content\nok";
+ fprintf(f, "%s", kTestContent);
+ ASSERT_EQ(0, fclose(f));
+
+ ASSERT_EQ(DiskInterface::Okay,
+ disk_.ReadFile(kTestFile, &content, &err));
+ EXPECT_EQ(kTestContent, content);
+ EXPECT_EQ("", err);
+}
+
+TEST_F(DiskInterfaceTest, MakeDirs) {
+ string path = "path/with/double//slash/";
+ EXPECT_TRUE(disk_.MakeDirs(path));
+ FILE* f = fopen((path + "a_file").c_str(), "w");
+ EXPECT_TRUE(f);
+ EXPECT_EQ(0, fclose(f));
+#ifdef _WIN32
+ string path2 = "another\\with\\back\\\\slashes\\";
+ EXPECT_TRUE(disk_.MakeDirs(path2.c_str()));
+ FILE* f2 = fopen((path2 + "a_file").c_str(), "w");
+ EXPECT_TRUE(f2);
+ EXPECT_EQ(0, fclose(f2));
+#endif
+}
+
+TEST_F(DiskInterfaceTest, RemoveFile) {
+ const char* kFileName = "file-to-remove";
+ ASSERT_TRUE(Touch(kFileName));
+ EXPECT_EQ(0, disk_.RemoveFile(kFileName));
+ EXPECT_EQ(1, disk_.RemoveFile(kFileName));
+ EXPECT_EQ(1, disk_.RemoveFile("does not exist"));
+}
+
+struct StatTest : public StateTestWithBuiltinRules,
+ public DiskInterface {
+ StatTest() : scan_(&state_, NULL, NULL, this, NULL) {}
+
+ // DiskInterface implementation.
+ virtual TimeStamp Stat(const string& path, string* err) const;
+ virtual bool WriteFile(const string& path, const string& contents) {
+ assert(false);
+ return true;
+ }
+ virtual bool MakeDir(const string& path) {
+ assert(false);
+ return false;
+ }
+ virtual Status ReadFile(const string& path, string* contents, string* err) {
+ assert(false);
+ return NotFound;
+ }
+ virtual int RemoveFile(const string& path) {
+ assert(false);
+ return 0;
+ }
+
+ DependencyScan scan_;
+ map<string, TimeStamp> mtimes_;
+ mutable vector<string> stats_;
+};
+
+TimeStamp StatTest::Stat(const string& path, string* err) const {
+ stats_.push_back(path);
+ map<string, TimeStamp>::const_iterator i = mtimes_.find(path);
+ if (i == mtimes_.end())
+ return 0; // File not found.
+ return i->second;
+}
+
+TEST_F(StatTest, Simple) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out: cat in\n"));
+
+ Node* out = GetNode("out");
+ string err;
+ EXPECT_TRUE(out->Stat(this, &err));
+ EXPECT_EQ("", err);
+ ASSERT_EQ(1u, stats_.size());
+ scan_.RecomputeDirty(out, NULL);
+ ASSERT_EQ(2u, stats_.size());
+ ASSERT_EQ("out", stats_[0]);
+ ASSERT_EQ("in", stats_[1]);
+}
+
+TEST_F(StatTest, TwoStep) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out: cat mid\n"
+"build mid: cat in\n"));
+
+ Node* out = GetNode("out");
+ string err;
+ EXPECT_TRUE(out->Stat(this, &err));
+ EXPECT_EQ("", err);
+ ASSERT_EQ(1u, stats_.size());
+ scan_.RecomputeDirty(out, NULL);
+ ASSERT_EQ(3u, stats_.size());
+ ASSERT_EQ("out", stats_[0]);
+ ASSERT_TRUE(GetNode("out")->dirty());
+ ASSERT_EQ("mid", stats_[1]);
+ ASSERT_TRUE(GetNode("mid")->dirty());
+ ASSERT_EQ("in", stats_[2]);
+}
+
+TEST_F(StatTest, Tree) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out: cat mid1 mid2\n"
+"build mid1: cat in11 in12\n"
+"build mid2: cat in21 in22\n"));
+
+ Node* out = GetNode("out");
+ string err;
+ EXPECT_TRUE(out->Stat(this, &err));
+ EXPECT_EQ("", err);
+ ASSERT_EQ(1u, stats_.size());
+ scan_.RecomputeDirty(out, NULL);
+ ASSERT_EQ(1u + 6u, stats_.size());
+ ASSERT_EQ("mid1", stats_[1]);
+ ASSERT_TRUE(GetNode("mid1")->dirty());
+ ASSERT_EQ("in11", stats_[2]);
+}
+
+TEST_F(StatTest, Middle) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out: cat mid\n"
+"build mid: cat in\n"));
+
+ mtimes_["in"] = 1;
+ mtimes_["mid"] = 0; // missing
+ mtimes_["out"] = 1;
+
+ Node* out = GetNode("out");
+ string err;
+ EXPECT_TRUE(out->Stat(this, &err));
+ EXPECT_EQ("", err);
+ ASSERT_EQ(1u, stats_.size());
+ scan_.RecomputeDirty(out, NULL);
+ ASSERT_FALSE(GetNode("in")->dirty());
+ ASSERT_TRUE(GetNode("mid")->dirty());
+ ASSERT_TRUE(GetNode("out")->dirty());
+}
+
+} // namespace
diff --git a/src/dyndep.cc b/src/dyndep.cc
new file mode 100644
index 0000000..b388e9b
--- /dev/null
+++ b/src/dyndep.cc
@@ -0,0 +1,126 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dyndep.h"
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "debug_flags.h"
+#include "disk_interface.h"
+#include "dyndep_parser.h"
+#include "graph.h"
+#include "state.h"
+#include "util.h"
+
+using namespace std;
+
+bool DyndepLoader::LoadDyndeps(Node* node, std::string* err) const {
+ DyndepFile ddf;
+ return LoadDyndeps(node, &ddf, err);
+}
+
+bool DyndepLoader::LoadDyndeps(Node* node, DyndepFile* ddf,
+ std::string* err) const {
+ // We are loading the dyndep file now so it is no longer pending.
+ node->set_dyndep_pending(false);
+
+ // Load the dyndep information from the file.
+ EXPLAIN("loading dyndep file '%s'", node->path().c_str());
+ if (!LoadDyndepFile(node, ddf, err))
+ return false;
+
+ // Update each edge that specified this node as its dyndep binding.
+ std::vector<Edge*> const& out_edges = node->out_edges();
+ for (std::vector<Edge*>::const_iterator oe = out_edges.begin();
+ oe != out_edges.end(); ++oe) {
+ Edge* const edge = *oe;
+ if (edge->dyndep_ != node)
+ continue;
+
+ DyndepFile::iterator ddi = ddf->find(edge);
+ if (ddi == ddf->end()) {
+ *err = ("'" + edge->outputs_[0]->path() + "' "
+ "not mentioned in its dyndep file "
+ "'" + node->path() + "'");
+ return false;
+ }
+
+ ddi->second.used_ = true;
+ Dyndeps const& dyndeps = ddi->second;
+ if (!UpdateEdge(edge, &dyndeps, err)) {
+ return false;
+ }
+ }
+
+ // Reject extra outputs in dyndep file.
+ for (DyndepFile::const_iterator oe = ddf->begin(); oe != ddf->end();
+ ++oe) {
+ if (!oe->second.used_) {
+ Edge* const edge = oe->first;
+ *err = ("dyndep file '" + node->path() + "' mentions output "
+ "'" + edge->outputs_[0]->path() + "' whose build statement "
+ "does not have a dyndep binding for the file");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool DyndepLoader::UpdateEdge(Edge* edge, Dyndeps const* dyndeps,
+ std::string* err) const {
+ // Add dyndep-discovered bindings to the edge.
+ // We know the edge already has its own binding
+ // scope because it has a "dyndep" binding.
+ if (dyndeps->restat_)
+ edge->env_->AddBinding("restat", "1");
+
+ // Add the dyndep-discovered outputs to the edge.
+ edge->outputs_.insert(edge->outputs_.end(),
+ dyndeps->implicit_outputs_.begin(),
+ dyndeps->implicit_outputs_.end());
+ edge->implicit_outs_ += dyndeps->implicit_outputs_.size();
+
+ // Add this edge as incoming to each new output.
+ for (std::vector<Node*>::const_iterator i =
+ dyndeps->implicit_outputs_.begin();
+ i != dyndeps->implicit_outputs_.end(); ++i) {
+ if ((*i)->in_edge() != NULL) {
+ *err = "multiple rules generate " + (*i)->path();
+ return false;
+ }
+ (*i)->set_in_edge(edge);
+ }
+
+ // Add the dyndep-discovered inputs to the edge.
+ edge->inputs_.insert(edge->inputs_.end() - edge->order_only_deps_,
+ dyndeps->implicit_inputs_.begin(),
+ dyndeps->implicit_inputs_.end());
+ edge->implicit_deps_ += dyndeps->implicit_inputs_.size();
+
+ // Add this edge as outgoing from each new input.
+ for (std::vector<Node*>::const_iterator i =
+ dyndeps->implicit_inputs_.begin();
+ i != dyndeps->implicit_inputs_.end(); ++i)
+ (*i)->AddOutEdge(edge);
+
+ return true;
+}
+
+bool DyndepLoader::LoadDyndepFile(Node* file, DyndepFile* ddf,
+ std::string* err) const {
+ DyndepParser parser(state_, disk_interface_, ddf);
+ return parser.Load(file->path(), err);
+}
diff --git a/src/dyndep.h b/src/dyndep.h
new file mode 100644
index 0000000..907f921
--- /dev/null
+++ b/src/dyndep.h
@@ -0,0 +1,64 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_DYNDEP_LOADER_H_
+#define NINJA_DYNDEP_LOADER_H_
+
+#include <map>
+#include <string>
+#include <vector>
+
+struct DiskInterface;
+struct Edge;
+struct Node;
+struct State;
+
+/// Store dynamically-discovered dependency information for one edge.
+struct Dyndeps {
+ Dyndeps() : used_(false), restat_(false) {}
+ bool used_;
+ bool restat_;
+ std::vector<Node*> implicit_inputs_;
+ std::vector<Node*> implicit_outputs_;
+};
+
+/// Store data loaded from one dyndep file. Map from an edge
+/// to its dynamically-discovered dependency information.
+/// This is a struct rather than a typedef so that we can
+/// forward-declare it in other headers.
+struct DyndepFile: public std::map<Edge*, Dyndeps> {};
+
+/// DyndepLoader loads dynamically discovered dependencies, as
+/// referenced via the "dyndep" attribute in build files.
+struct DyndepLoader {
+ DyndepLoader(State* state, DiskInterface* disk_interface)
+ : state_(state), disk_interface_(disk_interface) {}
+
+ /// Load a dyndep file from the given node's path and update the
+ /// build graph with the new information. One overload accepts
+ /// a caller-owned 'DyndepFile' object in which to store the
+ /// information loaded from the dyndep file.
+ bool LoadDyndeps(Node* node, std::string* err) const;
+ bool LoadDyndeps(Node* node, DyndepFile* ddf, std::string* err) const;
+
+ private:
+ bool LoadDyndepFile(Node* file, DyndepFile* ddf, std::string* err) const;
+
+ bool UpdateEdge(Edge* edge, Dyndeps const* dyndeps, std::string* err) const;
+
+ State* state_;
+ DiskInterface* disk_interface_;
+};
+
+#endif // NINJA_DYNDEP_LOADER_H_
diff --git a/src/dyndep_parser.cc b/src/dyndep_parser.cc
new file mode 100644
index 0000000..56da16f
--- /dev/null
+++ b/src/dyndep_parser.cc
@@ -0,0 +1,225 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dyndep_parser.h"
+
+#include <vector>
+
+#include "dyndep.h"
+#include "graph.h"
+#include "state.h"
+#include "util.h"
+#include "version.h"
+
+using namespace std;
+
+DyndepParser::DyndepParser(State* state, FileReader* file_reader,
+ DyndepFile* dyndep_file)
+ : Parser(state, file_reader)
+ , dyndep_file_(dyndep_file) {
+}
+
+bool DyndepParser::Parse(const string& filename, const string& input,
+ string* err) {
+ lexer_.Start(filename, input);
+
+ // Require a supported ninja_dyndep_version value immediately so
+ // we can exit before encountering any syntactic surprises.
+ bool haveDyndepVersion = false;
+
+ for (;;) {
+ Lexer::Token token = lexer_.ReadToken();
+ switch (token) {
+ case Lexer::BUILD: {
+ if (!haveDyndepVersion)
+ return lexer_.Error("expected 'ninja_dyndep_version = ...'", err);
+ if (!ParseEdge(err))
+ return false;
+ break;
+ }
+ case Lexer::IDENT: {
+ lexer_.UnreadToken();
+ if (haveDyndepVersion)
+ return lexer_.Error(string("unexpected ") + Lexer::TokenName(token),
+ err);
+ if (!ParseDyndepVersion(err))
+ return false;
+ haveDyndepVersion = true;
+ break;
+ }
+ case Lexer::ERROR:
+ return lexer_.Error(lexer_.DescribeLastError(), err);
+ case Lexer::TEOF:
+ if (!haveDyndepVersion)
+ return lexer_.Error("expected 'ninja_dyndep_version = ...'", err);
+ return true;
+ case Lexer::NEWLINE:
+ break;
+ default:
+ return lexer_.Error(string("unexpected ") + Lexer::TokenName(token),
+ err);
+ }
+ }
+ return false; // not reached
+}
+
+bool DyndepParser::ParseDyndepVersion(string* err) {
+ string name;
+ EvalString let_value;
+ if (!ParseLet(&name, &let_value, err))
+ return false;
+ if (name != "ninja_dyndep_version") {
+ return lexer_.Error("expected 'ninja_dyndep_version = ...'", err);
+ }
+ string version = let_value.Evaluate(&env_);
+ int major, minor;
+ ParseVersion(version, &major, &minor);
+ if (major != 1 || minor != 0) {
+ return lexer_.Error(
+ string("unsupported 'ninja_dyndep_version = ") + version + "'", err);
+ return false;
+ }
+ return true;
+}
+
+bool DyndepParser::ParseLet(string* key, EvalString* value, string* err) {
+ if (!lexer_.ReadIdent(key))
+ return lexer_.Error("expected variable name", err);
+ if (!ExpectToken(Lexer::EQUALS, err))
+ return false;
+ if (!lexer_.ReadVarValue(value, err))
+ return false;
+ return true;
+}
+
+bool DyndepParser::ParseEdge(string* err) {
+ // Parse one explicit output. We expect it to already have an edge.
+ // We will record its dynamically-discovered dependency information.
+ Dyndeps* dyndeps = NULL;
+ {
+ EvalString out0;
+ if (!lexer_.ReadPath(&out0, err))
+ return false;
+ if (out0.empty())
+ return lexer_.Error("expected path", err);
+
+ string path = out0.Evaluate(&env_);
+ string path_err;
+ uint64_t slash_bits;
+ if (!CanonicalizePath(&path, &slash_bits, &path_err))
+ return lexer_.Error(path_err, err);
+ Node* node = state_->LookupNode(path);
+ if (!node || !node->in_edge())
+ return lexer_.Error("no build statement exists for '" + path + "'", err);
+ Edge* edge = node->in_edge();
+ std::pair<DyndepFile::iterator, bool> res =
+ dyndep_file_->insert(DyndepFile::value_type(edge, Dyndeps()));
+ if (!res.second)
+ return lexer_.Error("multiple statements for '" + path + "'", err);
+ dyndeps = &res.first->second;
+ }
+
+ // Disallow explicit outputs.
+ {
+ EvalString out;
+ if (!lexer_.ReadPath(&out, err))
+ return false;
+ if (!out.empty())
+ return lexer_.Error("explicit outputs not supported", err);
+ }
+
+ // Parse implicit outputs, if any.
+ vector<EvalString> outs;
+ if (lexer_.PeekToken(Lexer::PIPE)) {
+ for (;;) {
+ EvalString out;
+ if (!lexer_.ReadPath(&out, err))
+ return err;
+ if (out.empty())
+ break;
+ outs.push_back(out);
+ }
+ }
+
+ if (!ExpectToken(Lexer::COLON, err))
+ return false;
+
+ string rule_name;
+ if (!lexer_.ReadIdent(&rule_name) || rule_name != "dyndep")
+ return lexer_.Error("expected build command name 'dyndep'", err);
+
+ // Disallow explicit inputs.
+ {
+ EvalString in;
+ if (!lexer_.ReadPath(&in, err))
+ return false;
+ if (!in.empty())
+ return lexer_.Error("explicit inputs not supported", err);
+ }
+
+ // Parse implicit inputs, if any.
+ vector<EvalString> ins;
+ if (lexer_.PeekToken(Lexer::PIPE)) {
+ for (;;) {
+ EvalString in;
+ if (!lexer_.ReadPath(&in, err))
+ return err;
+ if (in.empty())
+ break;
+ ins.push_back(in);
+ }
+ }
+
+ // Disallow order-only inputs.
+ if (lexer_.PeekToken(Lexer::PIPE2))
+ return lexer_.Error("order-only inputs not supported", err);
+
+ if (!ExpectToken(Lexer::NEWLINE, err))
+ return false;
+
+ if (lexer_.PeekToken(Lexer::INDENT)) {
+ string key;
+ EvalString val;
+ if (!ParseLet(&key, &val, err))
+ return false;
+ if (key != "restat")
+ return lexer_.Error("binding is not 'restat'", err);
+ string value = val.Evaluate(&env_);
+ dyndeps->restat_ = !value.empty();
+ }
+
+ dyndeps->implicit_inputs_.reserve(ins.size());
+ for (vector<EvalString>::iterator i = ins.begin(); i != ins.end(); ++i) {
+ string path = i->Evaluate(&env_);
+ string path_err;
+ uint64_t slash_bits;
+ if (!CanonicalizePath(&path, &slash_bits, &path_err))
+ return lexer_.Error(path_err, err);
+ Node* n = state_->GetNode(path, slash_bits);
+ dyndeps->implicit_inputs_.push_back(n);
+ }
+
+ dyndeps->implicit_outputs_.reserve(outs.size());
+ for (vector<EvalString>::iterator i = outs.begin(); i != outs.end(); ++i) {
+ string path = i->Evaluate(&env_);
+ string path_err;
+ uint64_t slash_bits;
+ if (!CanonicalizePath(&path, &slash_bits, &path_err))
+ return lexer_.Error(path_err, err);
+ Node* n = state_->GetNode(path, slash_bits);
+ dyndeps->implicit_outputs_.push_back(n);
+ }
+
+ return true;
+}
diff --git a/src/dyndep_parser.h b/src/dyndep_parser.h
new file mode 100644
index 0000000..8f4c28d
--- /dev/null
+++ b/src/dyndep_parser.h
@@ -0,0 +1,47 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_DYNDEP_PARSER_H_
+#define NINJA_DYNDEP_PARSER_H_
+
+#include "eval_env.h"
+#include "parser.h"
+
+struct DyndepFile;
+struct EvalString;
+
+/// Parses dyndep files.
+struct DyndepParser: public Parser {
+ DyndepParser(State* state, FileReader* file_reader,
+ DyndepFile* dyndep_file);
+
+ /// Parse a text string of input. Used by tests.
+ bool ParseTest(const std::string& input, std::string* err) {
+ return Parse("input", input, err);
+ }
+
+private:
+ /// Parse a file, given its contents as a string.
+ bool Parse(const std::string& filename, const std::string& input,
+ std:: string* err);
+
+ bool ParseDyndepVersion(std::string* err);
+ bool ParseLet(std::string* key, EvalString* val, std::string* err);
+ bool ParseEdge(std::string* err);
+
+ DyndepFile* dyndep_file_;
+ BindingEnv env_;
+};
+
+#endif // NINJA_DYNDEP_PARSER_H_
diff --git a/src/dyndep_parser_test.cc b/src/dyndep_parser_test.cc
new file mode 100644
index 0000000..1bba7ba
--- /dev/null
+++ b/src/dyndep_parser_test.cc
@@ -0,0 +1,514 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dyndep_parser.h"
+
+#include <map>
+#include <vector>
+
+#include "dyndep.h"
+#include "graph.h"
+#include "state.h"
+#include "test.h"
+
+using namespace std;
+
+struct DyndepParserTest : public testing::Test {
+ void AssertParse(const char* input) {
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_TRUE(parser.ParseTest(input, &err));
+ ASSERT_EQ("", err);
+ }
+
+ virtual void SetUp() {
+ ::AssertParse(&state_,
+"rule touch\n"
+" command = touch $out\n"
+"build out otherout: touch\n");
+ }
+
+ State state_;
+ VirtualFileSystem fs_;
+ DyndepFile dyndep_file_;
+};
+
+TEST_F(DyndepParserTest, Empty) {
+ const char kInput[] =
+"";
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:1: expected 'ninja_dyndep_version = ...'\n", err);
+}
+
+TEST_F(DyndepParserTest, Version1) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"ninja_dyndep_version = 1\n"));
+}
+
+TEST_F(DyndepParserTest, Version1Extra) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"ninja_dyndep_version = 1-extra\n"));
+}
+
+TEST_F(DyndepParserTest, Version1_0) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"ninja_dyndep_version = 1.0\n"));
+}
+
+TEST_F(DyndepParserTest, Version1_0Extra) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"ninja_dyndep_version = 1.0-extra\n"));
+}
+
+TEST_F(DyndepParserTest, CommentVersion) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"# comment\n"
+"ninja_dyndep_version = 1\n"));
+}
+
+TEST_F(DyndepParserTest, BlankLineVersion) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"\n"
+"ninja_dyndep_version = 1\n"));
+}
+
+TEST_F(DyndepParserTest, VersionCRLF) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"ninja_dyndep_version = 1\r\n"));
+}
+
+TEST_F(DyndepParserTest, CommentVersionCRLF) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"# comment\r\n"
+"ninja_dyndep_version = 1\r\n"));
+}
+
+TEST_F(DyndepParserTest, BlankLineVersionCRLF) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"\r\n"
+"ninja_dyndep_version = 1\r\n"));
+}
+
+TEST_F(DyndepParserTest, VersionUnexpectedEOF) {
+ const char kInput[] =
+"ninja_dyndep_version = 1.0";
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:1: unexpected EOF\n"
+ "ninja_dyndep_version = 1.0\n"
+ " ^ near here", err);
+}
+
+TEST_F(DyndepParserTest, UnsupportedVersion0) {
+ const char kInput[] =
+"ninja_dyndep_version = 0\n";
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:1: unsupported 'ninja_dyndep_version = 0'\n"
+ "ninja_dyndep_version = 0\n"
+ " ^ near here", err);
+}
+
+TEST_F(DyndepParserTest, UnsupportedVersion1_1) {
+ const char kInput[] =
+"ninja_dyndep_version = 1.1\n";
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:1: unsupported 'ninja_dyndep_version = 1.1'\n"
+ "ninja_dyndep_version = 1.1\n"
+ " ^ near here", err);
+}
+
+TEST_F(DyndepParserTest, DuplicateVersion) {
+ const char kInput[] =
+"ninja_dyndep_version = 1\n"
+"ninja_dyndep_version = 1\n";
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:2: unexpected identifier\n", err);
+}
+
+TEST_F(DyndepParserTest, MissingVersionOtherVar) {
+ const char kInput[] =
+"not_ninja_dyndep_version = 1\n";
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:1: expected 'ninja_dyndep_version = ...'\n"
+ "not_ninja_dyndep_version = 1\n"
+ " ^ near here", err);
+}
+
+TEST_F(DyndepParserTest, MissingVersionBuild) {
+ const char kInput[] =
+"build out: dyndep\n";
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:1: expected 'ninja_dyndep_version = ...'\n", err);
+}
+
+TEST_F(DyndepParserTest, UnexpectedEqual) {
+ const char kInput[] =
+"= 1\n";
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:1: unexpected '='\n", err);
+}
+
+TEST_F(DyndepParserTest, UnexpectedIndent) {
+ const char kInput[] =
+" = 1\n";
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:1: unexpected indent\n", err);
+}
+
+TEST_F(DyndepParserTest, OutDuplicate) {
+ const char kInput[] =
+"ninja_dyndep_version = 1\n"
+"build out: dyndep\n"
+"build out: dyndep\n";
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:3: multiple statements for 'out'\n"
+ "build out: dyndep\n"
+ " ^ near here", err);
+}
+
+TEST_F(DyndepParserTest, OutDuplicateThroughOther) {
+ const char kInput[] =
+"ninja_dyndep_version = 1\n"
+"build out: dyndep\n"
+"build otherout: dyndep\n";
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:3: multiple statements for 'otherout'\n"
+ "build otherout: dyndep\n"
+ " ^ near here", err);
+}
+
+TEST_F(DyndepParserTest, NoOutEOF) {
+ const char kInput[] =
+"ninja_dyndep_version = 1\n"
+"build";
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:2: unexpected EOF\n"
+ "build\n"
+ " ^ near here", err);
+}
+
+TEST_F(DyndepParserTest, NoOutColon) {
+ const char kInput[] =
+"ninja_dyndep_version = 1\n"
+"build :\n";
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:2: expected path\n"
+ "build :\n"
+ " ^ near here", err);
+}
+
+TEST_F(DyndepParserTest, OutNoStatement) {
+ const char kInput[] =
+"ninja_dyndep_version = 1\n"
+"build missing: dyndep\n";
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:2: no build statement exists for 'missing'\n"
+ "build missing: dyndep\n"
+ " ^ near here", err);
+}
+
+TEST_F(DyndepParserTest, OutEOF) {
+ const char kInput[] =
+"ninja_dyndep_version = 1\n"
+"build out";
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:2: unexpected EOF\n"
+ "build out\n"
+ " ^ near here", err);
+}
+
+TEST_F(DyndepParserTest, OutNoRule) {
+ const char kInput[] =
+"ninja_dyndep_version = 1\n"
+"build out:";
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:2: expected build command name 'dyndep'\n"
+ "build out:\n"
+ " ^ near here", err);
+}
+
+TEST_F(DyndepParserTest, OutBadRule) {
+ const char kInput[] =
+"ninja_dyndep_version = 1\n"
+"build out: touch";
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:2: expected build command name 'dyndep'\n"
+ "build out: touch\n"
+ " ^ near here", err);
+}
+
+TEST_F(DyndepParserTest, BuildEOF) {
+ const char kInput[] =
+"ninja_dyndep_version = 1\n"
+"build out: dyndep";
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:2: unexpected EOF\n"
+ "build out: dyndep\n"
+ " ^ near here", err);
+}
+
+TEST_F(DyndepParserTest, ExplicitOut) {
+ const char kInput[] =
+"ninja_dyndep_version = 1\n"
+"build out exp: dyndep\n";
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:2: explicit outputs not supported\n"
+ "build out exp: dyndep\n"
+ " ^ near here", err);
+}
+
+TEST_F(DyndepParserTest, ExplicitIn) {
+ const char kInput[] =
+"ninja_dyndep_version = 1\n"
+"build out: dyndep exp\n";
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:2: explicit inputs not supported\n"
+ "build out: dyndep exp\n"
+ " ^ near here", err);
+}
+
+TEST_F(DyndepParserTest, OrderOnlyIn) {
+ const char kInput[] =
+"ninja_dyndep_version = 1\n"
+"build out: dyndep ||\n";
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:2: order-only inputs not supported\n"
+ "build out: dyndep ||\n"
+ " ^ near here", err);
+}
+
+TEST_F(DyndepParserTest, BadBinding) {
+ const char kInput[] =
+"ninja_dyndep_version = 1\n"
+"build out: dyndep\n"
+" not_restat = 1\n";
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:3: binding is not 'restat'\n"
+ " not_restat = 1\n"
+ " ^ near here", err);
+}
+
+TEST_F(DyndepParserTest, RestatTwice) {
+ const char kInput[] =
+"ninja_dyndep_version = 1\n"
+"build out: dyndep\n"
+" restat = 1\n"
+" restat = 1\n";
+ DyndepParser parser(&state_, &fs_, &dyndep_file_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:4: unexpected indent\n", err);
+}
+
+TEST_F(DyndepParserTest, NoImplicit) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"ninja_dyndep_version = 1\n"
+"build out: dyndep\n"));
+
+ EXPECT_EQ(1u, dyndep_file_.size());
+ DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]);
+ ASSERT_NE(i, dyndep_file_.end());
+ EXPECT_EQ(false, i->second.restat_);
+ EXPECT_EQ(0u, i->second.implicit_outputs_.size());
+ EXPECT_EQ(0u, i->second.implicit_inputs_.size());
+}
+
+TEST_F(DyndepParserTest, EmptyImplicit) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"ninja_dyndep_version = 1\n"
+"build out | : dyndep |\n"));
+
+ EXPECT_EQ(1u, dyndep_file_.size());
+ DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]);
+ ASSERT_NE(i, dyndep_file_.end());
+ EXPECT_EQ(false, i->second.restat_);
+ EXPECT_EQ(0u, i->second.implicit_outputs_.size());
+ EXPECT_EQ(0u, i->second.implicit_inputs_.size());
+}
+
+TEST_F(DyndepParserTest, ImplicitIn) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"ninja_dyndep_version = 1\n"
+"build out: dyndep | impin\n"));
+
+ EXPECT_EQ(1u, dyndep_file_.size());
+ DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]);
+ ASSERT_NE(i, dyndep_file_.end());
+ EXPECT_EQ(false, i->second.restat_);
+ EXPECT_EQ(0u, i->second.implicit_outputs_.size());
+ ASSERT_EQ(1u, i->second.implicit_inputs_.size());
+ EXPECT_EQ("impin", i->second.implicit_inputs_[0]->path());
+}
+
+TEST_F(DyndepParserTest, ImplicitIns) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"ninja_dyndep_version = 1\n"
+"build out: dyndep | impin1 impin2\n"));
+
+ EXPECT_EQ(1u, dyndep_file_.size());
+ DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]);
+ ASSERT_NE(i, dyndep_file_.end());
+ EXPECT_EQ(false, i->second.restat_);
+ EXPECT_EQ(0u, i->second.implicit_outputs_.size());
+ ASSERT_EQ(2u, i->second.implicit_inputs_.size());
+ EXPECT_EQ("impin1", i->second.implicit_inputs_[0]->path());
+ EXPECT_EQ("impin2", i->second.implicit_inputs_[1]->path());
+}
+
+TEST_F(DyndepParserTest, ImplicitOut) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"ninja_dyndep_version = 1\n"
+"build out | impout: dyndep\n"));
+
+ EXPECT_EQ(1u, dyndep_file_.size());
+ DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]);
+ ASSERT_NE(i, dyndep_file_.end());
+ EXPECT_EQ(false, i->second.restat_);
+ ASSERT_EQ(1u, i->second.implicit_outputs_.size());
+ EXPECT_EQ("impout", i->second.implicit_outputs_[0]->path());
+ EXPECT_EQ(0u, i->second.implicit_inputs_.size());
+}
+
+TEST_F(DyndepParserTest, ImplicitOuts) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"ninja_dyndep_version = 1\n"
+"build out | impout1 impout2 : dyndep\n"));
+
+ EXPECT_EQ(1u, dyndep_file_.size());
+ DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]);
+ ASSERT_NE(i, dyndep_file_.end());
+ EXPECT_EQ(false, i->second.restat_);
+ ASSERT_EQ(2u, i->second.implicit_outputs_.size());
+ EXPECT_EQ("impout1", i->second.implicit_outputs_[0]->path());
+ EXPECT_EQ("impout2", i->second.implicit_outputs_[1]->path());
+ EXPECT_EQ(0u, i->second.implicit_inputs_.size());
+}
+
+TEST_F(DyndepParserTest, ImplicitInsAndOuts) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"ninja_dyndep_version = 1\n"
+"build out | impout1 impout2: dyndep | impin1 impin2\n"));
+
+ EXPECT_EQ(1u, dyndep_file_.size());
+ DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]);
+ ASSERT_NE(i, dyndep_file_.end());
+ EXPECT_EQ(false, i->second.restat_);
+ ASSERT_EQ(2u, i->second.implicit_outputs_.size());
+ EXPECT_EQ("impout1", i->second.implicit_outputs_[0]->path());
+ EXPECT_EQ("impout2", i->second.implicit_outputs_[1]->path());
+ ASSERT_EQ(2u, i->second.implicit_inputs_.size());
+ EXPECT_EQ("impin1", i->second.implicit_inputs_[0]->path());
+ EXPECT_EQ("impin2", i->second.implicit_inputs_[1]->path());
+}
+
+TEST_F(DyndepParserTest, Restat) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"ninja_dyndep_version = 1\n"
+"build out: dyndep\n"
+" restat = 1\n"));
+
+ EXPECT_EQ(1u, dyndep_file_.size());
+ DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]);
+ ASSERT_NE(i, dyndep_file_.end());
+ EXPECT_EQ(true, i->second.restat_);
+ EXPECT_EQ(0u, i->second.implicit_outputs_.size());
+ EXPECT_EQ(0u, i->second.implicit_inputs_.size());
+}
+
+TEST_F(DyndepParserTest, OtherOutput) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"ninja_dyndep_version = 1\n"
+"build otherout: dyndep\n"));
+
+ EXPECT_EQ(1u, dyndep_file_.size());
+ DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]);
+ ASSERT_NE(i, dyndep_file_.end());
+ EXPECT_EQ(false, i->second.restat_);
+ EXPECT_EQ(0u, i->second.implicit_outputs_.size());
+ EXPECT_EQ(0u, i->second.implicit_inputs_.size());
+}
+
+TEST_F(DyndepParserTest, MultipleEdges) {
+ ::AssertParse(&state_,
+"build out2: touch\n");
+ ASSERT_EQ(2u, state_.edges_.size());
+ ASSERT_EQ(1u, state_.edges_[1]->outputs_.size());
+ EXPECT_EQ("out2", state_.edges_[1]->outputs_[0]->path());
+ EXPECT_EQ(0u, state_.edges_[0]->inputs_.size());
+
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"ninja_dyndep_version = 1\n"
+"build out: dyndep\n"
+"build out2: dyndep\n"
+" restat = 1\n"));
+
+ EXPECT_EQ(2u, dyndep_file_.size());
+ {
+ DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]);
+ ASSERT_NE(i, dyndep_file_.end());
+ EXPECT_EQ(false, i->second.restat_);
+ EXPECT_EQ(0u, i->second.implicit_outputs_.size());
+ EXPECT_EQ(0u, i->second.implicit_inputs_.size());
+ }
+ {
+ DyndepFile::iterator i = dyndep_file_.find(state_.edges_[1]);
+ ASSERT_NE(i, dyndep_file_.end());
+ EXPECT_EQ(true, i->second.restat_);
+ EXPECT_EQ(0u, i->second.implicit_outputs_.size());
+ EXPECT_EQ(0u, i->second.implicit_inputs_.size());
+ }
+}
diff --git a/src/edit_distance.cc b/src/edit_distance.cc
new file mode 100644
index 0000000..34bf0e5
--- /dev/null
+++ b/src/edit_distance.cc
@@ -0,0 +1,71 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "edit_distance.h"
+
+#include <algorithm>
+#include <vector>
+
+using namespace std;
+
+int EditDistance(const StringPiece& s1,
+ const StringPiece& s2,
+ bool allow_replacements,
+ int max_edit_distance) {
+ // The algorithm implemented below is the "classic"
+ // dynamic-programming algorithm for computing the Levenshtein
+ // distance, which is described here:
+ //
+ // http://en.wikipedia.org/wiki/Levenshtein_distance
+ //
+ // Although the algorithm is typically described using an m x n
+ // array, only one row plus one element are used at a time, so this
+ // implementation just keeps one vector for the row. To update one entry,
+ // only the entries to the left, top, and top-left are needed. The left
+ // entry is in row[x-1], the top entry is what's in row[x] from the last
+ // iteration, and the top-left entry is stored in previous.
+ int m = s1.len_;
+ int n = s2.len_;
+
+ vector<int> row(n + 1);
+ for (int i = 1; i <= n; ++i)
+ row[i] = i;
+
+ for (int y = 1; y <= m; ++y) {
+ row[0] = y;
+ int best_this_row = row[0];
+
+ int previous = y - 1;
+ for (int x = 1; x <= n; ++x) {
+ int old_row = row[x];
+ if (allow_replacements) {
+ row[x] = min(previous + (s1.str_[y - 1] == s2.str_[x - 1] ? 0 : 1),
+ min(row[x - 1], row[x]) + 1);
+ }
+ else {
+ if (s1.str_[y - 1] == s2.str_[x - 1])
+ row[x] = previous;
+ else
+ row[x] = min(row[x - 1], row[x]) + 1;
+ }
+ previous = old_row;
+ best_this_row = min(best_this_row, row[x]);
+ }
+
+ if (max_edit_distance && best_this_row > max_edit_distance)
+ return max_edit_distance + 1;
+ }
+
+ return row[n];
+}
diff --git a/src/edit_distance.h b/src/edit_distance.h
new file mode 100644
index 0000000..45ae4ae
--- /dev/null
+++ b/src/edit_distance.h
@@ -0,0 +1,25 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_EDIT_DISTANCE_H_
+#define NINJA_EDIT_DISTANCE_H_
+
+#include "string_piece.h"
+
+int EditDistance(const StringPiece& s1,
+ const StringPiece& s2,
+ bool allow_replacements = true,
+ int max_edit_distance = 0);
+
+#endif // NINJA_EDIT_DISTANCE_H_
diff --git a/src/edit_distance_test.cc b/src/edit_distance_test.cc
new file mode 100644
index 0000000..9dc0f82
--- /dev/null
+++ b/src/edit_distance_test.cc
@@ -0,0 +1,48 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "edit_distance.h"
+
+#include "test.h"
+
+TEST(EditDistanceTest, TestEmpty) {
+ EXPECT_EQ(5, EditDistance("", "ninja"));
+ EXPECT_EQ(5, EditDistance("ninja", ""));
+ EXPECT_EQ(0, EditDistance("", ""));
+}
+
+TEST(EditDistanceTest, TestMaxDistance) {
+ const bool allow_replacements = true;
+ for (int max_distance = 1; max_distance < 7; ++max_distance) {
+ EXPECT_EQ(max_distance + 1,
+ EditDistance("abcdefghijklmnop", "ponmlkjihgfedcba",
+ allow_replacements, max_distance));
+ }
+}
+
+TEST(EditDistanceTest, TestAllowReplacements) {
+ bool allow_replacements = true;
+ EXPECT_EQ(1, EditDistance("ninja", "njnja", allow_replacements));
+ EXPECT_EQ(1, EditDistance("njnja", "ninja", allow_replacements));
+
+ allow_replacements = false;
+ EXPECT_EQ(2, EditDistance("ninja", "njnja", allow_replacements));
+ EXPECT_EQ(2, EditDistance("njnja", "ninja", allow_replacements));
+}
+
+TEST(EditDistanceTest, TestBasics) {
+ EXPECT_EQ(0, EditDistance("browser_tests", "browser_tests"));
+ EXPECT_EQ(1, EditDistance("browser_test", "browser_tests"));
+ EXPECT_EQ(1, EditDistance("browser_tests", "browser_test"));
+}
diff --git a/src/eval_env.cc b/src/eval_env.cc
new file mode 100644
index 0000000..796a326
--- /dev/null
+++ b/src/eval_env.cc
@@ -0,0 +1,149 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <assert.h>
+
+#include "eval_env.h"
+
+using namespace std;
+
+string BindingEnv::LookupVariable(const string& var) {
+ map<string, string>::iterator i = bindings_.find(var);
+ if (i != bindings_.end())
+ return i->second;
+ if (parent_)
+ return parent_->LookupVariable(var);
+ return "";
+}
+
+void BindingEnv::AddBinding(const string& key, const string& val) {
+ bindings_[key] = val;
+}
+
+void BindingEnv::AddRule(const Rule* rule) {
+ assert(LookupRuleCurrentScope(rule->name()) == NULL);
+ rules_[rule->name()] = rule;
+}
+
+const Rule* BindingEnv::LookupRuleCurrentScope(const string& rule_name) {
+ map<string, const Rule*>::iterator i = rules_.find(rule_name);
+ if (i == rules_.end())
+ return NULL;
+ return i->second;
+}
+
+const Rule* BindingEnv::LookupRule(const string& rule_name) {
+ map<string, const Rule*>::iterator i = rules_.find(rule_name);
+ if (i != rules_.end())
+ return i->second;
+ if (parent_)
+ return parent_->LookupRule(rule_name);
+ return NULL;
+}
+
+void Rule::AddBinding(const string& key, const EvalString& val) {
+ bindings_[key] = val;
+}
+
+const EvalString* Rule::GetBinding(const string& key) const {
+ Bindings::const_iterator i = bindings_.find(key);
+ if (i == bindings_.end())
+ return NULL;
+ return &i->second;
+}
+
+// static
+bool Rule::IsReservedBinding(const string& var) {
+ return var == "command" ||
+ var == "depfile" ||
+ var == "dyndep" ||
+ var == "description" ||
+ var == "deps" ||
+ var == "generator" ||
+ var == "pool" ||
+ var == "restat" ||
+ var == "rspfile" ||
+ var == "rspfile_content" ||
+ var == "msvc_deps_prefix";
+}
+
+const map<string, const Rule*>& BindingEnv::GetRules() const {
+ return rules_;
+}
+
+string BindingEnv::LookupWithFallback(const string& var,
+ const EvalString* eval,
+ Env* env) {
+ map<string, string>::iterator i = bindings_.find(var);
+ if (i != bindings_.end())
+ return i->second;
+
+ if (eval)
+ return eval->Evaluate(env);
+
+ if (parent_)
+ return parent_->LookupVariable(var);
+
+ return "";
+}
+
+string EvalString::Evaluate(Env* env) const {
+ string result;
+ for (TokenList::const_iterator i = parsed_.begin(); i != parsed_.end(); ++i) {
+ if (i->second == RAW)
+ result.append(i->first);
+ else
+ result.append(env->LookupVariable(i->first));
+ }
+ return result;
+}
+
+void EvalString::AddText(StringPiece text) {
+ // Add it to the end of an existing RAW token if possible.
+ if (!parsed_.empty() && parsed_.back().second == RAW) {
+ parsed_.back().first.append(text.str_, text.len_);
+ } else {
+ parsed_.push_back(make_pair(text.AsString(), RAW));
+ }
+}
+void EvalString::AddSpecial(StringPiece text) {
+ parsed_.push_back(make_pair(text.AsString(), SPECIAL));
+}
+
+string EvalString::Serialize() const {
+ string result;
+ for (TokenList::const_iterator i = parsed_.begin();
+ i != parsed_.end(); ++i) {
+ result.append("[");
+ if (i->second == SPECIAL)
+ result.append("$");
+ result.append(i->first);
+ result.append("]");
+ }
+ return result;
+}
+
+string EvalString::Unparse() const {
+ string result;
+ for (TokenList::const_iterator i = parsed_.begin();
+ i != parsed_.end(); ++i) {
+ bool special = (i->second == SPECIAL);
+ if (special)
+ result.append("${");
+ result.append(i->first);
+ if (special)
+ result.append("}");
+ }
+ return result;
+}
diff --git a/src/eval_env.h b/src/eval_env.h
new file mode 100644
index 0000000..ca7daa4
--- /dev/null
+++ b/src/eval_env.h
@@ -0,0 +1,109 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_EVAL_ENV_H_
+#define NINJA_EVAL_ENV_H_
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "string_piece.h"
+
+struct Rule;
+
+/// An interface for a scope for variable (e.g. "$foo") lookups.
+struct Env {
+ virtual ~Env() {}
+ virtual std::string LookupVariable(const std::string& var) = 0;
+};
+
+/// A tokenized string that contains variable references.
+/// Can be evaluated relative to an Env.
+struct EvalString {
+ /// @return The evaluated string with variable expanded using value found in
+ /// environment @a env.
+ std::string Evaluate(Env* env) const;
+
+ /// @return The string with variables not expanded.
+ std::string Unparse() const;
+
+ void Clear() { parsed_.clear(); }
+ bool empty() const { return parsed_.empty(); }
+
+ void AddText(StringPiece text);
+ void AddSpecial(StringPiece text);
+
+ /// Construct a human-readable representation of the parsed state,
+ /// for use in tests.
+ std::string Serialize() const;
+
+private:
+ enum TokenType { RAW, SPECIAL };
+ typedef std::vector<std::pair<std::string, TokenType> > TokenList;
+ TokenList parsed_;
+};
+
+/// An invokable build command and associated metadata (description, etc.).
+struct Rule {
+ explicit Rule(const std::string& name) : name_(name) {}
+
+ const std::string& name() const { return name_; }
+
+ void AddBinding(const std::string& key, const EvalString& val);
+
+ static bool IsReservedBinding(const std::string& var);
+
+ const EvalString* GetBinding(const std::string& key) const;
+
+ private:
+ // Allow the parsers to reach into this object and fill out its fields.
+ friend struct ManifestParser;
+
+ std::string name_;
+ typedef std::map<std::string, EvalString> Bindings;
+ Bindings bindings_;
+};
+
+/// An Env which contains a mapping of variables to values
+/// as well as a pointer to a parent scope.
+struct BindingEnv : public Env {
+ BindingEnv() : parent_(NULL) {}
+ explicit BindingEnv(BindingEnv* parent) : parent_(parent) {}
+
+ virtual ~BindingEnv() {}
+ virtual std::string LookupVariable(const std::string& var);
+
+ void AddRule(const Rule* rule);
+ const Rule* LookupRule(const std::string& rule_name);
+ const Rule* LookupRuleCurrentScope(const std::string& rule_name);
+ const std::map<std::string, const Rule*>& GetRules() const;
+
+ void AddBinding(const std::string& key, const std::string& val);
+
+ /// This is tricky. Edges want lookup scope to go in this order:
+ /// 1) value set on edge itself (edge_->env_)
+ /// 2) value set on rule, with expansion in the edge's scope
+ /// 3) value set on enclosing scope of edge (edge_->env_->parent_)
+ /// This function takes as parameters the necessary info to do (2).
+ std::string LookupWithFallback(const std::string& var, const EvalString* eval,
+ Env* env);
+
+private:
+ std::map<std::string, std::string> bindings_;
+ std::map<std::string, const Rule*> rules_;
+ BindingEnv* parent_;
+};
+
+#endif // NINJA_EVAL_ENV_H_
diff --git a/src/exit_status.h b/src/exit_status.h
new file mode 100644
index 0000000..a714ece
--- /dev/null
+++ b/src/exit_status.h
@@ -0,0 +1,24 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_EXIT_STATUS_H_
+#define NINJA_EXIT_STATUS_H_
+
+enum ExitStatus {
+ ExitSuccess,
+ ExitFailure,
+ ExitInterrupted
+};
+
+#endif // NINJA_EXIT_STATUS_H_
diff --git a/src/gen_doxygen_mainpage.sh b/src/gen_doxygen_mainpage.sh
new file mode 100755
index 0000000..d159947
--- /dev/null
+++ b/src/gen_doxygen_mainpage.sh
@@ -0,0 +1,92 @@
+#!/bin/sh
+
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+
+STATUS=0
+
+# Print each of its arguments on stderr (one per line) prefixed by the
+# basename of this script.
+stderr()
+{
+ local me=$(basename "$0")
+ local i
+ for i
+ do
+ echo >&2 "$me: $i"
+ done
+}
+
+# Print each of its arguments on stderr (one per line) prefixed by the
+# basename of this script and 'error'.
+error()
+{
+ local i
+ for i
+ do
+ stderr "error: $i"
+ done
+ STATUS=1
+}
+
+generate_header()
+{
+ cat <<EOF
+/**
+ * \\mainpage
+EOF
+}
+
+generate_footer()
+{
+ cat <<EOF
+ */
+EOF
+}
+
+include_file()
+{
+ local file="$1"
+ if ! [ -r "$file" ]
+ then
+ error "'$file' is not readable."
+ return
+ fi
+ cat <<EOF
+ * \\section $file
+ * \\verbatim
+EOF
+ cat < "$file"
+ cat <<EOF
+ \\endverbatim
+EOF
+}
+
+if [ $# -eq 0 ]
+then
+ echo >&2 "usage: $0 inputs..."
+ exit 1
+fi
+
+generate_header
+for i in "$@"
+do
+ include_file "$i"
+done
+generate_footer
+
+exit $STATUS
diff --git a/src/getopt.c b/src/getopt.c
new file mode 100644
index 0000000..861f07f
--- /dev/null
+++ b/src/getopt.c
@@ -0,0 +1,410 @@
+/****************************************************************************
+
+getopt.c - Read command line options
+
+AUTHOR: Gregory Pietsch
+CREATED Fri Jan 10 21:13:05 1997
+
+DESCRIPTION:
+
+The getopt() function parses the command line arguments. Its arguments argc
+and argv are the argument count and array as passed to the main() function
+on program invocation. The argument optstring is a list of available option
+characters. If such a character is followed by a colon (`:'), the option
+takes an argument, which is placed in optarg. If such a character is
+followed by two colons, the option takes an optional argument, which is
+placed in optarg. If the option does not take an argument, optarg is NULL.
+
+The external variable optind is the index of the next array element of argv
+to be processed; it communicates from one call to the next which element to
+process.
+
+The getopt_long() function works like getopt() except that it also accepts
+long options started by two dashes `--'. If these take values, it is either
+in the form
+
+--arg=value
+
+ or
+
+--arg value
+
+It takes the additional arguments longopts which is a pointer to the first
+element of an array of type GETOPT_LONG_OPTION_T. The last element of the
+array has to be filled with NULL for the name field.
+
+The longind pointer points to the index of the current long option relative
+to longopts if it is non-NULL.
+
+The getopt() function returns the option character if the option was found
+successfully, `:' if there was a missing parameter for one of the options,
+`?' for an unknown option character, and EOF for the end of the option list.
+
+The getopt_long() function's return value is described in the header file.
+
+The function getopt_long_only() is identical to getopt_long(), except that a
+plus sign `+' can introduce long options as well as `--'.
+
+The following describes how to deal with options that follow non-option
+argv-elements.
+
+If the caller did not specify anything, the default is REQUIRE_ORDER if the
+environment variable POSIXLY_CORRECT is defined, PERMUTE otherwise.
+
+REQUIRE_ORDER means don't recognize them as options; stop option processing
+when the first non-option is seen. This is what Unix does. This mode of
+operation is selected by either setting the environment variable
+POSIXLY_CORRECT, or using `+' as the first character of the optstring
+parameter.
+
+PERMUTE is the default. We permute the contents of ARGV as we scan, so that
+eventually all the non-options are at the end. This allows options to be
+given in any order, even with programs that were not written to expect this.
+
+RETURN_IN_ORDER is an option available to programs that were written to
+expect options and other argv-elements in any order and that care about the
+ordering of the two. We describe each non-option argv-element as if it were
+the argument of an option with character code 1. Using `-' as the first
+character of the optstring parameter selects this mode of operation.
+
+The special argument `--' forces an end of option-scanning regardless of the
+value of ordering. In the case of RETURN_IN_ORDER, only `--' can cause
+getopt() and friends to return EOF with optind != argc.
+
+COPYRIGHT NOTICE AND DISCLAIMER:
+
+Copyright (C) 1997 Gregory Pietsch
+
+This file and the accompanying getopt.h header file are hereby placed in the
+public domain without restrictions. Just give the author credit, don't
+claim you wrote it or prevent anyone else from using it.
+
+Gregory Pietsch's current e-mail address:
+gpietsch@comcast.net
+****************************************************************************/
+
+/* include files */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef GETOPT_H
+#include "getopt.h"
+#endif
+
+/* macros */
+
+/* types */
+typedef enum GETOPT_ORDERING_T
+{
+ PERMUTE,
+ RETURN_IN_ORDER,
+ REQUIRE_ORDER
+} GETOPT_ORDERING_T;
+
+/* globally-defined variables */
+char *optarg = NULL;
+int optind = 0;
+int opterr = 1;
+int optopt = '?';
+
+/* functions */
+
+/* reverse_argv_elements: reverses num elements starting at argv */
+static void
+reverse_argv_elements (char **argv, int num)
+{
+ int i;
+ char *tmp;
+
+ for (i = 0; i < (num >> 1); i++)
+ {
+ tmp = argv[i];
+ argv[i] = argv[num - i - 1];
+ argv[num - i - 1] = tmp;
+ }
+}
+
+/* permute: swap two blocks of argv-elements given their lengths */
+static void
+permute (char **argv, int len1, int len2)
+{
+ reverse_argv_elements (argv, len1);
+ reverse_argv_elements (argv, len1 + len2);
+ reverse_argv_elements (argv, len2);
+}
+
+/* is_option: is this argv-element an option or the end of the option list? */
+static int
+is_option (char *argv_element, int only)
+{
+ return ((argv_element == NULL)
+ || (argv_element[0] == '-') || (only && argv_element[0] == '+'));
+}
+
+/* getopt_internal: the function that does all the dirty work */
+static int
+getopt_internal (int argc, char **argv, char *shortopts,
+ GETOPT_LONG_OPTION_T * longopts, int *longind, int only)
+{
+ GETOPT_ORDERING_T ordering = PERMUTE;
+ static size_t optwhere = 0;
+ size_t permute_from = 0;
+ int num_nonopts = 0;
+ int optindex = 0;
+ size_t match_chars = 0;
+ char *possible_arg = NULL;
+ int longopt_match = -1;
+ int has_arg = -1;
+ char *cp = NULL;
+ int arg_next = 0;
+
+ /* first, deal with silly parameters and easy stuff */
+ if (argc == 0 || argv == NULL || (shortopts == NULL && longopts == NULL))
+ return (optopt = '?');
+ if (optind >= argc || argv[optind] == NULL)
+ return EOF;
+ if (strcmp (argv[optind], "--") == 0)
+ {
+ optind++;
+ return EOF;
+ }
+ /* if this is our first time through */
+ if (optind == 0)
+ optind = optwhere = 1;
+
+ /* define ordering */
+ if (shortopts != NULL && (*shortopts == '-' || *shortopts == '+'))
+ {
+ ordering = (*shortopts == '-') ? RETURN_IN_ORDER : REQUIRE_ORDER;
+ shortopts++;
+ }
+ else
+ ordering = (getenv ("POSIXLY_CORRECT") != NULL) ? REQUIRE_ORDER : PERMUTE;
+
+ /*
+ * based on ordering, find our next option, if we're at the beginning of
+ * one
+ */
+ if (optwhere == 1)
+ {
+ switch (ordering)
+ {
+ case PERMUTE:
+ permute_from = optind;
+ num_nonopts = 0;
+ while (!is_option (argv[optind], only))
+ {
+ optind++;
+ num_nonopts++;
+ }
+ if (argv[optind] == NULL)
+ {
+ /* no more options */
+ optind = permute_from;
+ return EOF;
+ }
+ else if (strcmp (argv[optind], "--") == 0)
+ {
+ /* no more options, but have to get `--' out of the way */
+ permute (argv + permute_from, num_nonopts, 1);
+ optind = permute_from + 1;
+ return EOF;
+ }
+ break;
+ case RETURN_IN_ORDER:
+ if (!is_option (argv[optind], only))
+ {
+ optarg = argv[optind++];
+ return (optopt = 1);
+ }
+ break;
+ case REQUIRE_ORDER:
+ if (!is_option (argv[optind], only))
+ return EOF;
+ break;
+ }
+ }
+ /* we've got an option, so parse it */
+
+ /* first, is it a long option? */
+ if (longopts != NULL
+ && (memcmp (argv[optind], "--", 2) == 0
+ || (only && argv[optind][0] == '+')) && optwhere == 1)
+ {
+ /* handle long options */
+ if (memcmp (argv[optind], "--", 2) == 0)
+ optwhere = 2;
+ longopt_match = -1;
+ possible_arg = strchr (argv[optind] + optwhere, '=');
+ if (possible_arg == NULL)
+ {
+ /* no =, so next argv might be arg */
+ match_chars = strlen (argv[optind]);
+ possible_arg = argv[optind] + match_chars;
+ match_chars = match_chars - optwhere;
+ }
+ else
+ match_chars = (possible_arg - argv[optind]) - optwhere;
+ for (optindex = 0; longopts[optindex].name != NULL; optindex++)
+ {
+ if (memcmp (argv[optind] + optwhere,
+ longopts[optindex].name, match_chars) == 0)
+ {
+ /* do we have an exact match? */
+ if (match_chars == strlen (longopts[optindex].name))
+ {
+ longopt_match = optindex;
+ break;
+ }
+ /* do any characters match? */
+ else
+ {
+ if (longopt_match < 0)
+ longopt_match = optindex;
+ else
+ {
+ /* we have ambiguous options */
+ if (opterr)
+ fprintf (stderr, "%s: option `%s' is ambiguous "
+ "(could be `--%s' or `--%s')\n",
+ argv[0],
+ argv[optind],
+ longopts[longopt_match].name,
+ longopts[optindex].name);
+ return (optopt = '?');
+ }
+ }
+ }
+ }
+ if (longopt_match >= 0)
+ has_arg = longopts[longopt_match].has_arg;
+ }
+ /* if we didn't find a long option, is it a short option? */
+ if (longopt_match < 0 && shortopts != NULL)
+ {
+ cp = strchr (shortopts, argv[optind][optwhere]);
+ if (cp == NULL)
+ {
+ /* couldn't find option in shortopts */
+ if (opterr)
+ fprintf (stderr,
+ "%s: invalid option -- `-%c'\n",
+ argv[0], argv[optind][optwhere]);
+ optwhere++;
+ if (argv[optind][optwhere] == '\0')
+ {
+ optind++;
+ optwhere = 1;
+ }
+ return (optopt = '?');
+ }
+ has_arg = ((cp[1] == ':')
+ ? ((cp[2] == ':') ? OPTIONAL_ARG : required_argument) : no_argument);
+ possible_arg = argv[optind] + optwhere + 1;
+ optopt = *cp;
+ }
+ /* get argument and reset optwhere */
+ arg_next = 0;
+ switch (has_arg)
+ {
+ case OPTIONAL_ARG:
+ if (*possible_arg == '=')
+ possible_arg++;
+ if (*possible_arg != '\0')
+ {
+ optarg = possible_arg;
+ optwhere = 1;
+ }
+ else
+ optarg = NULL;
+ break;
+ case required_argument:
+ if (*possible_arg == '=')
+ possible_arg++;
+ if (*possible_arg != '\0')
+ {
+ optarg = possible_arg;
+ optwhere = 1;
+ }
+ else if (optind + 1 >= argc)
+ {
+ if (opterr)
+ {
+ fprintf (stderr, "%s: argument required for option `", argv[0]);
+ if (longopt_match >= 0)
+ fprintf (stderr, "--%s'\n", longopts[longopt_match].name);
+ else
+ fprintf (stderr, "-%c'\n", *cp);
+ }
+ optind++;
+ return (optopt = ':');
+ }
+ else
+ {
+ optarg = argv[optind + 1];
+ arg_next = 1;
+ optwhere = 1;
+ }
+ break;
+ case no_argument:
+ if (longopt_match < 0)
+ {
+ optwhere++;
+ if (argv[optind][optwhere] == '\0')
+ optwhere = 1;
+ }
+ else
+ optwhere = 1;
+ optarg = NULL;
+ break;
+ }
+
+ /* do we have to permute or otherwise modify optind? */
+ if (ordering == PERMUTE && optwhere == 1 && num_nonopts != 0)
+ {
+ permute (argv + permute_from, num_nonopts, 1 + arg_next);
+ optind = permute_from + 1 + arg_next;
+ }
+ else if (optwhere == 1)
+ optind = optind + 1 + arg_next;
+
+ /* finally return */
+ if (longopt_match >= 0)
+ {
+ if (longind != NULL)
+ *longind = longopt_match;
+ if (longopts[longopt_match].flag != NULL)
+ {
+ *(longopts[longopt_match].flag) = longopts[longopt_match].val;
+ return 0;
+ }
+ else
+ return longopts[longopt_match].val;
+ }
+ else
+ return optopt;
+}
+
+#ifndef _AIX
+int
+getopt (int argc, char **argv, char *optstring)
+{
+ return getopt_internal (argc, argv, optstring, NULL, NULL, 0);
+}
+#endif
+
+int
+getopt_long (int argc, char **argv, const char *shortopts,
+ const GETOPT_LONG_OPTION_T * longopts, int *longind)
+{
+ return getopt_internal (argc, argv, (char*)shortopts, (GETOPT_LONG_OPTION_T*)longopts, longind, 0);
+}
+
+int
+getopt_long_only (int argc, char **argv, const char *shortopts,
+ const GETOPT_LONG_OPTION_T * longopts, int *longind)
+{
+ return getopt_internal (argc, argv, (char*)shortopts, (GETOPT_LONG_OPTION_T*)longopts, longind, 1);
+}
+
+/* end of file GETOPT.C */
diff --git a/src/getopt.h b/src/getopt.h
new file mode 100644
index 0000000..965dc29
--- /dev/null
+++ b/src/getopt.h
@@ -0,0 +1,57 @@
+#ifndef GETOPT_H
+#define GETOPT_H
+
+/* include files needed by this include file */
+
+/* macros defined by this include file */
+#define no_argument 0
+#define required_argument 1
+#define OPTIONAL_ARG 2
+
+/* types defined by this include file */
+
+/* GETOPT_LONG_OPTION_T: The type of long option */
+typedef struct GETOPT_LONG_OPTION_T
+{
+ const char *name; /* the name of the long option */
+ int has_arg; /* one of the above macros */
+ int *flag; /* determines if getopt_long() returns a
+ * value for a long option; if it is
+ * non-NULL, 0 is returned as a function
+ * value and the value of val is stored in
+ * the area pointed to by flag. Otherwise,
+ * val is returned. */
+ int val; /* determines the value to return if flag is
+ * NULL. */
+} GETOPT_LONG_OPTION_T;
+
+typedef GETOPT_LONG_OPTION_T option;
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+ /* externally-defined variables */
+ extern char *optarg;
+ extern int optind;
+ extern int opterr;
+ extern int optopt;
+
+ /* function prototypes */
+#ifndef _AIX
+ int getopt (int argc, char **argv, char *optstring);
+#endif
+ int getopt_long (int argc, char **argv, const char *shortopts,
+ const GETOPT_LONG_OPTION_T * longopts, int *longind);
+ int getopt_long_only (int argc, char **argv, const char *shortopts,
+ const GETOPT_LONG_OPTION_T * longopts, int *longind);
+
+#ifdef __cplusplus
+};
+
+#endif
+
+#endif /* GETOPT_H */
+
+/* END OF FILE getopt.h */
diff --git a/src/graph.cc b/src/graph.cc
new file mode 100644
index 0000000..ea11360
--- /dev/null
+++ b/src/graph.cc
@@ -0,0 +1,662 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "graph.h"
+
+#include <algorithm>
+#include <assert.h>
+#include <stdio.h>
+
+#include "build_log.h"
+#include "debug_flags.h"
+#include "depfile_parser.h"
+#include "deps_log.h"
+#include "disk_interface.h"
+#include "manifest_parser.h"
+#include "metrics.h"
+#include "state.h"
+#include "util.h"
+
+using namespace std;
+
+bool Node::Stat(DiskInterface* disk_interface, string* err) {
+ return (mtime_ = disk_interface->Stat(path_, err)) != -1;
+}
+
+bool DependencyScan::RecomputeDirty(Node* node, string* err) {
+ vector<Node*> stack;
+ return RecomputeDirty(node, &stack, err);
+}
+
+bool DependencyScan::RecomputeDirty(Node* node, vector<Node*>* stack,
+ string* err) {
+ Edge* edge = node->in_edge();
+ if (!edge) {
+ // If we already visited this leaf node then we are done.
+ if (node->status_known())
+ return true;
+ // This node has no in-edge; it is dirty if it is missing.
+ if (!node->StatIfNecessary(disk_interface_, err))
+ return false;
+ if (!node->exists())
+ EXPLAIN("%s has no in-edge and is missing", node->path().c_str());
+ node->set_dirty(!node->exists());
+ return true;
+ }
+
+ // If we already finished this edge then we are done.
+ if (edge->mark_ == Edge::VisitDone)
+ return true;
+
+ // If we encountered this edge earlier in the call stack we have a cycle.
+ if (!VerifyDAG(node, stack, err))
+ return false;
+
+ // Mark the edge temporarily while in the call stack.
+ edge->mark_ = Edge::VisitInStack;
+ stack->push_back(node);
+
+ bool dirty = false;
+ edge->outputs_ready_ = true;
+ edge->deps_missing_ = false;
+
+ if (!edge->deps_loaded_) {
+ // This is our first encounter with this edge.
+ // If there is a pending dyndep file, visit it now:
+ // * If the dyndep file is ready then load it now to get any
+ // additional inputs and outputs for this and other edges.
+ // Once the dyndep file is loaded it will no longer be pending
+ // if any other edges encounter it, but they will already have
+ // been updated.
+  //   * If the dyndep file is not ready then since it is known to be an
+ // input to this edge, the edge will not be considered ready below.
+ // Later during the build the dyndep file will become ready and be
+ // loaded to update this edge before it can possibly be scheduled.
+ if (edge->dyndep_ && edge->dyndep_->dyndep_pending()) {
+ if (!RecomputeDirty(edge->dyndep_, stack, err))
+ return false;
+
+ if (!edge->dyndep_->in_edge() ||
+ edge->dyndep_->in_edge()->outputs_ready()) {
+ // The dyndep file is ready, so load it now.
+ if (!LoadDyndeps(edge->dyndep_, err))
+ return false;
+ }
+ }
+ }
+
+ // Load output mtimes so we can compare them to the most recent input below.
+ for (vector<Node*>::iterator o = edge->outputs_.begin();
+ o != edge->outputs_.end(); ++o) {
+ if (!(*o)->StatIfNecessary(disk_interface_, err))
+ return false;
+ }
+
+ if (!edge->deps_loaded_) {
+ // This is our first encounter with this edge. Load discovered deps.
+ edge->deps_loaded_ = true;
+ if (!dep_loader_.LoadDeps(edge, err)) {
+ if (!err->empty())
+ return false;
+ // Failed to load dependency info: rebuild to regenerate it.
+ // LoadDeps() did EXPLAIN() already, no need to do it here.
+ dirty = edge->deps_missing_ = true;
+ }
+ }
+
+ // Visit all inputs; we're dirty if any of the inputs are dirty.
+ Node* most_recent_input = NULL;
+ for (vector<Node*>::iterator i = edge->inputs_.begin();
+ i != edge->inputs_.end(); ++i) {
+ // Visit this input.
+ if (!RecomputeDirty(*i, stack, err))
+ return false;
+
+ // If an input is not ready, neither are our outputs.
+ if (Edge* in_edge = (*i)->in_edge()) {
+ if (!in_edge->outputs_ready_)
+ edge->outputs_ready_ = false;
+ }
+
+ if (!edge->is_order_only(i - edge->inputs_.begin())) {
+ // If a regular input is dirty (or missing), we're dirty.
+ // Otherwise consider mtime.
+ if ((*i)->dirty()) {
+ EXPLAIN("%s is dirty", (*i)->path().c_str());
+ dirty = true;
+ } else {
+ if (!most_recent_input || (*i)->mtime() > most_recent_input->mtime()) {
+ most_recent_input = *i;
+ }
+ }
+ }
+ }
+
+ // We may also be dirty due to output state: missing outputs, out of
+ // date outputs, etc. Visit all outputs and determine whether they're dirty.
+ if (!dirty)
+ if (!RecomputeOutputsDirty(edge, most_recent_input, &dirty, err))
+ return false;
+
+ // Finally, visit each output and update their dirty state if necessary.
+ for (vector<Node*>::iterator o = edge->outputs_.begin();
+ o != edge->outputs_.end(); ++o) {
+ if (dirty)
+ (*o)->MarkDirty();
+ }
+
+ // If an edge is dirty, its outputs are normally not ready. (It's
+ // possible to be clean but still not be ready in the presence of
+ // order-only inputs.)
+ // But phony edges with no inputs have nothing to do, so are always
+ // ready.
+ if (dirty && !(edge->is_phony() && edge->inputs_.empty()))
+ edge->outputs_ready_ = false;
+
+ // Mark the edge as finished during this walk now that it will no longer
+ // be in the call stack.
+ edge->mark_ = Edge::VisitDone;
+ assert(stack->back() == node);
+ stack->pop_back();
+
+ return true;
+}
+
+bool DependencyScan::VerifyDAG(Node* node, vector<Node*>* stack, string* err) {
+ Edge* edge = node->in_edge();
+ assert(edge != NULL);
+
+ // If we have no temporary mark on the edge then we do not yet have a cycle.
+ if (edge->mark_ != Edge::VisitInStack)
+ return true;
+
+ // We have this edge earlier in the call stack. Find it.
+ vector<Node*>::iterator start = stack->begin();
+ while (start != stack->end() && (*start)->in_edge() != edge)
+ ++start;
+ assert(start != stack->end());
+
+ // Make the cycle clear by reporting its start as the node at its end
+ // instead of some other output of the starting edge. For example,
+ // running 'ninja b' on
+ // build a b: cat c
+ // build c: cat a
+ // should report a -> c -> a instead of b -> c -> a.
+ *start = node;
+
+ // Construct the error message rejecting the cycle.
+ *err = "dependency cycle: ";
+ for (vector<Node*>::const_iterator i = start; i != stack->end(); ++i) {
+ err->append((*i)->path());
+ err->append(" -> ");
+ }
+ err->append((*start)->path());
+
+ if ((start + 1) == stack->end() && edge->maybe_phonycycle_diagnostic()) {
+ // The manifest parser would have filtered out the self-referencing
+ // input if it were not configured to allow the error.
+ err->append(" [-w phonycycle=err]");
+ }
+
+ return false;
+}
+
+bool DependencyScan::RecomputeOutputsDirty(Edge* edge, Node* most_recent_input,
+ bool* outputs_dirty, string* err) {
+ string command = edge->EvaluateCommand(/*incl_rsp_file=*/true);
+ for (vector<Node*>::iterator o = edge->outputs_.begin();
+ o != edge->outputs_.end(); ++o) {
+ if (RecomputeOutputDirty(edge, most_recent_input, command, *o)) {
+ *outputs_dirty = true;
+ return true;
+ }
+ }
+ return true;
+}
+
+bool DependencyScan::RecomputeOutputDirty(const Edge* edge,
+ const Node* most_recent_input,
+ const string& command,
+ Node* output) {
+ if (edge->is_phony()) {
+ // Phony edges don't write any output. Outputs are only dirty if
+ // there are no inputs and we're missing the output.
+ if (edge->inputs_.empty() && !output->exists()) {
+ EXPLAIN("output %s of phony edge with no inputs doesn't exist",
+ output->path().c_str());
+ return true;
+ }
+ return false;
+ }
+
+ BuildLog::LogEntry* entry = 0;
+
+ // Dirty if we're missing the output.
+ if (!output->exists()) {
+ EXPLAIN("output %s doesn't exist", output->path().c_str());
+ return true;
+ }
+
+ // Dirty if the output is older than the input.
+ if (most_recent_input && output->mtime() < most_recent_input->mtime()) {
+ TimeStamp output_mtime = output->mtime();
+
+ // If this is a restat rule, we may have cleaned the output with a restat
+ // rule in a previous run and stored the most recent input mtime in the
+ // build log. Use that mtime instead, so that the file will only be
+ // considered dirty if an input was modified since the previous run.
+ bool used_restat = false;
+ if (edge->GetBindingBool("restat") && build_log() &&
+ (entry = build_log()->LookupByOutput(output->path()))) {
+ output_mtime = entry->mtime;
+ used_restat = true;
+ }
+
+ if (output_mtime < most_recent_input->mtime()) {
+ EXPLAIN("%soutput %s older than most recent input %s "
+ "(%" PRId64 " vs %" PRId64 ")",
+ used_restat ? "restat of " : "", output->path().c_str(),
+ most_recent_input->path().c_str(),
+ output_mtime, most_recent_input->mtime());
+ return true;
+ }
+ }
+
+ if (build_log()) {
+ bool generator = edge->GetBindingBool("generator");
+ if (entry || (entry = build_log()->LookupByOutput(output->path()))) {
+ if (!generator &&
+ BuildLog::LogEntry::HashCommand(command) != entry->command_hash) {
+ // May also be dirty due to the command changing since the last build.
+ // But if this is a generator rule, the command changing does not make us
+ // dirty.
+ EXPLAIN("command line changed for %s", output->path().c_str());
+ return true;
+ }
+ if (most_recent_input && entry->mtime < most_recent_input->mtime()) {
+ // May also be dirty due to the mtime in the log being older than the
+ // mtime of the most recent input. This can occur even when the mtime
+ // on disk is newer if a previous run wrote to the output file but
+ // exited with an error or was interrupted.
+ EXPLAIN("recorded mtime of %s older than most recent input %s (%" PRId64 " vs %" PRId64 ")",
+ output->path().c_str(), most_recent_input->path().c_str(),
+ entry->mtime, most_recent_input->mtime());
+ return true;
+ }
+ }
+ if (!entry && !generator) {
+ EXPLAIN("command line not found in log for %s", output->path().c_str());
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool DependencyScan::LoadDyndeps(Node* node, string* err) const {
+ return dyndep_loader_.LoadDyndeps(node, err);
+}
+
+bool DependencyScan::LoadDyndeps(Node* node, DyndepFile* ddf,
+ string* err) const {
+ return dyndep_loader_.LoadDyndeps(node, ddf, err);
+}
+
+bool Edge::AllInputsReady() const {
+ for (vector<Node*>::const_iterator i = inputs_.begin();
+ i != inputs_.end(); ++i) {
+ if ((*i)->in_edge() && !(*i)->in_edge()->outputs_ready())
+ return false;
+ }
+ return true;
+}
+
+/// An Env for an Edge, providing $in and $out.
+struct EdgeEnv : public Env {
+ enum EscapeKind { kShellEscape, kDoNotEscape };
+
+ EdgeEnv(const Edge* const edge, const EscapeKind escape)
+ : edge_(edge), escape_in_out_(escape), recursive_(false) {}
+ virtual string LookupVariable(const string& var);
+
+ /// Given a span of Nodes, construct a list of paths suitable for a command
+ /// line.
+ std::string MakePathList(const Node* const* span, size_t size, char sep) const;
+
+ private:
+ vector<string> lookups_;
+ const Edge* const edge_;
+ EscapeKind escape_in_out_;
+ bool recursive_;
+};
+
+string EdgeEnv::LookupVariable(const string& var) {
+ if (var == "in" || var == "in_newline") {
+ int explicit_deps_count = edge_->inputs_.size() - edge_->implicit_deps_ -
+ edge_->order_only_deps_;
+#if __cplusplus >= 201103L
+ return MakePathList(edge_->inputs_.data(), explicit_deps_count,
+#else
+ return MakePathList(&edge_->inputs_[0], explicit_deps_count,
+#endif
+ var == "in" ? ' ' : '\n');
+ } else if (var == "out") {
+ int explicit_outs_count = edge_->outputs_.size() - edge_->implicit_outs_;
+ return MakePathList(&edge_->outputs_[0], explicit_outs_count, ' ');
+ }
+
+ if (recursive_) {
+ vector<string>::const_iterator it;
+ if ((it = find(lookups_.begin(), lookups_.end(), var)) != lookups_.end()) {
+ string cycle;
+ for (; it != lookups_.end(); ++it)
+ cycle.append(*it + " -> ");
+ cycle.append(var);
+ Fatal(("cycle in rule variables: " + cycle).c_str());
+ }
+ }
+
+ // See notes on BindingEnv::LookupWithFallback.
+ const EvalString* eval = edge_->rule_->GetBinding(var);
+ if (recursive_ && eval)
+ lookups_.push_back(var);
+
+ // In practice, variables defined on rules never use another rule variable.
+ // For performance, only start checking for cycles after the first lookup.
+ recursive_ = true;
+ return edge_->env_->LookupWithFallback(var, eval, this);
+}
+
+std::string EdgeEnv::MakePathList(const Node* const* const span,
+ const size_t size, const char sep) const {
+ string result;
+ for (const Node* const* i = span; i != span + size; ++i) {
+ if (!result.empty())
+ result.push_back(sep);
+ const string& path = (*i)->PathDecanonicalized();
+ if (escape_in_out_ == kShellEscape) {
+#ifdef _WIN32
+ GetWin32EscapedString(path, &result);
+#else
+ GetShellEscapedString(path, &result);
+#endif
+ } else {
+ result.append(path);
+ }
+ }
+ return result;
+}
+
+std::string Edge::EvaluateCommand(const bool incl_rsp_file) const {
+ string command = GetBinding("command");
+ if (incl_rsp_file) {
+ string rspfile_content = GetBinding("rspfile_content");
+ if (!rspfile_content.empty())
+ command += ";rspfile=" + rspfile_content;
+ }
+ return command;
+}
+
+std::string Edge::GetBinding(const std::string& key) const {
+ EdgeEnv env(this, EdgeEnv::kShellEscape);
+ return env.LookupVariable(key);
+}
+
+bool Edge::GetBindingBool(const string& key) const {
+ return !GetBinding(key).empty();
+}
+
+string Edge::GetUnescapedDepfile() const {
+ EdgeEnv env(this, EdgeEnv::kDoNotEscape);
+ return env.LookupVariable("depfile");
+}
+
+string Edge::GetUnescapedDyndep() const {
+ EdgeEnv env(this, EdgeEnv::kDoNotEscape);
+ return env.LookupVariable("dyndep");
+}
+
+std::string Edge::GetUnescapedRspfile() const {
+ EdgeEnv env(this, EdgeEnv::kDoNotEscape);
+ return env.LookupVariable("rspfile");
+}
+
+void Edge::Dump(const char* prefix) const {
+ printf("%s[ ", prefix);
+ for (vector<Node*>::const_iterator i = inputs_.begin();
+ i != inputs_.end() && *i != NULL; ++i) {
+ printf("%s ", (*i)->path().c_str());
+ }
+ printf("--%s-> ", rule_->name().c_str());
+ for (vector<Node*>::const_iterator i = outputs_.begin();
+ i != outputs_.end() && *i != NULL; ++i) {
+ printf("%s ", (*i)->path().c_str());
+ }
+ if (pool_) {
+ if (!pool_->name().empty()) {
+ printf("(in pool '%s')", pool_->name().c_str());
+ }
+ } else {
+ printf("(null pool?)");
+ }
+ printf("] 0x%p\n", this);
+}
+
+bool Edge::is_phony() const {
+ return rule_ == &State::kPhonyRule;
+}
+
+bool Edge::use_console() const {
+ return pool() == &State::kConsolePool;
+}
+
+bool Edge::maybe_phonycycle_diagnostic() const {
+ // CMake 2.8.12.x and 3.0.x produced self-referencing phony rules
+ // of the form "build a: phony ... a ...". Restrict our
+ // "phonycycle" diagnostic option to the form it used.
+ return is_phony() && outputs_.size() == 1 && implicit_outs_ == 0 &&
+ implicit_deps_ == 0;
+}
+
+// static
+string Node::PathDecanonicalized(const string& path, uint64_t slash_bits) {
+ string result = path;
+#ifdef _WIN32
+ uint64_t mask = 1;
+ for (char* c = &result[0]; (c = strchr(c, '/')) != NULL;) {
+ if (slash_bits & mask)
+ *c = '\\';
+ c++;
+ mask <<= 1;
+ }
+#endif
+ return result;
+}
+
+void Node::Dump(const char* prefix) const {
+ printf("%s <%s 0x%p> mtime: %" PRId64 "%s, (:%s), ",
+ prefix, path().c_str(), this,
+ mtime(), mtime() ? "" : " (:missing)",
+ dirty() ? " dirty" : " clean");
+ if (in_edge()) {
+ in_edge()->Dump("in-edge: ");
+ } else {
+ printf("no in-edge\n");
+ }
+ printf(" out edges:\n");
+ for (vector<Edge*>::const_iterator e = out_edges().begin();
+ e != out_edges().end() && *e != NULL; ++e) {
+ (*e)->Dump(" +- ");
+ }
+}
+
+bool ImplicitDepLoader::LoadDeps(Edge* edge, string* err) {
+ string deps_type = edge->GetBinding("deps");
+ if (!deps_type.empty())
+ return LoadDepsFromLog(edge, err);
+
+ string depfile = edge->GetUnescapedDepfile();
+ if (!depfile.empty())
+ return LoadDepFile(edge, depfile, err);
+
+ // No deps to load.
+ return true;
+}
+
+struct matches {
+ matches(std::vector<StringPiece>::iterator i) : i_(i) {}
+
+ bool operator()(const Node* node) const {
+ StringPiece opath = StringPiece(node->path());
+ return *i_ == opath;
+ }
+
+ std::vector<StringPiece>::iterator i_;
+};
+
+bool ImplicitDepLoader::LoadDepFile(Edge* edge, const string& path,
+ string* err) {
+ METRIC_RECORD("depfile load");
+ // Read depfile content. Treat a missing depfile as empty.
+ string content;
+ switch (disk_interface_->ReadFile(path, &content, err)) {
+ case DiskInterface::Okay:
+ break;
+ case DiskInterface::NotFound:
+ err->clear();
+ break;
+ case DiskInterface::OtherError:
+ *err = "loading '" + path + "': " + *err;
+ return false;
+ }
+ // On a missing depfile: return false and empty *err.
+ if (content.empty()) {
+ EXPLAIN("depfile '%s' is missing", path.c_str());
+ return false;
+ }
+
+ DepfileParser depfile(depfile_parser_options_
+ ? *depfile_parser_options_
+ : DepfileParserOptions());
+ string depfile_err;
+ if (!depfile.Parse(&content, &depfile_err)) {
+ *err = path + ": " + depfile_err;
+ return false;
+ }
+
+ if (depfile.outs_.empty()) {
+ *err = path + ": no outputs declared";
+ return false;
+ }
+
+ uint64_t unused;
+ std::vector<StringPiece>::iterator primary_out = depfile.outs_.begin();
+ if (!CanonicalizePath(const_cast<char*>(primary_out->str_),
+ &primary_out->len_, &unused, err)) {
+ *err = path + ": " + *err;
+ return false;
+ }
+
+ // Check that this depfile matches the edge's output, if not return false to
+ // mark the edge as dirty.
+ Node* first_output = edge->outputs_[0];
+ StringPiece opath = StringPiece(first_output->path());
+ if (opath != *primary_out) {
+ EXPLAIN("expected depfile '%s' to mention '%s', got '%s'", path.c_str(),
+ first_output->path().c_str(), primary_out->AsString().c_str());
+ return false;
+ }
+
+ // Ensure that all mentioned outputs are outputs of the edge.
+ for (std::vector<StringPiece>::iterator o = depfile.outs_.begin();
+ o != depfile.outs_.end(); ++o) {
+ matches m(o);
+ if (std::find_if(edge->outputs_.begin(), edge->outputs_.end(), m) == edge->outputs_.end()) {
+ *err = path + ": depfile mentions '" + o->AsString() + "' as an output, but no such output was declared";
+ return false;
+ }
+ }
+
+ // Preallocate space in edge->inputs_ to be filled in below.
+ vector<Node*>::iterator implicit_dep =
+ PreallocateSpace(edge, depfile.ins_.size());
+
+ // Add all its in-edges.
+ for (vector<StringPiece>::iterator i = depfile.ins_.begin();
+ i != depfile.ins_.end(); ++i, ++implicit_dep) {
+ uint64_t slash_bits;
+ if (!CanonicalizePath(const_cast<char*>(i->str_), &i->len_, &slash_bits,
+ err))
+ return false;
+
+ Node* node = state_->GetNode(*i, slash_bits);
+ *implicit_dep = node;
+ node->AddOutEdge(edge);
+ CreatePhonyInEdge(node);
+ }
+
+ return true;
+}
+
+bool ImplicitDepLoader::LoadDepsFromLog(Edge* edge, string* err) {
+ // NOTE: deps are only supported for single-target edges.
+ Node* output = edge->outputs_[0];
+ DepsLog::Deps* deps = deps_log_ ? deps_log_->GetDeps(output) : NULL;
+ if (!deps) {
+ EXPLAIN("deps for '%s' are missing", output->path().c_str());
+ return false;
+ }
+
+ // Deps are invalid if the output is newer than the deps.
+ if (output->mtime() > deps->mtime) {
+ EXPLAIN("stored deps info out of date for '%s' (%" PRId64 " vs %" PRId64 ")",
+ output->path().c_str(), deps->mtime, output->mtime());
+ return false;
+ }
+
+ vector<Node*>::iterator implicit_dep =
+ PreallocateSpace(edge, deps->node_count);
+ for (int i = 0; i < deps->node_count; ++i, ++implicit_dep) {
+ Node* node = deps->nodes[i];
+ *implicit_dep = node;
+ node->AddOutEdge(edge);
+ CreatePhonyInEdge(node);
+ }
+ return true;
+}
+
+vector<Node*>::iterator ImplicitDepLoader::PreallocateSpace(Edge* edge,
+ int count) {
+ edge->inputs_.insert(edge->inputs_.end() - edge->order_only_deps_,
+ (size_t)count, 0);
+ edge->implicit_deps_ += count;
+ return edge->inputs_.end() - edge->order_only_deps_ - count;
+}
+
+void ImplicitDepLoader::CreatePhonyInEdge(Node* node) {
+ if (node->in_edge())
+ return;
+
+ Edge* phony_edge = state_->AddEdge(&State::kPhonyRule);
+ node->set_in_edge(phony_edge);
+ phony_edge->outputs_.push_back(node);
+
+ // RecomputeDirty might not be called for phony_edge if a previous call
+ // to RecomputeDirty had caused the file to be stat'ed. Because previous
+ // invocations of RecomputeDirty would have seen this node without an
+ // input edge (and therefore ready), we have to set outputs_ready_ to true
+ // to avoid a potential stuck build. If we do call RecomputeDirty for
+ // this node, it will simply set outputs_ready_ to the correct value.
+ phony_edge->outputs_ready_ = true;
+}
diff --git a/src/graph.h b/src/graph.h
new file mode 100644
index 0000000..4833f49
--- /dev/null
+++ b/src/graph.h
@@ -0,0 +1,321 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_GRAPH_H_
+#define NINJA_GRAPH_H_
+
+#include <string>
+#include <vector>
+
+#include "dyndep.h"
+#include "eval_env.h"
+#include "timestamp.h"
+#include "util.h"
+
+struct BuildLog;
+struct DepfileParserOptions;
+struct DiskInterface;
+struct DepsLog;
+struct Edge;
+struct Node;
+struct Pool;
+struct State;
+
+/// Information about a node in the dependency graph: the file, whether
+/// it's dirty, mtime, etc.
+struct Node {
+ Node(const std::string& path, uint64_t slash_bits)
+ : path_(path),
+ slash_bits_(slash_bits),
+ mtime_(-1),
+ dirty_(false),
+ dyndep_pending_(false),
+ in_edge_(NULL),
+ id_(-1) {}
+
+ /// Return false on error.
+ bool Stat(DiskInterface* disk_interface, std::string* err);
+
+ /// Return false on error.
+ bool StatIfNecessary(DiskInterface* disk_interface, std::string* err) {
+ if (status_known())
+ return true;
+ return Stat(disk_interface, err);
+ }
+
+ /// Mark as not-yet-stat()ed and not dirty.
+ void ResetState() {
+ mtime_ = -1;
+ dirty_ = false;
+ }
+
+ /// Mark the Node as already-stat()ed and missing.
+ void MarkMissing() {
+ mtime_ = 0;
+ }
+
+ bool exists() const {
+ return mtime_ != 0;
+ }
+
+ bool status_known() const {
+ return mtime_ != -1;
+ }
+
+ const std::string& path() const { return path_; }
+ /// Get |path()| but use slash_bits to convert back to original slash styles.
+ std::string PathDecanonicalized() const {
+ return PathDecanonicalized(path_, slash_bits_);
+ }
+ static std::string PathDecanonicalized(const std::string& path,
+ uint64_t slash_bits);
+ uint64_t slash_bits() const { return slash_bits_; }
+
+ TimeStamp mtime() const { return mtime_; }
+
+ bool dirty() const { return dirty_; }
+ void set_dirty(bool dirty) { dirty_ = dirty; }
+ void MarkDirty() { dirty_ = true; }
+
+ bool dyndep_pending() const { return dyndep_pending_; }
+ void set_dyndep_pending(bool pending) { dyndep_pending_ = pending; }
+
+ Edge* in_edge() const { return in_edge_; }
+ void set_in_edge(Edge* edge) { in_edge_ = edge; }
+
+ int id() const { return id_; }
+ void set_id(int id) { id_ = id; }
+
+ const std::vector<Edge*>& out_edges() const { return out_edges_; }
+ void AddOutEdge(Edge* edge) { out_edges_.push_back(edge); }
+
+ void Dump(const char* prefix="") const;
+
+private:
+ std::string path_;
+
+ /// Set bits starting from lowest for backslashes that were normalized to
+ /// forward slashes by CanonicalizePath. See |PathDecanonicalized|.
+ uint64_t slash_bits_;
+
+ /// Possible values of mtime_:
+ /// -1: file hasn't been examined
+ /// 0: we looked, and file doesn't exist
+ /// >0: actual file's mtime
+ TimeStamp mtime_;
+
+ /// Dirty is true when the underlying file is out-of-date.
+ /// But note that Edge::outputs_ready_ is also used in judging which
+ /// edges to build.
+ bool dirty_;
+
+ /// Store whether dyndep information is expected from this node but
+ /// has not yet been loaded.
+ bool dyndep_pending_;
+
+ /// The Edge that produces this Node, or NULL when there is no
+ /// known edge to produce it.
+ Edge* in_edge_;
+
+ /// All Edges that use this Node as an input.
+ std::vector<Edge*> out_edges_;
+
+ /// A dense integer id for the node, assigned and used by DepsLog.
+ int id_;
+};
+
+/// An edge in the dependency graph; links between Nodes using Rules.
+struct Edge {
+ enum VisitMark {
+ VisitNone,
+ VisitInStack,
+ VisitDone
+ };
+
+ Edge() : rule_(NULL), pool_(NULL), dyndep_(NULL), env_(NULL),
+ mark_(VisitNone), outputs_ready_(false), deps_loaded_(false),
+ deps_missing_(false), implicit_deps_(0), order_only_deps_(0),
+ implicit_outs_(0) {}
+
+ /// Return true if all inputs' in-edges are ready.
+ bool AllInputsReady() const;
+
+ /// Expand all variables in a command and return it as a string.
+ /// If incl_rsp_file is enabled, the string will also contain the
+ /// full contents of a response file (if applicable)
+ std::string EvaluateCommand(bool incl_rsp_file = false) const;
+
+ /// Returns the shell-escaped value of |key|.
+ std::string GetBinding(const std::string& key) const;
+ bool GetBindingBool(const std::string& key) const;
+
+ /// Like GetBinding("depfile"), but without shell escaping.
+ std::string GetUnescapedDepfile() const;
+ /// Like GetBinding("dyndep"), but without shell escaping.
+ std::string GetUnescapedDyndep() const;
+ /// Like GetBinding("rspfile"), but without shell escaping.
+ std::string GetUnescapedRspfile() const;
+
+ void Dump(const char* prefix="") const;
+
+ const Rule* rule_;
+ Pool* pool_;
+ std::vector<Node*> inputs_;
+ std::vector<Node*> outputs_;
+ Node* dyndep_;
+ BindingEnv* env_;
+ VisitMark mark_;
+ bool outputs_ready_;
+ bool deps_loaded_;
+ bool deps_missing_;
+
+ const Rule& rule() const { return *rule_; }
+ Pool* pool() const { return pool_; }
+ int weight() const { return 1; }
+ bool outputs_ready() const { return outputs_ready_; }
+
+ // There are three types of inputs.
+ // 1) explicit deps, which show up as $in on the command line;
+ // 2) implicit deps, which the target depends on implicitly (e.g. C headers),
+ // and changes in them cause the target to rebuild;
+ // 3) order-only deps, which are needed before the target builds but which
+ // don't cause the target to rebuild.
+ // These are stored in inputs_ in that order, and we keep counts of
+ // #2 and #3 when we need to access the various subsets.
+ int implicit_deps_;
+ int order_only_deps_;
+ bool is_implicit(size_t index) {
+ return index >= inputs_.size() - order_only_deps_ - implicit_deps_ &&
+ !is_order_only(index);
+ }
+ bool is_order_only(size_t index) {
+ return index >= inputs_.size() - order_only_deps_;
+ }
+
+ // There are two types of outputs.
+ // 1) explicit outs, which show up as $out on the command line;
+ // 2) implicit outs, which the target generates but are not part of $out.
+ // These are stored in outputs_ in that order, and we keep a count of
+ // #2 to use when we need to access the various subsets.
+ int implicit_outs_;
+ bool is_implicit_out(size_t index) const {
+ return index >= outputs_.size() - implicit_outs_;
+ }
+
+ bool is_phony() const;
+ bool use_console() const;
+ bool maybe_phonycycle_diagnostic() const;
+};
+
+
+/// ImplicitDepLoader loads implicit dependencies, as referenced via the
+/// "depfile" attribute in build files.
+struct ImplicitDepLoader {
+ ImplicitDepLoader(State* state, DepsLog* deps_log,
+ DiskInterface* disk_interface,
+ DepfileParserOptions const* depfile_parser_options)
+ : state_(state), disk_interface_(disk_interface), deps_log_(deps_log),
+ depfile_parser_options_(depfile_parser_options) {}
+
+ /// Load implicit dependencies for \a edge.
+ /// @return false on error (without filling \a err if info is just missing
+  /// or out of date).
+ bool LoadDeps(Edge* edge, std::string* err);
+
+ DepsLog* deps_log() const {
+ return deps_log_;
+ }
+
+ private:
+ /// Load implicit dependencies for \a edge from a depfile attribute.
+ /// @return false on error (without filling \a err if info is just missing).
+ bool LoadDepFile(Edge* edge, const std::string& path, std::string* err);
+
+ /// Load implicit dependencies for \a edge from the DepsLog.
+ /// @return false on error (without filling \a err if info is just missing).
+ bool LoadDepsFromLog(Edge* edge, std::string* err);
+
+ /// Preallocate \a count spaces in the input array on \a edge, returning
+ /// an iterator pointing at the first new space.
+ std::vector<Node*>::iterator PreallocateSpace(Edge* edge, int count);
+
+  /// If we don't have an edge that generates this input already,
+ /// create one; this makes us not abort if the input is missing,
+ /// but instead will rebuild in that circumstance.
+ void CreatePhonyInEdge(Node* node);
+
+ State* state_;
+ DiskInterface* disk_interface_;
+ DepsLog* deps_log_;
+ DepfileParserOptions const* depfile_parser_options_;
+};
+
+
+/// DependencyScan manages the process of scanning the files in a graph
+/// and updating the dirty/outputs_ready state of all the nodes and edges.
+struct DependencyScan {
+ DependencyScan(State* state, BuildLog* build_log, DepsLog* deps_log,
+ DiskInterface* disk_interface,
+ DepfileParserOptions const* depfile_parser_options)
+ : build_log_(build_log),
+ disk_interface_(disk_interface),
+ dep_loader_(state, deps_log, disk_interface, depfile_parser_options),
+ dyndep_loader_(state, disk_interface) {}
+
+ /// Update the |dirty_| state of the given node by inspecting its input edge.
+ /// Examine inputs, outputs, and command lines to judge whether an edge
+ /// needs to be re-run, and update outputs_ready_ and each outputs' |dirty_|
+ /// state accordingly.
+ /// Returns false on failure.
+ bool RecomputeDirty(Node* node, std::string* err);
+
+ /// Recompute whether any output of the edge is dirty, if so sets |*dirty|.
+ /// Returns false on failure.
+ bool RecomputeOutputsDirty(Edge* edge, Node* most_recent_input,
+ bool* dirty, std::string* err);
+
+ BuildLog* build_log() const {
+ return build_log_;
+ }
+ void set_build_log(BuildLog* log) {
+ build_log_ = log;
+ }
+
+ DepsLog* deps_log() const {
+ return dep_loader_.deps_log();
+ }
+
+ /// Load a dyndep file from the given node's path and update the
+ /// build graph with the new information. One overload accepts
+ /// a caller-owned 'DyndepFile' object in which to store the
+ /// information loaded from the dyndep file.
+ bool LoadDyndeps(Node* node, std::string* err) const;
+ bool LoadDyndeps(Node* node, DyndepFile* ddf, std::string* err) const;
+
+ private:
+ bool RecomputeDirty(Node* node, std::vector<Node*>* stack, std::string* err);
+ bool VerifyDAG(Node* node, std::vector<Node*>* stack, std::string* err);
+
+ /// Recompute whether a given single output should be marked dirty.
+ /// Returns true if so.
+ bool RecomputeOutputDirty(const Edge* edge, const Node* most_recent_input,
+ const std::string& command, Node* output);
+
+ BuildLog* build_log_;
+ DiskInterface* disk_interface_;
+ ImplicitDepLoader dep_loader_;
+ DyndepLoader dyndep_loader_;
+};
+
+#endif // NINJA_GRAPH_H_
diff --git a/src/graph_test.cc b/src/graph_test.cc
new file mode 100644
index 0000000..14f6375
--- /dev/null
+++ b/src/graph_test.cc
@@ -0,0 +1,860 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "graph.h"
+#include "build.h"
+
+#include "test.h"
+
+using namespace std;
+
+struct GraphTest : public StateTestWithBuiltinRules {
+ GraphTest() : scan_(&state_, NULL, NULL, &fs_, NULL) {}
+
+ VirtualFileSystem fs_;
+ DependencyScan scan_;
+};
+
+TEST_F(GraphTest, MissingImplicit) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out: cat in | implicit\n"));
+ fs_.Create("in", "");
+ fs_.Create("out", "");
+
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out"), &err));
+ ASSERT_EQ("", err);
+
+ // A missing implicit dep *should* make the output dirty.
+ // (In fact, a build will fail.)
+ // This is a change from prior semantics of ninja.
+ EXPECT_TRUE(GetNode("out")->dirty());
+}
+
+TEST_F(GraphTest, ModifiedImplicit) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out: cat in | implicit\n"));
+ fs_.Create("in", "");
+ fs_.Create("out", "");
+ fs_.Tick();
+ fs_.Create("implicit", "");
+
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out"), &err));
+ ASSERT_EQ("", err);
+
+ // A modified implicit dep should make the output dirty.
+ EXPECT_TRUE(GetNode("out")->dirty());
+}
+
+TEST_F(GraphTest, FunkyMakefilePath) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule catdep\n"
+" depfile = $out.d\n"
+" command = cat $in > $out\n"
+"build out.o: catdep foo.cc\n"));
+ fs_.Create("foo.cc", "");
+ fs_.Create("out.o.d", "out.o: ./foo/../implicit.h\n");
+ fs_.Create("out.o", "");
+ fs_.Tick();
+ fs_.Create("implicit.h", "");
+
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out.o"), &err));
+ ASSERT_EQ("", err);
+
+ // implicit.h has changed, though our depfile refers to it with a
+ // non-canonical path; we should still find it.
+ EXPECT_TRUE(GetNode("out.o")->dirty());
+}
+
+TEST_F(GraphTest, ExplicitImplicit) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule catdep\n"
+" depfile = $out.d\n"
+" command = cat $in > $out\n"
+"build implicit.h: cat data\n"
+"build out.o: catdep foo.cc || implicit.h\n"));
+ fs_.Create("implicit.h", "");
+ fs_.Create("foo.cc", "");
+ fs_.Create("out.o.d", "out.o: implicit.h\n");
+ fs_.Create("out.o", "");
+ fs_.Tick();
+ fs_.Create("data", "");
+
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out.o"), &err));
+ ASSERT_EQ("", err);
+
+ // We have both an implicit and an explicit dep on implicit.h.
+ // The implicit dep should "win" (in the sense that it should cause
+ // the output to be dirty).
+ EXPECT_TRUE(GetNode("out.o")->dirty());
+}
+
+TEST_F(GraphTest, ImplicitOutputParse) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out | out.imp: cat in\n"));
+
+ Edge* edge = GetNode("out")->in_edge();
+ EXPECT_EQ(2, edge->outputs_.size());
+ EXPECT_EQ("out", edge->outputs_[0]->path());
+ EXPECT_EQ("out.imp", edge->outputs_[1]->path());
+ EXPECT_EQ(1, edge->implicit_outs_);
+ EXPECT_EQ(edge, GetNode("out.imp")->in_edge());
+}
+
+TEST_F(GraphTest, ImplicitOutputMissing) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out | out.imp: cat in\n"));
+ fs_.Create("in", "");
+ fs_.Create("out", "");
+
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out"), &err));
+ ASSERT_EQ("", err);
+
+ EXPECT_TRUE(GetNode("out")->dirty());
+ EXPECT_TRUE(GetNode("out.imp")->dirty());
+}
+
+TEST_F(GraphTest, ImplicitOutputOutOfDate) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out | out.imp: cat in\n"));
+ fs_.Create("out.imp", "");
+ fs_.Tick();
+ fs_.Create("in", "");
+ fs_.Create("out", "");
+
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out"), &err));
+ ASSERT_EQ("", err);
+
+ EXPECT_TRUE(GetNode("out")->dirty());
+ EXPECT_TRUE(GetNode("out.imp")->dirty());
+}
+
+TEST_F(GraphTest, ImplicitOutputOnlyParse) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build | out.imp: cat in\n"));
+
+ Edge* edge = GetNode("out.imp")->in_edge();
+ EXPECT_EQ(1, edge->outputs_.size());
+ EXPECT_EQ("out.imp", edge->outputs_[0]->path());
+ EXPECT_EQ(1, edge->implicit_outs_);
+ EXPECT_EQ(edge, GetNode("out.imp")->in_edge());
+}
+
+TEST_F(GraphTest, ImplicitOutputOnlyMissing) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build | out.imp: cat in\n"));
+ fs_.Create("in", "");
+
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out.imp"), &err));
+ ASSERT_EQ("", err);
+
+ EXPECT_TRUE(GetNode("out.imp")->dirty());
+}
+
+TEST_F(GraphTest, ImplicitOutputOnlyOutOfDate) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build | out.imp: cat in\n"));
+ fs_.Create("out.imp", "");
+ fs_.Tick();
+ fs_.Create("in", "");
+
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out.imp"), &err));
+ ASSERT_EQ("", err);
+
+ EXPECT_TRUE(GetNode("out.imp")->dirty());
+}
+
+TEST_F(GraphTest, PathWithCurrentDirectory) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule catdep\n"
+" depfile = $out.d\n"
+" command = cat $in > $out\n"
+"build ./out.o: catdep ./foo.cc\n"));
+ fs_.Create("foo.cc", "");
+ fs_.Create("out.o.d", "out.o: foo.cc\n");
+ fs_.Create("out.o", "");
+
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out.o"), &err));
+ ASSERT_EQ("", err);
+
+ EXPECT_FALSE(GetNode("out.o")->dirty());
+}
+
+TEST_F(GraphTest, RootNodes) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out1: cat in1\n"
+"build mid1: cat in1\n"
+"build out2: cat mid1\n"
+"build out3 out4: cat mid1\n"));
+
+ string err;
+ vector<Node*> root_nodes = state_.RootNodes(&err);
+ EXPECT_EQ(4u, root_nodes.size());
+ for (size_t i = 0; i < root_nodes.size(); ++i) {
+ string name = root_nodes[i]->path();
+ EXPECT_EQ("out", name.substr(0, 3));
+ }
+}
+
+TEST_F(GraphTest, VarInOutPathEscaping) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build a$ b: cat no'space with$ space$$ no\"space2\n"));
+
+ Edge* edge = GetNode("a b")->in_edge();
+#ifdef _WIN32
+ EXPECT_EQ("cat no'space \"with space$\" \"no\\\"space2\" > \"a b\"",
+ edge->EvaluateCommand());
+#else
+ EXPECT_EQ("cat 'no'\\''space' 'with space$' 'no\"space2' > 'a b'",
+ edge->EvaluateCommand());
+#endif
+}
+
+// Regression test for https://github.com/ninja-build/ninja/issues/380
+TEST_F(GraphTest, DepfileWithCanonicalizablePath) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule catdep\n"
+" depfile = $out.d\n"
+" command = cat $in > $out\n"
+"build ./out.o: catdep ./foo.cc\n"));
+ fs_.Create("foo.cc", "");
+ fs_.Create("out.o.d", "out.o: bar/../foo.cc\n");
+ fs_.Create("out.o", "");
+
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out.o"), &err));
+ ASSERT_EQ("", err);
+
+ EXPECT_FALSE(GetNode("out.o")->dirty());
+}
+
+// Regression test for https://github.com/ninja-build/ninja/issues/404
+TEST_F(GraphTest, DepfileRemoved) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule catdep\n"
+" depfile = $out.d\n"
+" command = cat $in > $out\n"
+"build ./out.o: catdep ./foo.cc\n"));
+ fs_.Create("foo.h", "");
+ fs_.Create("foo.cc", "");
+ fs_.Tick();
+ fs_.Create("out.o.d", "out.o: foo.h\n");
+ fs_.Create("out.o", "");
+
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out.o"), &err));
+ ASSERT_EQ("", err);
+ EXPECT_FALSE(GetNode("out.o")->dirty());
+
+ state_.Reset();
+ fs_.RemoveFile("out.o.d");
+ EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out.o"), &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(GetNode("out.o")->dirty());
+}
+
+// Check that rule-level variables are in scope for eval.
+TEST_F(GraphTest, RuleVariablesInScope) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule r\n"
+" depfile = x\n"
+" command = depfile is $depfile\n"
+"build out: r in\n"));
+ Edge* edge = GetNode("out")->in_edge();
+ EXPECT_EQ("depfile is x", edge->EvaluateCommand());
+}
+
+// Check that build statements can override rule builtins like depfile.
+TEST_F(GraphTest, DepfileOverride) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule r\n"
+" depfile = x\n"
+" command = unused\n"
+"build out: r in\n"
+" depfile = y\n"));
+ Edge* edge = GetNode("out")->in_edge();
+ EXPECT_EQ("y", edge->GetBinding("depfile"));
+}
+
+// Check that overridden values show up in expansion of rule-level bindings.
+TEST_F(GraphTest, DepfileOverrideParent) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule r\n"
+" depfile = x\n"
+" command = depfile is $depfile\n"
+"build out: r in\n"
+" depfile = y\n"));
+ Edge* edge = GetNode("out")->in_edge();
+ EXPECT_EQ("depfile is y", edge->GetBinding("command"));
+}
+
+// Verify that building a nested phony rule prints "no work to do"
+TEST_F(GraphTest, NestedPhonyPrintsDone) {
+ AssertParse(&state_,
+"build n1: phony \n"
+"build n2: phony n1\n"
+ );
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(GetNode("n2"), &err));
+ ASSERT_EQ("", err);
+
+ Plan plan_;
+ EXPECT_TRUE(plan_.AddTarget(GetNode("n2"), &err));
+ ASSERT_EQ("", err);
+
+ EXPECT_EQ(0, plan_.command_edge_count());
+ ASSERT_FALSE(plan_.more_to_do());
+}
+
+TEST_F(GraphTest, PhonySelfReferenceError) {
+ ManifestParserOptions parser_opts;
+ parser_opts.phony_cycle_action_ = kPhonyCycleActionError;
+ AssertParse(&state_,
+"build a: phony a\n",
+ parser_opts);
+
+ string err;
+ EXPECT_FALSE(scan_.RecomputeDirty(GetNode("a"), &err));
+ ASSERT_EQ("dependency cycle: a -> a [-w phonycycle=err]", err);
+}
+
+TEST_F(GraphTest, DependencyCycle) {
+ AssertParse(&state_,
+"build out: cat mid\n"
+"build mid: cat in\n"
+"build in: cat pre\n"
+"build pre: cat out\n");
+
+ string err;
+ EXPECT_FALSE(scan_.RecomputeDirty(GetNode("out"), &err));
+ ASSERT_EQ("dependency cycle: out -> mid -> in -> pre -> out", err);
+}
+
+TEST_F(GraphTest, CycleInEdgesButNotInNodes1) {
+ string err;
+ AssertParse(&state_,
+"build a b: cat a\n");
+ EXPECT_FALSE(scan_.RecomputeDirty(GetNode("b"), &err));
+ ASSERT_EQ("dependency cycle: a -> a", err);
+}
+
+TEST_F(GraphTest, CycleInEdgesButNotInNodes2) {
+ string err;
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build b a: cat a\n"));
+ EXPECT_FALSE(scan_.RecomputeDirty(GetNode("b"), &err));
+ ASSERT_EQ("dependency cycle: a -> a", err);
+}
+
+TEST_F(GraphTest, CycleInEdgesButNotInNodes3) {
+ string err;
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build a b: cat c\n"
+"build c: cat a\n"));
+ EXPECT_FALSE(scan_.RecomputeDirty(GetNode("b"), &err));
+ ASSERT_EQ("dependency cycle: a -> c -> a", err);
+}
+
+TEST_F(GraphTest, CycleInEdgesButNotInNodes4) {
+ string err;
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build d: cat c\n"
+"build c: cat b\n"
+"build b: cat a\n"
+"build a e: cat d\n"
+"build f: cat e\n"));
+ EXPECT_FALSE(scan_.RecomputeDirty(GetNode("f"), &err));
+ ASSERT_EQ("dependency cycle: a -> d -> c -> b -> a", err);
+}
+
+// Verify that cycles in graphs with multiple outputs are handled correctly
+// in RecomputeDirty() and don't cause deps to be loaded multiple times.
+TEST_F(GraphTest, CycleWithLengthZeroFromDepfile) {
+ AssertParse(&state_,
+"rule deprule\n"
+" depfile = dep.d\n"
+" command = unused\n"
+"build a b: deprule\n"
+ );
+ fs_.Create("dep.d", "a: b\n");
+
+ string err;
+ EXPECT_FALSE(scan_.RecomputeDirty(GetNode("a"), &err));
+ ASSERT_EQ("dependency cycle: b -> b", err);
+
+ // Despite the depfile causing edge to be a cycle (it has outputs a and b,
+ // but the depfile also adds b as an input), the deps should have been loaded
+ // only once:
+ Edge* edge = GetNode("a")->in_edge();
+ EXPECT_EQ(1, edge->inputs_.size());
+ EXPECT_EQ("b", edge->inputs_[0]->path());
+}
+
+// Like CycleWithLengthZeroFromDepfile but with a higher cycle length.
+TEST_F(GraphTest, CycleWithLengthOneFromDepfile) {
+ AssertParse(&state_,
+"rule deprule\n"
+" depfile = dep.d\n"
+" command = unused\n"
+"rule r\n"
+" command = unused\n"
+"build a b: deprule\n"
+"build c: r b\n"
+ );
+ fs_.Create("dep.d", "a: c\n");
+
+ string err;
+ EXPECT_FALSE(scan_.RecomputeDirty(GetNode("a"), &err));
+ ASSERT_EQ("dependency cycle: b -> c -> b", err);
+
+ // Despite the depfile causing edge to be a cycle (|edge| has outputs a and b,
+ // but c's in_edge has b as input but the depfile also adds |edge| as
+ // output)), the deps should have been loaded only once:
+ Edge* edge = GetNode("a")->in_edge();
+ EXPECT_EQ(1, edge->inputs_.size());
+ EXPECT_EQ("c", edge->inputs_[0]->path());
+}
+
+// Like CycleWithLengthOneFromDepfile but building a node one hop away from
+// the cycle.
+TEST_F(GraphTest, CycleWithLengthOneFromDepfileOneHopAway) {
+ AssertParse(&state_,
+"rule deprule\n"
+" depfile = dep.d\n"
+" command = unused\n"
+"rule r\n"
+" command = unused\n"
+"build a b: deprule\n"
+"build c: r b\n"
+"build d: r a\n"
+ );
+ fs_.Create("dep.d", "a: c\n");
+
+ string err;
+ EXPECT_FALSE(scan_.RecomputeDirty(GetNode("d"), &err));
+ ASSERT_EQ("dependency cycle: b -> c -> b", err);
+
+ // Despite the depfile causing edge to be a cycle (|edge| has outputs a and b,
+ // but c's in_edge has b as input but the depfile also adds |edge| as
+ // output)), the deps should have been loaded only once:
+ Edge* edge = GetNode("a")->in_edge();
+ EXPECT_EQ(1, edge->inputs_.size());
+ EXPECT_EQ("c", edge->inputs_[0]->path());
+}
+
+#ifdef _WIN32
+TEST_F(GraphTest, Decanonicalize) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out\\out1: cat src\\in1\n"
+"build out\\out2/out3\\out4: cat mid1\n"
+"build out3 out4\\foo: cat mid1\n"));
+
+ string err;
+ vector<Node*> root_nodes = state_.RootNodes(&err);
+ EXPECT_EQ(4u, root_nodes.size());
+ EXPECT_EQ(root_nodes[0]->path(), "out/out1");
+ EXPECT_EQ(root_nodes[1]->path(), "out/out2/out3/out4");
+ EXPECT_EQ(root_nodes[2]->path(), "out3");
+ EXPECT_EQ(root_nodes[3]->path(), "out4/foo");
+ EXPECT_EQ(root_nodes[0]->PathDecanonicalized(), "out\\out1");
+ EXPECT_EQ(root_nodes[1]->PathDecanonicalized(), "out\\out2/out3\\out4");
+ EXPECT_EQ(root_nodes[2]->PathDecanonicalized(), "out3");
+ EXPECT_EQ(root_nodes[3]->PathDecanonicalized(), "out4\\foo");
+}
+#endif
+
+TEST_F(GraphTest, DyndepLoadTrivial) {
+ AssertParse(&state_,
+"rule r\n"
+" command = unused\n"
+"build out: r in || dd\n"
+" dyndep = dd\n"
+ );
+ fs_.Create("dd",
+"ninja_dyndep_version = 1\n"
+"build out: dyndep\n"
+ );
+
+ string err;
+ ASSERT_TRUE(GetNode("dd")->dyndep_pending());
+ EXPECT_TRUE(scan_.LoadDyndeps(GetNode("dd"), &err));
+ EXPECT_EQ("", err);
+ EXPECT_FALSE(GetNode("dd")->dyndep_pending());
+
+ Edge* edge = GetNode("out")->in_edge();
+ ASSERT_EQ(1u, edge->outputs_.size());
+ EXPECT_EQ("out", edge->outputs_[0]->path());
+ ASSERT_EQ(2u, edge->inputs_.size());
+ EXPECT_EQ("in", edge->inputs_[0]->path());
+ EXPECT_EQ("dd", edge->inputs_[1]->path());
+ EXPECT_EQ(0u, edge->implicit_deps_);
+ EXPECT_EQ(1u, edge->order_only_deps_);
+ EXPECT_FALSE(edge->GetBindingBool("restat"));
+}
+
+TEST_F(GraphTest, DyndepLoadMissingFile) {
+ AssertParse(&state_,
+"rule r\n"
+" command = unused\n"
+"build out: r in || dd\n"
+" dyndep = dd\n"
+ );
+
+ string err;
+ ASSERT_TRUE(GetNode("dd")->dyndep_pending());
+ EXPECT_FALSE(scan_.LoadDyndeps(GetNode("dd"), &err));
+ EXPECT_EQ("loading 'dd': No such file or directory", err);
+}
+
+TEST_F(GraphTest, DyndepLoadMissingEntry) {
+ AssertParse(&state_,
+"rule r\n"
+" command = unused\n"
+"build out: r in || dd\n"
+" dyndep = dd\n"
+ );
+ fs_.Create("dd",
+"ninja_dyndep_version = 1\n"
+ );
+
+ string err;
+ ASSERT_TRUE(GetNode("dd")->dyndep_pending());
+ EXPECT_FALSE(scan_.LoadDyndeps(GetNode("dd"), &err));
+ EXPECT_EQ("'out' not mentioned in its dyndep file 'dd'", err);
+}
+
+TEST_F(GraphTest, DyndepLoadExtraEntry) {
+ AssertParse(&state_,
+"rule r\n"
+" command = unused\n"
+"build out: r in || dd\n"
+" dyndep = dd\n"
+"build out2: r in || dd\n"
+ );
+ fs_.Create("dd",
+"ninja_dyndep_version = 1\n"
+"build out: dyndep\n"
+"build out2: dyndep\n"
+ );
+
+ string err;
+ ASSERT_TRUE(GetNode("dd")->dyndep_pending());
+ EXPECT_FALSE(scan_.LoadDyndeps(GetNode("dd"), &err));
+ EXPECT_EQ("dyndep file 'dd' mentions output 'out2' whose build statement "
+ "does not have a dyndep binding for the file", err);
+}
+
+TEST_F(GraphTest, DyndepLoadOutputWithMultipleRules1) {
+ AssertParse(&state_,
+"rule r\n"
+" command = unused\n"
+"build out1 | out-twice.imp: r in1\n"
+"build out2: r in2 || dd\n"
+" dyndep = dd\n"
+ );
+ fs_.Create("dd",
+"ninja_dyndep_version = 1\n"
+"build out2 | out-twice.imp: dyndep\n"
+ );
+
+ string err;
+ ASSERT_TRUE(GetNode("dd")->dyndep_pending());
+ EXPECT_FALSE(scan_.LoadDyndeps(GetNode("dd"), &err));
+ EXPECT_EQ("multiple rules generate out-twice.imp", err);
+}
+
+TEST_F(GraphTest, DyndepLoadOutputWithMultipleRules2) {
+ AssertParse(&state_,
+"rule r\n"
+" command = unused\n"
+"build out1: r in1 || dd1\n"
+" dyndep = dd1\n"
+"build out2: r in2 || dd2\n"
+" dyndep = dd2\n"
+ );
+ fs_.Create("dd1",
+"ninja_dyndep_version = 1\n"
+"build out1 | out-twice.imp: dyndep\n"
+ );
+ fs_.Create("dd2",
+"ninja_dyndep_version = 1\n"
+"build out2 | out-twice.imp: dyndep\n"
+ );
+
+ string err;
+ ASSERT_TRUE(GetNode("dd1")->dyndep_pending());
+ EXPECT_TRUE(scan_.LoadDyndeps(GetNode("dd1"), &err));
+ EXPECT_EQ("", err);
+ ASSERT_TRUE(GetNode("dd2")->dyndep_pending());
+ EXPECT_FALSE(scan_.LoadDyndeps(GetNode("dd2"), &err));
+ EXPECT_EQ("multiple rules generate out-twice.imp", err);
+}
+
+TEST_F(GraphTest, DyndepLoadMultiple) {
+ AssertParse(&state_,
+"rule r\n"
+" command = unused\n"
+"build out1: r in1 || dd\n"
+" dyndep = dd\n"
+"build out2: r in2 || dd\n"
+" dyndep = dd\n"
+"build outNot: r in3 || dd\n"
+ );
+ fs_.Create("dd",
+"ninja_dyndep_version = 1\n"
+"build out1 | out1imp: dyndep | in1imp\n"
+"build out2: dyndep | in2imp\n"
+" restat = 1\n"
+ );
+
+ string err;
+ ASSERT_TRUE(GetNode("dd")->dyndep_pending());
+ EXPECT_TRUE(scan_.LoadDyndeps(GetNode("dd"), &err));
+ EXPECT_EQ("", err);
+ EXPECT_FALSE(GetNode("dd")->dyndep_pending());
+
+ Edge* edge1 = GetNode("out1")->in_edge();
+ ASSERT_EQ(2u, edge1->outputs_.size());
+ EXPECT_EQ("out1", edge1->outputs_[0]->path());
+ EXPECT_EQ("out1imp", edge1->outputs_[1]->path());
+ EXPECT_EQ(1u, edge1->implicit_outs_);
+ ASSERT_EQ(3u, edge1->inputs_.size());
+ EXPECT_EQ("in1", edge1->inputs_[0]->path());
+ EXPECT_EQ("in1imp", edge1->inputs_[1]->path());
+ EXPECT_EQ("dd", edge1->inputs_[2]->path());
+ EXPECT_EQ(1u, edge1->implicit_deps_);
+ EXPECT_EQ(1u, edge1->order_only_deps_);
+ EXPECT_FALSE(edge1->GetBindingBool("restat"));
+ EXPECT_EQ(edge1, GetNode("out1imp")->in_edge());
+ Node* in1imp = GetNode("in1imp");
+ ASSERT_EQ(1u, in1imp->out_edges().size());
+ EXPECT_EQ(edge1, in1imp->out_edges()[0]);
+
+ Edge* edge2 = GetNode("out2")->in_edge();
+ ASSERT_EQ(1u, edge2->outputs_.size());
+ EXPECT_EQ("out2", edge2->outputs_[0]->path());
+ EXPECT_EQ(0u, edge2->implicit_outs_);
+ ASSERT_EQ(3u, edge2->inputs_.size());
+ EXPECT_EQ("in2", edge2->inputs_[0]->path());
+ EXPECT_EQ("in2imp", edge2->inputs_[1]->path());
+ EXPECT_EQ("dd", edge2->inputs_[2]->path());
+ EXPECT_EQ(1u, edge2->implicit_deps_);
+ EXPECT_EQ(1u, edge2->order_only_deps_);
+ EXPECT_TRUE(edge2->GetBindingBool("restat"));
+ Node* in2imp = GetNode("in2imp");
+ ASSERT_EQ(1u, in2imp->out_edges().size());
+ EXPECT_EQ(edge2, in2imp->out_edges()[0]);
+}
+
+TEST_F(GraphTest, DyndepFileMissing) {
+ AssertParse(&state_,
+"rule r\n"
+" command = unused\n"
+"build out: r || dd\n"
+" dyndep = dd\n"
+ );
+
+ string err;
+ EXPECT_FALSE(scan_.RecomputeDirty(GetNode("out"), &err));
+ ASSERT_EQ("loading 'dd': No such file or directory", err);
+}
+
+TEST_F(GraphTest, DyndepFileError) {
+ AssertParse(&state_,
+"rule r\n"
+" command = unused\n"
+"build out: r || dd\n"
+" dyndep = dd\n"
+ );
+ fs_.Create("dd",
+"ninja_dyndep_version = 1\n"
+ );
+
+ string err;
+ EXPECT_FALSE(scan_.RecomputeDirty(GetNode("out"), &err));
+ ASSERT_EQ("'out' not mentioned in its dyndep file 'dd'", err);
+}
+
+TEST_F(GraphTest, DyndepImplicitInputNewer) {
+ AssertParse(&state_,
+"rule r\n"
+" command = unused\n"
+"build out: r || dd\n"
+" dyndep = dd\n"
+ );
+ fs_.Create("dd",
+"ninja_dyndep_version = 1\n"
+"build out: dyndep | in\n"
+ );
+ fs_.Create("out", "");
+ fs_.Tick();
+ fs_.Create("in", "");
+
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out"), &err));
+ ASSERT_EQ("", err);
+
+ EXPECT_FALSE(GetNode("in")->dirty());
+ EXPECT_FALSE(GetNode("dd")->dirty());
+
+ // "out" is dirty due to dyndep-specified implicit input
+ EXPECT_TRUE(GetNode("out")->dirty());
+}
+
+TEST_F(GraphTest, DyndepFileReady) {
+ AssertParse(&state_,
+"rule r\n"
+" command = unused\n"
+"build dd: r dd-in\n"
+"build out: r || dd\n"
+" dyndep = dd\n"
+ );
+ fs_.Create("dd-in", "");
+ fs_.Create("dd",
+"ninja_dyndep_version = 1\n"
+"build out: dyndep | in\n"
+ );
+ fs_.Create("out", "");
+ fs_.Tick();
+ fs_.Create("in", "");
+
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out"), &err));
+ ASSERT_EQ("", err);
+
+ EXPECT_FALSE(GetNode("in")->dirty());
+ EXPECT_FALSE(GetNode("dd")->dirty());
+ EXPECT_TRUE(GetNode("dd")->in_edge()->outputs_ready());
+
+ // "out" is dirty due to dyndep-specified implicit input
+ EXPECT_TRUE(GetNode("out")->dirty());
+}
+
+TEST_F(GraphTest, DyndepFileNotClean) {
+ AssertParse(&state_,
+"rule r\n"
+" command = unused\n"
+"build dd: r dd-in\n"
+"build out: r || dd\n"
+" dyndep = dd\n"
+ );
+ fs_.Create("dd", "this-should-not-be-loaded");
+ fs_.Tick();
+ fs_.Create("dd-in", "");
+ fs_.Create("out", "");
+
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out"), &err));
+ ASSERT_EQ("", err);
+
+ EXPECT_TRUE(GetNode("dd")->dirty());
+ EXPECT_FALSE(GetNode("dd")->in_edge()->outputs_ready());
+
+ // "out" is clean but not ready since "dd" is not ready
+ EXPECT_FALSE(GetNode("out")->dirty());
+ EXPECT_FALSE(GetNode("out")->in_edge()->outputs_ready());
+}
+
+TEST_F(GraphTest, DyndepFileNotReady) {
+ AssertParse(&state_,
+"rule r\n"
+" command = unused\n"
+"build tmp: r\n"
+"build dd: r dd-in || tmp\n"
+"build out: r || dd\n"
+" dyndep = dd\n"
+ );
+ fs_.Create("dd", "this-should-not-be-loaded");
+ fs_.Create("dd-in", "");
+ fs_.Tick();
+ fs_.Create("out", "");
+
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out"), &err));
+ ASSERT_EQ("", err);
+
+ EXPECT_FALSE(GetNode("dd")->dirty());
+ EXPECT_FALSE(GetNode("dd")->in_edge()->outputs_ready());
+ EXPECT_FALSE(GetNode("out")->dirty());
+ EXPECT_FALSE(GetNode("out")->in_edge()->outputs_ready());
+}
+
+TEST_F(GraphTest, DyndepFileSecondNotReady) {
+ AssertParse(&state_,
+"rule r\n"
+" command = unused\n"
+"build dd1: r dd1-in\n"
+"build dd2-in: r || dd1\n"
+" dyndep = dd1\n"
+"build dd2: r dd2-in\n"
+"build out: r || dd2\n"
+" dyndep = dd2\n"
+ );
+ fs_.Create("dd1", "");
+ fs_.Create("dd2", "");
+ fs_.Create("dd2-in", "");
+ fs_.Tick();
+ fs_.Create("dd1-in", "");
+ fs_.Create("out", "");
+
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out"), &err));
+ ASSERT_EQ("", err);
+
+ EXPECT_TRUE(GetNode("dd1")->dirty());
+ EXPECT_FALSE(GetNode("dd1")->in_edge()->outputs_ready());
+ EXPECT_FALSE(GetNode("dd2")->dirty());
+ EXPECT_FALSE(GetNode("dd2")->in_edge()->outputs_ready());
+ EXPECT_FALSE(GetNode("out")->dirty());
+ EXPECT_FALSE(GetNode("out")->in_edge()->outputs_ready());
+}
+
+TEST_F(GraphTest, DyndepFileCircular) {
+ AssertParse(&state_,
+"rule r\n"
+" command = unused\n"
+"build out: r in || dd\n"
+" depfile = out.d\n"
+" dyndep = dd\n"
+"build in: r circ\n"
+ );
+ fs_.Create("out.d", "out: inimp\n");
+ fs_.Create("dd",
+"ninja_dyndep_version = 1\n"
+"build out | circ: dyndep\n"
+ );
+ fs_.Create("out", "");
+
+ Edge* edge = GetNode("out")->in_edge();
+ string err;
+ EXPECT_FALSE(scan_.RecomputeDirty(GetNode("out"), &err));
+ EXPECT_EQ("dependency cycle: circ -> in -> circ", err);
+
+ // Verify that "out.d" was loaded exactly once despite
+ // circular reference discovered from dyndep file.
+ ASSERT_EQ(3u, edge->inputs_.size());
+ EXPECT_EQ("in", edge->inputs_[0]->path());
+ EXPECT_EQ("inimp", edge->inputs_[1]->path());
+ EXPECT_EQ("dd", edge->inputs_[2]->path());
+ EXPECT_EQ(1u, edge->implicit_deps_);
+ EXPECT_EQ(1u, edge->order_only_deps_);
+}
diff --git a/src/graphviz.cc b/src/graphviz.cc
new file mode 100644
index 0000000..37b7108
--- /dev/null
+++ b/src/graphviz.cc
@@ -0,0 +1,90 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "graphviz.h"
+
+#include <stdio.h>
+#include <algorithm>
+
+#include "dyndep.h"
+#include "graph.h"
+
+using namespace std;
+
+void GraphViz::AddTarget(Node* node) {
+ if (visited_nodes_.find(node) != visited_nodes_.end())
+ return;
+
+ string pathstr = node->path();
+ replace(pathstr.begin(), pathstr.end(), '\\', '/');
+ printf("\"%p\" [label=\"%s\"]\n", node, pathstr.c_str());
+ visited_nodes_.insert(node);
+
+ Edge* edge = node->in_edge();
+
+ if (!edge) {
+ // Leaf node.
+ // Draw as a rect?
+ return;
+ }
+
+ if (visited_edges_.find(edge) != visited_edges_.end())
+ return;
+ visited_edges_.insert(edge);
+
+ if (edge->dyndep_ && edge->dyndep_->dyndep_pending()) {
+ std::string err;
+ if (!dyndep_loader_.LoadDyndeps(edge->dyndep_, &err)) {
+ Warning("%s\n", err.c_str());
+ }
+ }
+
+ if (edge->inputs_.size() == 1 && edge->outputs_.size() == 1) {
+ // Can draw simply.
+ // Note extra space before label text -- this is cosmetic and feels
+ // like a graphviz bug.
+ printf("\"%p\" -> \"%p\" [label=\" %s\"]\n",
+ edge->inputs_[0], edge->outputs_[0], edge->rule_->name().c_str());
+ } else {
+ printf("\"%p\" [label=\"%s\", shape=ellipse]\n",
+ edge, edge->rule_->name().c_str());
+ for (vector<Node*>::iterator out = edge->outputs_.begin();
+ out != edge->outputs_.end(); ++out) {
+ printf("\"%p\" -> \"%p\"\n", edge, *out);
+ }
+ for (vector<Node*>::iterator in = edge->inputs_.begin();
+ in != edge->inputs_.end(); ++in) {
+ const char* order_only = "";
+ if (edge->is_order_only(in - edge->inputs_.begin()))
+ order_only = " style=dotted";
+ printf("\"%p\" -> \"%p\" [arrowhead=none%s]\n", (*in), edge, order_only);
+ }
+ }
+
+ for (vector<Node*>::iterator in = edge->inputs_.begin();
+ in != edge->inputs_.end(); ++in) {
+ AddTarget(*in);
+ }
+}
+
+void GraphViz::Start() {
+ printf("digraph ninja {\n");
+ printf("rankdir=\"LR\"\n");
+ printf("node [fontsize=10, shape=box, height=0.25]\n");
+ printf("edge [fontsize=10]\n");
+}
+
+void GraphViz::Finish() {
+ printf("}\n");
+}
diff --git a/src/graphviz.h b/src/graphviz.h
new file mode 100644
index 0000000..601c9b2
--- /dev/null
+++ b/src/graphviz.h
@@ -0,0 +1,40 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_GRAPHVIZ_H_
+#define NINJA_GRAPHVIZ_H_
+
+#include <set>
+
+#include "dyndep.h"
+
+struct DiskInterface;
+struct Node;
+struct Edge;
+struct State;
+
+/// Runs the process of creating GraphViz .dot file output.
+struct GraphViz {
+ GraphViz(State* state, DiskInterface* disk_interface)
+ : dyndep_loader_(state, disk_interface) {}
+ void Start();
+ void AddTarget(Node* node);
+ void Finish();
+
+ DyndepLoader dyndep_loader_;
+ std::set<Node*> visited_nodes_;
+ std::set<Edge*> visited_edges_;
+};
+
+#endif // NINJA_GRAPHVIZ_H_
diff --git a/src/hash_collision_bench.cc b/src/hash_collision_bench.cc
new file mode 100644
index 0000000..8f37ed0
--- /dev/null
+++ b/src/hash_collision_bench.cc
@@ -0,0 +1,65 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "build_log.h"
+
+#include <algorithm>
+
+#include <stdlib.h>
+#include <time.h>
+
+using namespace std;
+
+int random(int low, int high) {
+ return int(low + (rand() / double(RAND_MAX)) * (high - low) + 0.5);
+}
+
+void RandomCommand(char** s) {
+ int len = random(5, 100);
+ *s = new char[len+1];
+ for (int i = 0; i < len; ++i)
+ (*s)[i] = (char)random(32, 127);
+ (*s)[len] = '\0';
+}
+
+int main() {
+ const int N = 20 * 1000 * 1000;
+
+ // Leak these, else 10% of the runtime is spent destroying strings.
+ char** commands = new char*[N];
+ pair<uint64_t, int>* hashes = new pair<uint64_t, int>[N];
+
+ srand((int)time(NULL));
+
+ for (int i = 0; i < N; ++i) {
+ RandomCommand(&commands[i]);
+ hashes[i] = make_pair(BuildLog::LogEntry::HashCommand(commands[i]), i);
+ }
+
+ sort(hashes, hashes + N);
+
+ int collision_count = 0;
+ for (int i = 1; i < N; ++i) {
+ if (hashes[i - 1].first == hashes[i].first) {
+ if (strcmp(commands[hashes[i - 1].second],
+ commands[hashes[i].second]) != 0) {
+ printf("collision!\n string 1: '%s'\n string 2: '%s'\n",
+ commands[hashes[i - 1].second],
+ commands[hashes[i].second]);
+ collision_count++;
+ }
+ }
+ }
+ printf("\n\n%d collisions after %d runs\n", collision_count, N);
+}
diff --git a/src/hash_map.h b/src/hash_map.h
new file mode 100644
index 0000000..55d2c9d
--- /dev/null
+++ b/src/hash_map.h
@@ -0,0 +1,123 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_MAP_H_
+#define NINJA_MAP_H_
+
+#include <algorithm>
+#include <string.h>
+#include "string_piece.h"
+#include "util.h"
+
+// MurmurHash2, by Austin Appleby
// MurmurHash2, by Austin Appleby.
// Hashes |len| bytes starting at |key| with a fixed seed; used below for
// hashing StringPiece keys. The tail handling is written as an if-chain
// equivalent to the classic fall-through switch (case 3 falls into 2
// falls into 1), avoiding the need for a fallthrough annotation.
static inline
unsigned int MurmurHash2(const void* key, size_t len) {
  static const unsigned int seed = 0xDECAFBAD;
  const unsigned int m = 0x5bd1e995;
  const int r = 24;
  unsigned int h = seed ^ len;
  const unsigned char* data = static_cast<const unsigned char*>(key);
  // Mix the input four bytes at a time.
  size_t remaining = len;
  while (remaining >= 4) {
    unsigned int k;
    memcpy(&k, data, sizeof k);  // unaligned-safe load
    k *= m;
    k ^= k >> r;
    k *= m;
    h *= m;
    h ^= k;
    data += 4;
    remaining -= 4;
  }
  // Mix the trailing 1-3 bytes, if any.
  if (remaining == 3)
    h ^= data[2] << 16;
  if (remaining >= 2)
    h ^= data[1] << 8;
  if (remaining >= 1) {
    h ^= data[0];
    h *= m;
  }
  // Final avalanche.
  h ^= h >> 13;
  h *= m;
  h ^= h >> 15;
  return h;
}
+
// Provide a StringPiece hasher for whichever hash-map flavor the
// toolchain supports: C++11 std::unordered_map, pre-2015 MSVC's
// stdext::hash_map, or GCC's legacy __gnu_cxx::hash_map.
#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900)
#include <unordered_map>

namespace std {
// Specialize std::hash so StringPiece can key a std::unordered_map.
template<>
struct hash<StringPiece> {
  typedef StringPiece argument_type;
  typedef size_t result_type;

  size_t operator()(StringPiece key) const {
    return MurmurHash2(key.str_, key.len_);
  }
};
}

#elif defined(_MSC_VER)
#include <hash_map>

using stdext::hash_map;
using stdext::hash_compare;

// MSVC's hash_compare wants both a hasher (unary operator()) and a
// strict-weak "less than" ordering (binary operator()) in one functor.
struct StringPieceCmp : public hash_compare<StringPiece> {
  size_t operator()(const StringPiece& key) const {
    return MurmurHash2(key.str_, key.len_);
  }
  // Lexicographic byte-wise ordering; length breaks ties when one
  // string is a prefix of the other.
  bool operator()(const StringPiece& a, const StringPiece& b) const {
    int cmp = memcmp(a.str_, b.str_, min(a.len_, b.len_));
    if (cmp < 0) {
      return true;
    } else if (cmp > 0) {
      return false;
    } else {
      return a.len_ < b.len_;
    }
  }
};

#else
#include <ext/hash_map>

using __gnu_cxx::hash_map;

namespace __gnu_cxx {
// Hasher specialization for GCC's pre-C++11 hash_map.
template<>
struct hash<StringPiece> {
  size_t operator()(StringPiece key) const {
    return MurmurHash2(key.str_, key.len_);
  }
};
}
#endif
+
/// A template for hash_maps keyed by a StringPiece whose string is
/// owned externally (typically by the values). Use like:
/// ExternalStringHash<Foo*>::Type foos; to make foos into a hash
/// mapping StringPiece => Foo*.
/// The keys do NOT own their bytes: the mapped values (or some other
/// owner) must outlive the map entries.
template<typename V>
struct ExternalStringHashMap {
#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900)
  // C++11 and MSVC 2015+: std::hash<StringPiece> specialized above.
  typedef std::unordered_map<StringPiece, V> Type;
#elif defined(_MSC_VER)
  // Older MSVC: needs the combined hash/compare functor.
  typedef hash_map<StringPiece, V, StringPieceCmp> Type;
#else
  // Legacy GCC: __gnu_cxx::hash<StringPiece> specialized above.
  typedef hash_map<StringPiece, V> Type;
#endif
};
+
+#endif // NINJA_MAP_H_
diff --git a/src/includes_normalize-win32.cc b/src/includes_normalize-win32.cc
new file mode 100644
index 0000000..9f8dfc2
--- /dev/null
+++ b/src/includes_normalize-win32.cc
@@ -0,0 +1,211 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "includes_normalize.h"
+
+#include "string_piece.h"
+#include "string_piece_util.h"
+#include "util.h"
+
+#include <algorithm>
+#include <iterator>
+#include <sstream>
+
+#include <windows.h>
+
+using namespace std;
+
+namespace {
+
// Resolve |file_name| to an absolute path via GetFullPathNameA into
// |buffer| (capacity |buffer_length| bytes). Returns false and fills
// *err on API failure or if the result would not fit.
bool InternalGetFullPathName(const StringPiece& file_name, char* buffer,
                             size_t buffer_length, string *err) {
  DWORD result_size = GetFullPathNameA(file_name.AsString().c_str(),
                                       buffer_length, buffer, NULL);
  if (result_size == 0) {
    // 0 means the API itself failed; surface the Win32 error text.
    *err = "GetFullPathNameA(" + file_name.AsString() + "): " +
        GetLastErrorString();
    return false;
  } else if (result_size > buffer_length) {
    // On insufficient buffer, GetFullPathNameA returns the required size
    // (including the terminating NUL), which exceeds buffer_length.
    *err = "path too long";
    return false;
  }
  return true;
}
+
// True for either path-separator character Windows accepts.
bool IsPathSeparator(char c) {
  switch (c) {
    case '/':
    case '\\':
      return true;
    default:
      return false;
  }
}
+
+// Return true if paths a and b are on the same windows drive.
+// Return false if this funcation cannot check
+// whether or not on the same windows drive.
+bool SameDriveFast(StringPiece a, StringPiece b) {
+ if (a.size() < 3 || b.size() < 3) {
+ return false;
+ }
+
+ if (!islatinalpha(a[0]) || !islatinalpha(b[0])) {
+ return false;
+ }
+
+ if (ToLowerASCII(a[0]) != ToLowerASCII(b[0])) {
+ return false;
+ }
+
+ if (a[1] != ':' || b[1] != ':') {
+ return false;
+ }
+
+ return IsPathSeparator(a[2]) && IsPathSeparator(b[2]);
+}
+
+// Return true if paths a and b are on the same Windows drive.
// Return true if paths a and b are on the same Windows drive.
// Tries the cheap textual check first; otherwise resolves both paths
// with GetFullPathName and compares the drive components
// case-insensitively. On API failure, returns false with *err set.
bool SameDrive(StringPiece a, StringPiece b, string* err)  {
  if (SameDriveFast(a, b)) {
    return true;
  }

  char a_absolute[_MAX_PATH];
  char b_absolute[_MAX_PATH];
  if (!InternalGetFullPathName(a, a_absolute, sizeof(a_absolute), err)) {
    return false;
  }
  if (!InternalGetFullPathName(b, b_absolute, sizeof(b_absolute), err)) {
    return false;
  }
  char a_drive[_MAX_DIR];
  char b_drive[_MAX_DIR];
  _splitpath(a_absolute, a_drive, NULL, NULL, NULL);
  _splitpath(b_absolute, b_drive, NULL, NULL, NULL);
  // Drive letters are case-insensitive on Windows.
  return _stricmp(a_drive, b_drive) == 0;
}
+
+// Check path |s| is FullPath style returned by GetFullPathName.
+// This ignores difference of path separator.
+// This is used not to call very slow GetFullPathName API.
// Check path |s| is FullPath style returned by GetFullPathName:
// a drive-letter-absolute path containing no "." or ".." components.
// This ignores difference of path separator.
// This is used not to call very slow GetFullPathName API.
bool IsFullPathName(StringPiece s) {
  // Must start with "X:" followed by a separator.
  if (s.size() < 3 ||
      !islatinalpha(s[0]) ||
      s[1] != ':' ||
      !IsPathSeparator(s[2])) {
    return false;
  }

  // Check "." or ".." is contained in path.
  for (size_t i = 2; i < s.size(); ++i) {
    if (!IsPathSeparator(s[i])) {
      continue;
    }

    // Check ".": a separator, a dot, then end-of-string or another
    // separator means a "." component.
    if (i + 1 < s.size() && s[i+1] == '.' &&
        (i + 2 >= s.size() || IsPathSeparator(s[i+2]))) {
      return false;
    }

    // Check "..": same idea with two dots.
    if (i + 2 < s.size() && s[i+1] == '.' && s[i+2] == '.' &&
        (i + 3 >= s.size() || IsPathSeparator(s[i+3]))) {
      return false;
    }
  }

  return true;
}
+
+} // anonymous namespace
+
// Construct a normalizer rooted at |relative_to|: the directory is
// resolved to an absolute, forward-slash path once, and pre-split on
// '/' so Relativize() can reuse the components.
// Note: split_relative_to_ holds StringPieces into relative_to_, so
// the member string must not be reassigned afterwards.
IncludesNormalize::IncludesNormalize(const string& relative_to) {
  string err;
  relative_to_ = AbsPath(relative_to, &err);
  if (!err.empty()) {
    // Cannot continue without a usable base directory.
    Fatal("Initializing IncludesNormalize(): %s", err.c_str());
  }
  split_relative_to_ = SplitStringPiece(relative_to_, '/');
}
+
+string IncludesNormalize::AbsPath(StringPiece s, string* err) {
+ if (IsFullPathName(s)) {
+ string result = s.AsString();
+ for (size_t i = 0; i < result.size(); ++i) {
+ if (result[i] == '\\') {
+ result[i] = '/';
+ }
+ }
+ return result;
+ }
+
+ char result[_MAX_PATH];
+ if (!InternalGetFullPathName(s, result, sizeof(result), err)) {
+ return "";
+ }
+ for (char* c = result; *c; ++c)
+ if (*c == '\\')
+ *c = '/';
+ return result;
+}
+
// Express |path| relative to the pre-split directory components in
// |start_list|. Both are compared case-insensitively component by
// component; the result uses ".." to climb out of the non-shared part
// of |start_list|. Returns "." when the two are the same directory,
// or "" with *err set if |path| cannot be made absolute.
string IncludesNormalize::Relativize(
    StringPiece path, const vector<StringPiece>& start_list, string* err) {
  string abs_path = AbsPath(path, err);
  if (!err->empty())
    return "";
  vector<StringPiece> path_list = SplitStringPiece(abs_path, '/');
  int i;
  // Find the longest shared prefix of the two component lists.
  for (i = 0; i < static_cast<int>(min(start_list.size(), path_list.size()));
       ++i) {
    if (!EqualsCaseInsensitiveASCII(start_list[i], path_list[i])) {
      break;
    }
  }

  vector<StringPiece> rel_list;
  rel_list.reserve(start_list.size() - i + path_list.size() - i);
  // One ".." for each remaining component of the start directory…
  for (int j = 0; j < static_cast<int>(start_list.size() - i); ++j)
    rel_list.push_back("..");
  // …then the unshared tail of the target path.
  for (int j = i; j < static_cast<int>(path_list.size()); ++j)
    rel_list.push_back(path_list[j]);
  if (rel_list.size() == 0)
    return ".";
  return JoinStringPiece(rel_list, '/');
}
+
// Canonicalize |input| (slashes, "." / ".." removal) and, when it lives
// on the same drive as relative_to_, rewrite it relative to that
// directory; paths on other drives are returned canonicalized but
// absolute. Returns false with *err set on failure.
bool IncludesNormalize::Normalize(const string& input,
                                  string* result, string* err) const {
  char copy[_MAX_PATH + 1];
  size_t len = input.size();
  if (len > _MAX_PATH) {
    *err = "path too long";
    return false;
  }
  // Safe: len <= _MAX_PATH was checked, and copy holds _MAX_PATH + 1.
  strncpy(copy, input.c_str(), input.size() + 1);
  uint64_t slash_bits;  // Unused here; CanonicalizePath requires it.
  if (!CanonicalizePath(copy, &len, &slash_bits, err))
    return false;
  StringPiece partially_fixed(copy, len);
  string abs_input = AbsPath(partially_fixed, err);
  if (!err->empty())
    return false;

  if (!SameDrive(abs_input, relative_to_, err)) {
    if (!err->empty())
      return false;
    // Different drive: no relative form exists; keep the canonicalized
    // (but not drive-relative) path.
    *result = partially_fixed.AsString();
    return true;
  }
  *result = Relativize(abs_input, split_relative_to_, err);
  if (!err->empty())
    return false;
  return true;
}
diff --git a/src/includes_normalize.h b/src/includes_normalize.h
new file mode 100644
index 0000000..7d50556
--- /dev/null
+++ b/src/includes_normalize.h
@@ -0,0 +1,40 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
#ifndef NINJA_INCLUDES_NORMALIZE_H_
#define NINJA_INCLUDES_NORMALIZE_H_

#include <string>
#include <vector>

struct StringPiece;

/// Utility functions for normalizing include paths on Windows.
/// TODO: this likely duplicates functionality of CanonicalizePath; refactor.
struct IncludesNormalize {
  /// Normalize path relative to |relative_to|.
  IncludesNormalize(const std::string& relative_to);

  // Internal utilities made available for testing, maybe useful otherwise.
  static std::string AbsPath(StringPiece s, std::string* err);
  static std::string Relativize(StringPiece path,
                                const std::vector<StringPiece>& start_list,
                                std::string* err);

  /// Normalize by fixing slashes style, fixing redundant .. and . and makes the
  /// path |input| relative to |this->relative_to_| and store to |result|.
  bool Normalize(const std::string& input, std::string* result,
                 std::string* err) const;

 private:
  // Absolute, forward-slash form of the base directory.
  std::string relative_to_;
  // relative_to_ pre-split on '/'; pieces point into relative_to_.
  std::vector<StringPiece> split_relative_to_;
};

#endif  // NINJA_INCLUDES_NORMALIZE_H_
diff --git a/src/includes_normalize_test.cc b/src/includes_normalize_test.cc
new file mode 100644
index 0000000..9214f53
--- /dev/null
+++ b/src/includes_normalize_test.cc
@@ -0,0 +1,169 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "includes_normalize.h"
+
+#include <algorithm>
+
+#include <direct.h>
+
+#include "string_piece_util.h"
+#include "test.h"
+#include "util.h"
+
+using namespace std;
+
+namespace {
+
// Return the basename (last '\\'-separated component) of the current
// working directory.
string GetCurDir() {
  char buf[_MAX_PATH];
  // NOTE(review): _getcwd's return value is unchecked — assumes the cwd
  // fits in _MAX_PATH and the call succeeds; acceptable for a test
  // helper but confirm.
  _getcwd(buf, sizeof(buf));
  vector<StringPiece> parts = SplitStringPiece(buf, '\\');
  return parts[parts.size() - 1].AsString();
}
+
+string NormalizeAndCheckNoError(const string& input) {
+ string result, err;
+ IncludesNormalize normalizer(".");
+ EXPECT_TRUE(normalizer.Normalize(input, &result, &err));
+ EXPECT_EQ("", err);
+ return result;
+}
+
+string NormalizeRelativeAndCheckNoError(const string& input,
+ const string& relative_to) {
+ string result, err;
+ IncludesNormalize normalizer(relative_to);
+ EXPECT_TRUE(normalizer.Normalize(input, &result, &err));
+ EXPECT_EQ("", err);
+ return result;
+}
+
+} // namespace
+
// "." and ".." components collapse and backslashes become forward
// slashes, whichever separator style the input mixes.
TEST(IncludesNormalize, Simple) {
  EXPECT_EQ("b", NormalizeAndCheckNoError("a\\..\\b"));
  EXPECT_EQ("b", NormalizeAndCheckNoError("a\\../b"));
  EXPECT_EQ("a/b", NormalizeAndCheckNoError("a\\.\\b"));
  EXPECT_EQ("a/b", NormalizeAndCheckNoError("a\\./b"));
}
+
// Paths are rewritten relative to the given base directory, climbing
// with ".." when the base is outside the path.
TEST(IncludesNormalize, WithRelative) {
  string err;
  string currentdir = GetCurDir();
  EXPECT_EQ("c", NormalizeRelativeAndCheckNoError("a/b/c", "a/b"));
  // Round-trip: an already-absolute path normalizes back to its
  // relative form.
  EXPECT_EQ("a",
            NormalizeAndCheckNoError(IncludesNormalize::AbsPath("a", &err)));
  EXPECT_EQ("", err);
  // Base "../b" is a sibling of the cwd, so the result must climb out
  // and back through the cwd's basename.
  EXPECT_EQ(string("../") + currentdir + string("/a"),
            NormalizeRelativeAndCheckNoError("a", "../b"));
  EXPECT_EQ(string("../") + currentdir + string("/a/b"),
            NormalizeRelativeAndCheckNoError("a/b", "../c"));
  EXPECT_EQ("../../a", NormalizeRelativeAndCheckNoError("a", "b/c"));
  EXPECT_EQ(".", NormalizeRelativeAndCheckNoError("a", "a"));
}
+
// Normalization preserves the input's letter case in the output.
TEST(IncludesNormalize, Case) {
  EXPECT_EQ("b", NormalizeAndCheckNoError("Abc\\..\\b"));
  EXPECT_EQ("BdEf", NormalizeAndCheckNoError("Abc\\..\\BdEf"));
  EXPECT_EQ("A/b", NormalizeAndCheckNoError("A\\.\\b"));
  EXPECT_EQ("a/b", NormalizeAndCheckNoError("a\\./b"));
  EXPECT_EQ("A/B", NormalizeAndCheckNoError("A\\.\\B"));
  EXPECT_EQ("A/B", NormalizeAndCheckNoError("A\\./B"));
}
+
// Same drive (case-insensitive letter) relativizes; a path on another
// drive stays absolute, with slashes normalized.
TEST(IncludesNormalize, DifferentDrive) {
  EXPECT_EQ("stuff.h",
            NormalizeRelativeAndCheckNoError("p:\\vs08\\stuff.h", "p:\\vs08"));
  EXPECT_EQ("stuff.h",
            NormalizeRelativeAndCheckNoError("P:\\Vs08\\stuff.h", "p:\\vs08"));
  EXPECT_EQ("p:/vs08/stuff.h",
            NormalizeRelativeAndCheckNoError("p:\\vs08\\stuff.h", "c:\\vs08"));
  EXPECT_EQ("P:/vs08/stufF.h", NormalizeRelativeAndCheckNoError(
                                   "P:\\vs08\\stufF.h", "D:\\stuff/things"));
  EXPECT_EQ("P:/vs08/stuff.h", NormalizeRelativeAndCheckNoError(
                                   "P:/vs08\\stuff.h", "D:\\stuff/things"));
  // ".." is still collapsed even when the result stays absolute.
  EXPECT_EQ("P:/wee/stuff.h",
            NormalizeRelativeAndCheckNoError("P:/vs08\\../wee\\stuff.h",
                                             "D:\\stuff/things"));
}
+
+TEST(IncludesNormalize, LongInvalidPath) {
+ const char kLongInputString[] =
+ "C:\\Program Files (x86)\\Microsoft Visual Studio "
+ "12.0\\VC\\INCLUDEwarning #31001: The dll for reading and writing the "
+ "pdb (for example, mspdb110.dll) could not be found on your path. This "
+ "is usually a configuration error. Compilation will continue using /Z7 "
+ "instead of /Zi, but expect a similar error when you link your program.";
+ // Too long, won't be canonicalized. Ensure doesn't crash.
+ string result, err;
+ IncludesNormalize normalizer(".");
+ EXPECT_FALSE(
+ normalizer.Normalize(kLongInputString, &result, &err));
+ EXPECT_EQ("path too long", err);
+
+
+ // Construct max size path having cwd prefix.
+ // kExactlyMaxPath = "$cwd\\a\\aaaa...aaaa\0";
+ char kExactlyMaxPath[_MAX_PATH + 1];
+ ASSERT_NE(_getcwd(kExactlyMaxPath, sizeof kExactlyMaxPath), NULL);
+
+ int cwd_len = strlen(kExactlyMaxPath);
+ ASSERT_LE(cwd_len + 3 + 1, _MAX_PATH)
+ kExactlyMaxPath[cwd_len] = '\\';
+ kExactlyMaxPath[cwd_len + 1] = 'a';
+ kExactlyMaxPath[cwd_len + 2] = '\\';
+
+ kExactlyMaxPath[cwd_len + 3] = 'a';
+
+ for (int i = cwd_len + 4; i < _MAX_PATH; ++i) {
+ if (i > cwd_len + 4 && i < _MAX_PATH - 1 && i % 10 == 0)
+ kExactlyMaxPath[i] = '\\';
+ else
+ kExactlyMaxPath[i] = 'a';
+ }
+
+ kExactlyMaxPath[_MAX_PATH] = '\0';
+ EXPECT_EQ(strlen(kExactlyMaxPath), _MAX_PATH);
+
+ string forward_slashes(kExactlyMaxPath);
+ replace(forward_slashes.begin(), forward_slashes.end(), '\\', '/');
+ // Make sure a path that's exactly _MAX_PATH long is canonicalized.
+ EXPECT_EQ(forward_slashes.substr(cwd_len + 1),
+ NormalizeAndCheckNoError(kExactlyMaxPath));
+}
+
// A relative path that fits in _MAX_PATH can still overflow once the
// cwd prefix is prepended by GetFullPathName; that must fail with a
// GetFullPathName error rather than crash.
TEST(IncludesNormalize, ShortRelativeButTooLongAbsolutePath) {
  string result, err;
  IncludesNormalize normalizer(".");
  // A short path should work
  EXPECT_TRUE(normalizer.Normalize("a", &result, &err));
  EXPECT_EQ("", err);

  // Construct max size path having cwd prefix.
  // kExactlyMaxPath = "aaaa\\aaaa...aaaa\0";
  char kExactlyMaxPath[_MAX_PATH + 1];
  for (int i = 0; i < _MAX_PATH; ++i) {
    if (i < _MAX_PATH - 1 && i % 10 == 4)
      kExactlyMaxPath[i] = '\\';
    else
      kExactlyMaxPath[i] = 'a';
  }
  kExactlyMaxPath[_MAX_PATH] = '\0';
  EXPECT_EQ(strlen(kExactlyMaxPath), _MAX_PATH);

  // Make sure a path that's exactly _MAX_PATH long fails with a proper error.
  EXPECT_FALSE(normalizer.Normalize(kExactlyMaxPath, &result, &err));
  EXPECT_TRUE(err.find("GetFullPathName") != string::npos);
}
diff --git a/src/inline.sh b/src/inline.sh
new file mode 100755
index 0000000..5092fa2
--- /dev/null
+++ b/src/inline.sh
@@ -0,0 +1,32 @@
#!/bin/sh
#
# Copyright 2001 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This quick script converts a text file into an #include-able header.
# It expects the name of the variable as its first argument, and reads
# stdin and writes stdout.

varname="$1"

# 'od' and 'sed' may not be available on all platforms, and may not support the
# flags used here. We must ensure that the script exits with a non-zero exit
# code in those cases.
# od -t x1: one hex byte per field; -A n: suppress the offset column;
# -v: emit every line (no '*' run-compression), so no bytes are lost.
byte_vals=$(od -t x1 -A n -v) || exit 1
# The sed program, expression by expression: blank out whitespace-only
# lines; squeeze whitespace runs to a single space; strip trailing
# spaces; turn each remaining space into '\x' (making "\xNN" escapes);
# then wrap every line in double quotes so adjacent lines concatenate
# as C string literals.
escaped_byte_vals=$(echo "${byte_vals}" \
  | sed -e 's|^[\t ]\{0,\}$||g; s|[\t ]\{1,\}| |g; s| \{1,\}$||g; s| |\\x|g; s|^|"|; s|$|"|') \
  || exit 1

# Only write output once we have successfully generated the required data
printf "const char %s[] = \n%s;" "${varname}" "${escaped_byte_vals}"
diff --git a/src/lexer.cc b/src/lexer.cc
new file mode 100644
index 0000000..6e4a470
--- /dev/null
+++ b/src/lexer.cc
@@ -0,0 +1,822 @@
+/* Generated by re2c 1.1.1 */
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "lexer.h"
+
+#include <stdio.h>
+
+#include "eval_env.h"
+#include "util.h"
+
+using namespace std;
+
/// Report a lexing error at the last-read token: fills *err with
/// "<file>:<line>: <message>" followed by (up to 72 columns of) the
/// offending line and a caret under the error column. Always returns
/// false so callers can write "return Error(...)".
bool Lexer::Error(const string& message, string* err) {
  // Compute line/column by scanning the input up to last_token_.
  int line = 1;
  const char* line_start = input_.str_;
  for (const char* p = input_.str_; p < last_token_; ++p) {
    if (*p == '\n') {
      ++line;
      line_start = p + 1;
    }
  }
  // Column is 0 when no token has been read yet (last_token_ == NULL).
  int col = last_token_ ? (int)(last_token_ - line_start) : 0;

  char buf[1024];
  snprintf(buf, sizeof(buf), "%s:%d: ", filename_.AsString().c_str(), line);
  *err = buf;
  *err += message + "\n";

  // Add some context to the message.
  const int kTruncateColumn = 72;
  if (col > 0 && col < kTruncateColumn) {
    int len;
    bool truncated = true;
    // Find where the offending line ends (NUL or newline) within the
    // truncation window.
    for (len = 0; len < kTruncateColumn; ++len) {
      if (line_start[len] == 0 || line_start[len] == '\n') {
        truncated = false;
        break;
      }
    }
    *err += string(line_start, len);
    if (truncated)
      *err += "...";
    *err += "\n";
    *err += string(col, ' ');
    *err += "^ near here";
  }

  return false;
}
+
/// Helper ctor useful for tests: lexes |input| under the placeholder
/// filename "input".
Lexer::Lexer(const char* input) {
  Start("input", input);
}
+
/// Start parsing some input. The lexer keeps raw pointers into |input|,
/// so the backing buffer must outlive all ReadToken()/ReadEvalString()
/// calls (the scanner treats a NUL byte as end-of-file).
void Lexer::Start(StringPiece filename, StringPiece input) {
  filename_ = filename;
  input_ = input;
  ofs_ = input_.str_;    // current read position
  last_token_ = NULL;    // no token read yet
}
+
/// Return a human-readable form of a token, used in error messages.
const char* Lexer::TokenName(Token t) {
  switch (t) {
  case ERROR:    return "lexing error";
  case BUILD:    return "'build'";
  case COLON:    return "':'";
  case DEFAULT:  return "'default'";
  case EQUALS:   return "'='";
  case IDENT:    return "identifier";
  case INCLUDE:  return "'include'";
  case INDENT:   return "indent";
  case NEWLINE:  return "newline";
  case PIPE2:    return "'||'";
  case PIPE:     return "'|'";
  case POOL:     return "'pool'";
  case RULE:     return "'rule'";
  case SUBNINJA: return "'subninja'";
  case TEOF:     return "eof";
  }
  return NULL;  // not reached (all enumerators handled above)
}
+
/// Return an extra hint appended to "expected X" error messages, or ""
/// when there is nothing useful to add for the expected token.
const char* Lexer::TokenErrorHint(Token expected) {
  switch (expected) {
  case COLON:
    return " ($ also escapes ':')";
  default:
    return "";
  }
}
+
/// If the last token read was an ERROR token, provide more specific
/// detail about it (currently only the tab-vs-space case); falls back
/// to a generic "lexing error".
string Lexer::DescribeLastError() {
  if (last_token_) {
    switch (last_token_[0]) {
    case '\t':
      return "tabs are not allowed, use spaces";
    }
  }
  return "lexing error";
}
+
/// Rewind to the last read Token. Only a single saved position exists
/// (last_token_), so at most one token can be pushed back at a time.
void Lexer::UnreadToken() {
  ofs_ = last_token_;
}
+
/// Read and return the next Token.
/// NOTE: this body is generated by re2c 1.1.1 (see the file header);
/// do not hand-edit the state machine — change lexer.in.cc and
/// regenerate. yybm is re2c's per-character class bitmap; yyN labels
/// are DFA states.
Lexer::Token Lexer::ReadToken() {
  const char* p = ofs_;
  const char* q;
  const char* start;
  Lexer::Token token;
  for (;;) {
    start = p;

{
  unsigned char yych;
  unsigned int yyaccept = 0;
  static const unsigned char yybm[] = {
      0, 128, 128, 128, 128, 128, 128, 128,
    128, 128,   0, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    160, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 192, 192, 128,
    192, 192, 192, 192, 192, 192, 192, 192,
    192, 192, 128, 128, 128, 128, 128, 128,
    128, 192, 192, 192, 192, 192, 192, 192,
    192, 192, 192, 192, 192, 192, 192, 192,
    192, 192, 192, 192, 192, 192, 192, 192,
    192, 192, 192, 128, 128, 128, 128, 192,
    128, 192, 192, 192, 192, 192, 192, 192,
    192, 192, 192, 192, 192, 192, 192, 192,
    192, 192, 192, 192, 192, 192, 192, 192,
    192, 192, 192, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
  };
  yych = *p;
  if (yybm[0+yych] & 32) {
    goto yy9;
  }
  if (yych <= '^') {
    if (yych <= ',') {
      if (yych <= '\f') {
        if (yych <= 0x00) goto yy2;
        if (yych == '\n') goto yy6;
        goto yy4;
      } else {
        if (yych <= '\r') goto yy8;
        if (yych == '#') goto yy12;
        goto yy4;
      }
    } else {
      if (yych <= ':') {
        if (yych == '/') goto yy4;
        if (yych <= '9') goto yy13;
        goto yy16;
      } else {
        if (yych <= '=') {
          if (yych <= '<') goto yy4;
          goto yy18;
        } else {
          if (yych <= '@') goto yy4;
          if (yych <= 'Z') goto yy13;
          goto yy4;
        }
      }
    }
  } else {
    if (yych <= 'i') {
      if (yych <= 'b') {
        if (yych == '`') goto yy4;
        if (yych <= 'a') goto yy13;
        goto yy20;
      } else {
        if (yych == 'd') goto yy21;
        if (yych <= 'h') goto yy13;
        goto yy22;
      }
    } else {
      if (yych <= 'r') {
        if (yych == 'p') goto yy23;
        if (yych <= 'q') goto yy13;
        goto yy24;
      } else {
        if (yych <= 'z') {
          if (yych <= 's') goto yy25;
          goto yy13;
        } else {
          if (yych == '|') goto yy26;
          goto yy4;
        }
      }
    }
  }
yy2:
  ++p;
  { token = TEOF;     break; }
yy4:
  ++p;
yy5:
  { token = ERROR;    break; }
yy6:
  ++p;
  { token = NEWLINE;  break; }
yy8:
  yych = *++p;
  if (yych == '\n') goto yy28;
  goto yy5;
yy9:
  yyaccept = 0;
  yych = *(q = ++p);
  if (yybm[0+yych] & 32) {
    goto yy9;
  }
  if (yych <= '\f') {
    if (yych == '\n') goto yy6;
  } else {
    if (yych <= '\r') goto yy30;
    if (yych == '#') goto yy32;
  }
yy11:
  { token = INDENT;   break; }
yy12:
  yyaccept = 1;
  yych = *(q = ++p);
  if (yych <= 0x00) goto yy5;
  goto yy33;
yy13:
  yych = *++p;
yy14:
  if (yybm[0+yych] & 64) {
    goto yy13;
  }
  { token = IDENT;    break; }
yy16:
  ++p;
  { token = COLON;    break; }
yy18:
  ++p;
  { token = EQUALS;   break; }
yy20:
  yych = *++p;
  if (yych == 'u') goto yy36;
  goto yy14;
yy21:
  yych = *++p;
  if (yych == 'e') goto yy37;
  goto yy14;
yy22:
  yych = *++p;
  if (yych == 'n') goto yy38;
  goto yy14;
yy23:
  yych = *++p;
  if (yych == 'o') goto yy39;
  goto yy14;
yy24:
  yych = *++p;
  if (yych == 'u') goto yy40;
  goto yy14;
yy25:
  yych = *++p;
  if (yych == 'u') goto yy41;
  goto yy14;
yy26:
  yych = *++p;
  if (yych == '|') goto yy42;
  { token = PIPE;     break; }
yy28:
  ++p;
  { token = NEWLINE;  break; }
yy30:
  yych = *++p;
  if (yych == '\n') goto yy28;
yy31:
  p = q;
  if (yyaccept == 0) {
    goto yy11;
  } else {
    goto yy5;
  }
yy32:
  yych = *++p;
yy33:
  if (yybm[0+yych] & 128) {
    goto yy32;
  }
  if (yych <= 0x00) goto yy31;
  ++p;
  { continue; }
yy36:
  yych = *++p;
  if (yych == 'i') goto yy44;
  goto yy14;
yy37:
  yych = *++p;
  if (yych == 'f') goto yy45;
  goto yy14;
yy38:
  yych = *++p;
  if (yych == 'c') goto yy46;
  goto yy14;
yy39:
  yych = *++p;
  if (yych == 'o') goto yy47;
  goto yy14;
yy40:
  yych = *++p;
  if (yych == 'l') goto yy48;
  goto yy14;
yy41:
  yych = *++p;
  if (yych == 'b') goto yy49;
  goto yy14;
yy42:
  ++p;
  { token = PIPE2;    break; }
yy44:
  yych = *++p;
  if (yych == 'l') goto yy50;
  goto yy14;
yy45:
  yych = *++p;
  if (yych == 'a') goto yy51;
  goto yy14;
yy46:
  yych = *++p;
  if (yych == 'l') goto yy52;
  goto yy14;
yy47:
  yych = *++p;
  if (yych == 'l') goto yy53;
  goto yy14;
yy48:
  yych = *++p;
  if (yych == 'e') goto yy55;
  goto yy14;
yy49:
  yych = *++p;
  if (yych == 'n') goto yy57;
  goto yy14;
yy50:
  yych = *++p;
  if (yych == 'd') goto yy58;
  goto yy14;
yy51:
  yych = *++p;
  if (yych == 'u') goto yy60;
  goto yy14;
yy52:
  yych = *++p;
  if (yych == 'u') goto yy61;
  goto yy14;
yy53:
  yych = *++p;
  if (yybm[0+yych] & 64) {
    goto yy13;
  }
  { token = POOL;     break; }
yy55:
  yych = *++p;
  if (yybm[0+yych] & 64) {
    goto yy13;
  }
  { token = RULE;     break; }
yy57:
  yych = *++p;
  if (yych == 'i') goto yy62;
  goto yy14;
yy58:
  yych = *++p;
  if (yybm[0+yych] & 64) {
    goto yy13;
  }
  { token = BUILD;    break; }
yy60:
  yych = *++p;
  if (yych == 'l') goto yy63;
  goto yy14;
yy61:
  yych = *++p;
  if (yych == 'd') goto yy64;
  goto yy14;
yy62:
  yych = *++p;
  if (yych == 'n') goto yy65;
  goto yy14;
yy63:
  yych = *++p;
  if (yych == 't') goto yy66;
  goto yy14;
yy64:
  yych = *++p;
  if (yych == 'e') goto yy68;
  goto yy14;
yy65:
  yych = *++p;
  if (yych == 'j') goto yy70;
  goto yy14;
yy66:
  yych = *++p;
  if (yybm[0+yych] & 64) {
    goto yy13;
  }
  { token = DEFAULT;  break; }
yy68:
  yych = *++p;
  if (yybm[0+yych] & 64) {
    goto yy13;
  }
  { token = INCLUDE;  break; }
yy70:
  yych = *++p;
  if (yych != 'a') goto yy14;
  yych = *++p;
  if (yybm[0+yych] & 64) {
    goto yy13;
  }
  { token = SUBNINJA; break; }
}

  }

  last_token_ = start;
  ofs_ = p;
  // Skip trailing whitespace so the next read starts on content;
  // newlines/EOF already terminate the line themselves.
  if (token != NEWLINE && token != TEOF)
    EatWhitespace();
  return token;
}
+
/// If the next token is \a token, consume it and return true; otherwise
/// push the token back (via UnreadToken) and return false.
bool Lexer::PeekToken(Token token) {
  Token t = ReadToken();
  if (t == token)
    return true;
  UnreadToken();
  return false;
}
+
/// Skip past whitespace at the current position: runs of spaces and
/// "$\n" / "$\r\n" line continuations.
/// NOTE: generated by re2c 1.1.1 (see file header); do not hand-edit —
/// change lexer.in.cc and regenerate.
void Lexer::EatWhitespace() {
  const char* p = ofs_;
  const char* q;
  for (;;) {
    ofs_ = p;

{
  unsigned char yych;
  static const unsigned char yybm[] = {
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
    128,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
  };
  yych = *p;
  if (yybm[0+yych] & 128) {
    goto yy79;
  }
  if (yych <= 0x00) goto yy75;
  if (yych == '$') goto yy82;
  goto yy77;
yy75:
  ++p;
  { break; }
yy77:
  ++p;
yy78:
  { break; }
yy79:
  yych = *++p;
  if (yybm[0+yych] & 128) {
    goto yy79;
  }
  { continue; }
yy82:
  yych = *(q = ++p);
  if (yych == '\n') goto yy83;
  if (yych == '\r') goto yy85;
  goto yy78;
yy83:
  ++p;
  { continue; }
yy85:
  yych = *++p;
  if (yych == '\n') goto yy87;
  p = q;
  goto yy78;
yy87:
  ++p;
  { continue; }
}

  }
}
+
/// Read a simple identifier (a rule or variable name) into *out,
/// then skip trailing whitespace. Returns false (with last_token_
/// updated) if no identifier character is present at the cursor.
/// NOTE: generated by re2c 1.1.1 (see file header); do not hand-edit —
/// change lexer.in.cc and regenerate.
bool Lexer::ReadIdent(string* out) {
  const char* p = ofs_;
  const char* start;
  for (;;) {
    start = p;

{
  unsigned char yych;
  static const unsigned char yybm[] = {
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0, 128, 128,   0,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128,   0,   0,   0,   0,   0,   0,
      0, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128,   0,   0,   0,   0, 128,
      0, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
      0,   0,   0,   0,   0,   0,   0,   0,
  };
  yych = *p;
  if (yybm[0+yych] & 128) {
    goto yy93;
  }
  ++p;
  {
    last_token_ = start;
    return false;
  }
yy93:
  yych = *++p;
  if (yybm[0+yych] & 128) {
    goto yy93;
  }
  {
    out->assign(start, p - start);
    break;
  }
}

  }
  last_token_ = start;
  ofs_ = p;
  EatWhitespace();
  return true;
}
+
/// Read a $-escaped string into *eval until a delimiter: for |path|
/// strings, any of space / ':' / '|' / newline ends the value (the
/// delimiter is left unconsumed); otherwise only a newline ends it.
/// Handles "$ ", "$$", "$:", "$name", "${name}", and "$<newline>" line
/// continuations. Returns false via Error() on bad escapes or EOF.
/// NOTE: generated by re2c 1.1.1 (see file header); do not hand-edit —
/// change lexer.in.cc and regenerate.
bool Lexer::ReadEvalString(EvalString* eval, bool path, string* err) {
  const char* p = ofs_;
  const char* q;
  const char* start;
  for (;;) {
    start = p;

{
  unsigned char yych;
  static const unsigned char yybm[] = {
      0,  16,  16,  16,  16,  16,  16,  16,
     16,  16,   0,  16,  16,   0,  16,  16,
     16,  16,  16,  16,  16,  16,  16,  16,
     16,  16,  16,  16,  16,  16,  16,  16,
     32,  16,  16,  16,   0,  16,  16,  16,
     16,  16,  16,  16,  16, 208, 144,  16,
    208, 208, 208, 208, 208, 208, 208, 208,
    208, 208,   0,  16,  16,  16,  16,  16,
     16, 208, 208, 208, 208, 208, 208, 208,
    208, 208, 208, 208, 208, 208, 208, 208,
    208, 208, 208, 208, 208, 208, 208, 208,
    208, 208, 208,  16,  16,  16,  16, 208,
     16, 208, 208, 208, 208, 208, 208, 208,
    208, 208, 208, 208, 208, 208, 208, 208,
    208, 208, 208, 208, 208, 208, 208, 208,
    208, 208, 208,  16,   0,  16,  16,  16,
     16,  16,  16,  16,  16,  16,  16,  16,
     16,  16,  16,  16,  16,  16,  16,  16,
     16,  16,  16,  16,  16,  16,  16,  16,
     16,  16,  16,  16,  16,  16,  16,  16,
     16,  16,  16,  16,  16,  16,  16,  16,
     16,  16,  16,  16,  16,  16,  16,  16,
     16,  16,  16,  16,  16,  16,  16,  16,
     16,  16,  16,  16,  16,  16,  16,  16,
     16,  16,  16,  16,  16,  16,  16,  16,
     16,  16,  16,  16,  16,  16,  16,  16,
     16,  16,  16,  16,  16,  16,  16,  16,
     16,  16,  16,  16,  16,  16,  16,  16,
     16,  16,  16,  16,  16,  16,  16,  16,
     16,  16,  16,  16,  16,  16,  16,  16,
     16,  16,  16,  16,  16,  16,  16,  16,
     16,  16,  16,  16,  16,  16,  16,  16,
  };
  yych = *p;
  if (yybm[0+yych] & 16) {
    goto yy100;
  }
  if (yych <= '\r') {
    if (yych <= 0x00) goto yy98;
    if (yych <= '\n') goto yy103;
    goto yy105;
  } else {
    if (yych <= ' ') goto yy103;
    if (yych <= '$') goto yy107;
    goto yy103;
  }
yy98:
  ++p;
  {
    last_token_ = start;
    return Error("unexpected EOF", err);
  }
yy100:
  yych = *++p;
  if (yybm[0+yych] & 16) {
    goto yy100;
  }
  {
    eval->AddText(StringPiece(start, p - start));
    continue;
  }
yy103:
  ++p;
  {
    if (path) {
      p = start;
      break;
    } else {
      if (*start == '\n')
        break;
      eval->AddText(StringPiece(start, 1));
      continue;
    }
  }
yy105:
  yych = *++p;
  if (yych == '\n') goto yy108;
  {
    last_token_ = start;
    return Error(DescribeLastError(), err);
  }
yy107:
  yych = *++p;
  if (yybm[0+yych] & 64) {
    goto yy120;
  }
  if (yych <= ' ') {
    if (yych <= '\f') {
      if (yych == '\n') goto yy112;
      goto yy110;
    } else {
      if (yych <= '\r') goto yy115;
      if (yych <= 0x1F) goto yy110;
      goto yy116;
    }
  } else {
    if (yych <= '/') {
      if (yych == '$') goto yy118;
      goto yy110;
    } else {
      if (yych <= ':') goto yy123;
      if (yych <= '`') goto yy110;
      if (yych <= '{') goto yy125;
      goto yy110;
    }
  }
yy108:
  ++p;
  {
    if (path)
      p = start;
    break;
  }
yy110:
  ++p;
yy111:
  {
    last_token_ = start;
    return Error("bad $-escape (literal $ must be written as $$)", err);
  }
yy112:
  yych = *++p;
  if (yybm[0+yych] & 32) {
    goto yy112;
  }
  {
    continue;
  }
yy115:
  yych = *++p;
  if (yych == '\n') goto yy126;
  goto yy111;
yy116:
  ++p;
  {
    eval->AddText(StringPiece(" ", 1));
    continue;
  }
yy118:
  ++p;
  {
    eval->AddText(StringPiece("$", 1));
    continue;
  }
yy120:
  yych = *++p;
  if (yybm[0+yych] & 64) {
    goto yy120;
  }
  {
    eval->AddSpecial(StringPiece(start + 1, p - start - 1));
    continue;
  }
yy123:
  ++p;
  {
    eval->AddText(StringPiece(":", 1));
    continue;
  }
yy125:
  yych = *(q = ++p);
  if (yybm[0+yych] & 128) {
    goto yy129;
  }
  goto yy111;
yy126:
  yych = *++p;
  if (yych == ' ') goto yy126;
  {
    continue;
  }
yy129:
  yych = *++p;
  if (yybm[0+yych] & 128) {
    goto yy129;
  }
  if (yych == '}') goto yy132;
  p = q;
  goto yy111;
yy132:
  ++p;
  {
    eval->AddSpecial(StringPiece(start + 2, p - start - 3));
    continue;
  }
}

  }
  last_token_ = start;
  ofs_ = p;
  if (path)
    EatWhitespace();
  // Non-path strings end in newlines, so there's no whitespace to eat.
  return true;
}
diff --git a/src/lexer.h b/src/lexer.h
new file mode 100644
index 0000000..788d948
--- /dev/null
+++ b/src/lexer.h
@@ -0,0 +1,105 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_LEXER_H_
+#define NINJA_LEXER_H_
+
+#include "string_piece.h"
+
+// Windows may #define ERROR.
+#ifdef ERROR
+#undef ERROR
+#endif
+
+struct EvalString;
+
+struct Lexer {
+ Lexer() {}
+ /// Helper ctor useful for tests.
+ explicit Lexer(const char* input);
+
+ enum Token {
+ ERROR,
+ BUILD,
+ COLON,
+ DEFAULT,
+ EQUALS,
+ IDENT,
+ INCLUDE,
+ INDENT,
+ NEWLINE,
+ PIPE,
+ PIPE2,
+ POOL,
+ RULE,
+ SUBNINJA,
+ TEOF,
+ };
+
+ /// Return a human-readable form of a token, used in error messages.
+ static const char* TokenName(Token t);
+
+ /// Return a human-readable token hint, used in error messages.
+ static const char* TokenErrorHint(Token expected);
+
+ /// If the last token read was an ERROR token, provide more info
+ /// or the empty string.
+ std::string DescribeLastError();
+
+ /// Start parsing some input.
+ void Start(StringPiece filename, StringPiece input);
+
+ /// Read a Token from the Token enum.
+ Token ReadToken();
+
+ /// Rewind to the last read Token.
+ void UnreadToken();
+
+ /// If the next token is \a token, read it and return true.
+ bool PeekToken(Token token);
+
+ /// Read a simple identifier (a rule or variable name).
+ /// Returns false if a name can't be read.
+ bool ReadIdent(std::string* out);
+
+ /// Read a path (complete with $escapes).
+ /// Returns false only on error, returned path may be empty if a delimiter
+ /// (space, newline) is hit.
+ bool ReadPath(EvalString* path, std::string* err) {
+ return ReadEvalString(path, true, err);
+ }
+
+ /// Read the value side of a var = value line (complete with $escapes).
+ /// Returns false only on error.
+ bool ReadVarValue(EvalString* value, std::string* err) {
+ return ReadEvalString(value, false, err);
+ }
+
+ /// Construct an error message with context.
+ bool Error(const std::string& message, std::string* err);
+
+private:
+ /// Skip past whitespace (called after each read token/ident/etc.).
+ void EatWhitespace();
+
+ /// Read a $-escaped string.
+ bool ReadEvalString(EvalString* eval, bool path, std::string* err);
+
+ StringPiece filename_;
+ StringPiece input_;
+ const char* ofs_;
+ const char* last_token_;
+};
+
+#endif // NINJA_LEXER_H_
diff --git a/src/lexer.in.cc b/src/lexer.in.cc
new file mode 100644
index 0000000..88007e7
--- /dev/null
+++ b/src/lexer.in.cc
@@ -0,0 +1,280 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "lexer.h"
+
+#include <stdio.h>
+
+#include "eval_env.h"
+#include "util.h"
+
+using namespace std;
+
+bool Lexer::Error(const string& message, string* err) {
+ // Compute line/column.
+ int line = 1;
+ const char* line_start = input_.str_;
+ for (const char* p = input_.str_; p < last_token_; ++p) {
+ if (*p == '\n') {
+ ++line;
+ line_start = p + 1;
+ }
+ }
+ int col = last_token_ ? (int)(last_token_ - line_start) : 0;
+
+ char buf[1024];
+ snprintf(buf, sizeof(buf), "%s:%d: ", filename_.AsString().c_str(), line);
+ *err = buf;
+ *err += message + "\n";
+
+ // Add some context to the message.
+ const int kTruncateColumn = 72;
+ if (col > 0 && col < kTruncateColumn) {
+ int len;
+ bool truncated = true;
+ for (len = 0; len < kTruncateColumn; ++len) {
+ if (line_start[len] == 0 || line_start[len] == '\n') {
+ truncated = false;
+ break;
+ }
+ }
+ *err += string(line_start, len);
+ if (truncated)
+ *err += "...";
+ *err += "\n";
+ *err += string(col, ' ');
+ *err += "^ near here";
+ }
+
+ return false;
+}
+
+Lexer::Lexer(const char* input) {
+ Start("input", input);
+}
+
+void Lexer::Start(StringPiece filename, StringPiece input) {
+ filename_ = filename;
+ input_ = input;
+ ofs_ = input_.str_;
+ last_token_ = NULL;
+}
+
+const char* Lexer::TokenName(Token t) {
+ switch (t) {
+ case ERROR: return "lexing error";
+ case BUILD: return "'build'";
+ case COLON: return "':'";
+ case DEFAULT: return "'default'";
+ case EQUALS: return "'='";
+ case IDENT: return "identifier";
+ case INCLUDE: return "'include'";
+ case INDENT: return "indent";
+ case NEWLINE: return "newline";
+ case PIPE2: return "'||'";
+ case PIPE: return "'|'";
+ case POOL: return "'pool'";
+ case RULE: return "'rule'";
+ case SUBNINJA: return "'subninja'";
+ case TEOF: return "eof";
+ }
+ return NULL; // not reached
+}
+
+const char* Lexer::TokenErrorHint(Token expected) {
+ switch (expected) {
+ case COLON:
+ return " ($ also escapes ':')";
+ default:
+ return "";
+ }
+}
+
+string Lexer::DescribeLastError() {
+ if (last_token_) {
+ switch (last_token_[0]) {
+ case '\t':
+ return "tabs are not allowed, use spaces";
+ }
+ }
+ return "lexing error";
+}
+
+void Lexer::UnreadToken() {
+ ofs_ = last_token_;
+}
+
+Lexer::Token Lexer::ReadToken() {
+ const char* p = ofs_;
+ const char* q;
+ const char* start;
+ Lexer::Token token;
+ for (;;) {
+ start = p;
+ /*!re2c
+ re2c:define:YYCTYPE = "unsigned char";
+ re2c:define:YYCURSOR = p;
+ re2c:define:YYMARKER = q;
+ re2c:yyfill:enable = 0;
+
+ nul = "\000";
+ simple_varname = [a-zA-Z0-9_-]+;
+ varname = [a-zA-Z0-9_.-]+;
+
+ [ ]*"#"[^\000\n]*"\n" { continue; }
+ [ ]*"\r\n" { token = NEWLINE; break; }
+ [ ]*"\n" { token = NEWLINE; break; }
+ [ ]+ { token = INDENT; break; }
+ "build" { token = BUILD; break; }
+ "pool" { token = POOL; break; }
+ "rule" { token = RULE; break; }
+ "default" { token = DEFAULT; break; }
+ "=" { token = EQUALS; break; }
+ ":" { token = COLON; break; }
+ "||" { token = PIPE2; break; }
+ "|" { token = PIPE; break; }
+ "include" { token = INCLUDE; break; }
+ "subninja" { token = SUBNINJA; break; }
+ varname { token = IDENT; break; }
+ nul { token = TEOF; break; }
+ [^] { token = ERROR; break; }
+ */
+ }
+
+ last_token_ = start;
+ ofs_ = p;
+ if (token != NEWLINE && token != TEOF)
+ EatWhitespace();
+ return token;
+}
+
+bool Lexer::PeekToken(Token token) {
+ Token t = ReadToken();
+ if (t == token)
+ return true;
+ UnreadToken();
+ return false;
+}
+
+void Lexer::EatWhitespace() {
+ const char* p = ofs_;
+ const char* q;
+ for (;;) {
+ ofs_ = p;
+ /*!re2c
+ [ ]+ { continue; }
+ "$\r\n" { continue; }
+ "$\n" { continue; }
+ nul { break; }
+ [^] { break; }
+ */
+ }
+}
+
+bool Lexer::ReadIdent(string* out) {
+ const char* p = ofs_;
+ const char* start;
+ for (;;) {
+ start = p;
+ /*!re2c
+ varname {
+ out->assign(start, p - start);
+ break;
+ }
+ [^] {
+ last_token_ = start;
+ return false;
+ }
+ */
+ }
+ last_token_ = start;
+ ofs_ = p;
+ EatWhitespace();
+ return true;
+}
+
+bool Lexer::ReadEvalString(EvalString* eval, bool path, string* err) {
+ const char* p = ofs_;
+ const char* q;
+ const char* start;
+ for (;;) {
+ start = p;
+ /*!re2c
+ [^$ :\r\n|\000]+ {
+ eval->AddText(StringPiece(start, p - start));
+ continue;
+ }
+ "\r\n" {
+ if (path)
+ p = start;
+ break;
+ }
+ [ :|\n] {
+ if (path) {
+ p = start;
+ break;
+ } else {
+ if (*start == '\n')
+ break;
+ eval->AddText(StringPiece(start, 1));
+ continue;
+ }
+ }
+ "$$" {
+ eval->AddText(StringPiece("$", 1));
+ continue;
+ }
+ "$ " {
+ eval->AddText(StringPiece(" ", 1));
+ continue;
+ }
+ "$\r\n"[ ]* {
+ continue;
+ }
+ "$\n"[ ]* {
+ continue;
+ }
+ "${"varname"}" {
+ eval->AddSpecial(StringPiece(start + 2, p - start - 3));
+ continue;
+ }
+ "$"simple_varname {
+ eval->AddSpecial(StringPiece(start + 1, p - start - 1));
+ continue;
+ }
+ "$:" {
+ eval->AddText(StringPiece(":", 1));
+ continue;
+ }
+ "$". {
+ last_token_ = start;
+ return Error("bad $-escape (literal $ must be written as $$)", err);
+ }
+ nul {
+ last_token_ = start;
+ return Error("unexpected EOF", err);
+ }
+ [^] {
+ last_token_ = start;
+ return Error(DescribeLastError(), err);
+ }
+ */
+ }
+ last_token_ = start;
+ ofs_ = p;
+ if (path)
+ EatWhitespace();
+ // Non-path strings end in newlines, so there's no whitespace to eat.
+ return true;
+}
diff --git a/src/lexer_test.cc b/src/lexer_test.cc
new file mode 100644
index 0000000..c5c416d
--- /dev/null
+++ b/src/lexer_test.cc
@@ -0,0 +1,98 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "lexer.h"
+
+#include "eval_env.h"
+#include "test.h"
+
+using namespace std;
+
+TEST(Lexer, ReadVarValue) {
+ Lexer lexer("plain text $var $VaR ${x}\n");
+ EvalString eval;
+ string err;
+ EXPECT_TRUE(lexer.ReadVarValue(&eval, &err));
+ EXPECT_EQ("", err);
+ EXPECT_EQ("[plain text ][$var][ ][$VaR][ ][$x]",
+ eval.Serialize());
+}
+
+TEST(Lexer, ReadEvalStringEscapes) {
+ Lexer lexer("$ $$ab c$: $\ncde\n");
+ EvalString eval;
+ string err;
+ EXPECT_TRUE(lexer.ReadVarValue(&eval, &err));
+ EXPECT_EQ("", err);
+ EXPECT_EQ("[ $ab c: cde]",
+ eval.Serialize());
+}
+
+TEST(Lexer, ReadIdent) {
+ Lexer lexer("foo baR baz_123 foo-bar");
+ string ident;
+ EXPECT_TRUE(lexer.ReadIdent(&ident));
+ EXPECT_EQ("foo", ident);
+ EXPECT_TRUE(lexer.ReadIdent(&ident));
+ EXPECT_EQ("baR", ident);
+ EXPECT_TRUE(lexer.ReadIdent(&ident));
+ EXPECT_EQ("baz_123", ident);
+ EXPECT_TRUE(lexer.ReadIdent(&ident));
+ EXPECT_EQ("foo-bar", ident);
+}
+
+TEST(Lexer, ReadIdentCurlies) {
+ // Verify that ReadIdent includes dots in the name,
+ // but in an expansion $bar.dots stops at the dot.
+ Lexer lexer("foo.dots $bar.dots ${bar.dots}\n");
+ string ident;
+ EXPECT_TRUE(lexer.ReadIdent(&ident));
+ EXPECT_EQ("foo.dots", ident);
+
+ EvalString eval;
+ string err;
+ EXPECT_TRUE(lexer.ReadVarValue(&eval, &err));
+ EXPECT_EQ("", err);
+ EXPECT_EQ("[$bar][.dots ][$bar.dots]",
+ eval.Serialize());
+}
+
+TEST(Lexer, Error) {
+ Lexer lexer("foo$\nbad $");
+ EvalString eval;
+ string err;
+ ASSERT_FALSE(lexer.ReadVarValue(&eval, &err));
+ EXPECT_EQ("input:2: bad $-escape (literal $ must be written as $$)\n"
+ "bad $\n"
+ " ^ near here"
+ , err);
+}
+
+TEST(Lexer, CommentEOF) {
+ // Verify we don't run off the end of the string when the EOF is
+ // mid-comment.
+ Lexer lexer("# foo");
+ Lexer::Token token = lexer.ReadToken();
+ EXPECT_EQ(Lexer::ERROR, token);
+}
+
+TEST(Lexer, Tabs) {
+ // Verify we print a useful error on a disallowed character.
+ Lexer lexer(" \tfoobar");
+ Lexer::Token token = lexer.ReadToken();
+ EXPECT_EQ(Lexer::INDENT, token);
+ token = lexer.ReadToken();
+ EXPECT_EQ(Lexer::ERROR, token);
+ EXPECT_EQ("tabs are not allowed, use spaces", lexer.DescribeLastError());
+}
diff --git a/src/line_printer.cc b/src/line_printer.cc
new file mode 100644
index 0000000..68c58ad
--- /dev/null
+++ b/src/line_printer.cc
@@ -0,0 +1,166 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "line_printer.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef _WIN32
+#include <windows.h>
+#ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING
+#define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x4
+#endif
+#else
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <termios.h>
+#include <sys/time.h>
+#endif
+
+#include "util.h"
+
+using namespace std;
+
+LinePrinter::LinePrinter() : have_blank_line_(true), console_locked_(false) {
+ const char* term = getenv("TERM");
+#ifndef _WIN32
+ smart_terminal_ = isatty(1) && term && string(term) != "dumb";
+#else
+ // Disable output buffer. It'd be nice to use line buffering but
+ // MSDN says: "For some systems, [_IOLBF] provides line
+ // buffering. However, for Win32, the behavior is the same as _IOFBF
+ // - Full Buffering."
+ if (term && string(term) == "dumb") {
+ smart_terminal_ = false;
+ } else {
+ setvbuf(stdout, NULL, _IONBF, 0);
+ console_ = GetStdHandle(STD_OUTPUT_HANDLE);
+ CONSOLE_SCREEN_BUFFER_INFO csbi;
+ smart_terminal_ = GetConsoleScreenBufferInfo(console_, &csbi);
+ }
+#endif
+ supports_color_ = smart_terminal_;
+ if (!supports_color_) {
+ const char* clicolor_force = getenv("CLICOLOR_FORCE");
+ supports_color_ = clicolor_force && string(clicolor_force) != "0";
+ }
+#ifdef _WIN32
+ // Try enabling ANSI escape sequence support on Windows 10 terminals.
+ if (supports_color_) {
+ DWORD mode;
+ if (GetConsoleMode(console_, &mode)) {
+ if (!SetConsoleMode(console_, mode | ENABLE_VIRTUAL_TERMINAL_PROCESSING)) {
+ supports_color_ = false;
+ }
+ }
+ }
+#endif
+}
+
+void LinePrinter::Print(string to_print, LineType type) {
+ if (console_locked_) {
+ line_buffer_ = to_print;
+ line_type_ = type;
+ return;
+ }
+
+ if (smart_terminal_) {
+ printf("\r"); // Print over previous line, if any.
+ // On Windows, calling a C library function writing to stdout also handles
+ // pausing the executable when the "Pause" key or Ctrl-S is pressed.
+ }
+
+ if (smart_terminal_ && type == ELIDE) {
+#ifdef _WIN32
+ CONSOLE_SCREEN_BUFFER_INFO csbi;
+ GetConsoleScreenBufferInfo(console_, &csbi);
+
+ to_print = ElideMiddle(to_print, static_cast<size_t>(csbi.dwSize.X));
+ // We don't want to have the cursor spamming back and forth, so instead of
+ // printf use WriteConsoleOutput which updates the contents of the buffer,
+ // but doesn't move the cursor position.
+ COORD buf_size = { csbi.dwSize.X, 1 };
+ COORD zero_zero = { 0, 0 };
+ SMALL_RECT target = {
+ csbi.dwCursorPosition.X, csbi.dwCursorPosition.Y,
+ static_cast<SHORT>(csbi.dwCursorPosition.X + csbi.dwSize.X - 1),
+ csbi.dwCursorPosition.Y
+ };
+ vector<CHAR_INFO> char_data(csbi.dwSize.X);
+ for (size_t i = 0; i < static_cast<size_t>(csbi.dwSize.X); ++i) {
+ char_data[i].Char.AsciiChar = i < to_print.size() ? to_print[i] : ' ';
+ char_data[i].Attributes = csbi.wAttributes;
+ }
+ WriteConsoleOutput(console_, &char_data[0], buf_size, zero_zero, &target);
+#else
+ // Limit output to width of the terminal if provided so we don't cause
+ // line-wrapping.
+ winsize size;
+ if ((ioctl(STDOUT_FILENO, TIOCGWINSZ, &size) == 0) && size.ws_col) {
+ to_print = ElideMiddle(to_print, size.ws_col);
+ }
+ printf("%s", to_print.c_str());
+ printf("\x1B[K"); // Clear to end of line.
+ fflush(stdout);
+#endif
+
+ have_blank_line_ = false;
+ } else {
+ printf("%s\n", to_print.c_str());
+ }
+}
+
+void LinePrinter::PrintOrBuffer(const char* data, size_t size) {
+ if (console_locked_) {
+ output_buffer_.append(data, size);
+ } else {
+ // Avoid printf and C strings, since the actual output might contain null
+ // bytes like UTF-16 does (yuck).
+ fwrite(data, 1, size, stdout);
+ }
+}
+
+void LinePrinter::PrintOnNewLine(const string& to_print) {
+ if (console_locked_ && !line_buffer_.empty()) {
+ output_buffer_.append(line_buffer_);
+ output_buffer_.append(1, '\n');
+ line_buffer_.clear();
+ }
+ if (!have_blank_line_) {
+ PrintOrBuffer("\n", 1);
+ }
+ if (!to_print.empty()) {
+ PrintOrBuffer(&to_print[0], to_print.size());
+ }
+ have_blank_line_ = to_print.empty() || *to_print.rbegin() == '\n';
+}
+
+void LinePrinter::SetConsoleLocked(bool locked) {
+ if (locked == console_locked_)
+ return;
+
+ if (locked)
+ PrintOnNewLine("");
+
+ console_locked_ = locked;
+
+ if (!locked) {
+ PrintOnNewLine(output_buffer_);
+ if (!line_buffer_.empty()) {
+ Print(line_buffer_, line_type_);
+ }
+ output_buffer_.clear();
+ line_buffer_.clear();
+ }
+}
diff --git a/src/line_printer.h b/src/line_printer.h
new file mode 100644
index 0000000..a8ec9ff
--- /dev/null
+++ b/src/line_printer.h
@@ -0,0 +1,76 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_LINE_PRINTER_H_
+#define NINJA_LINE_PRINTER_H_
+
+#include <stddef.h>
+#include <string>
+
+/// Prints lines of text, possibly overprinting previously printed lines
+/// if the terminal supports it.
+struct LinePrinter {
+ LinePrinter();
+
+ bool is_smart_terminal() const { return smart_terminal_; }
+ void set_smart_terminal(bool smart) { smart_terminal_ = smart; }
+
+ bool supports_color() const { return supports_color_; }
+
+ enum LineType {
+ FULL,
+ ELIDE
+ };
+ /// Overprints the current line. If type is ELIDE, elides to_print to fit on
+ /// one line.
+ void Print(std::string to_print, LineType type);
+
+ /// Prints a string on a new line, not overprinting previous output.
+ void PrintOnNewLine(const std::string& to_print);
+
+ /// Lock or unlock the console. Any output sent to the LinePrinter while the
+ /// console is locked will not be printed until it is unlocked.
+ void SetConsoleLocked(bool locked);
+
+ private:
+ /// Whether we can do fancy terminal control codes.
+ bool smart_terminal_;
+
+ /// Whether we can use ISO 6429 (ANSI) color sequences.
+ bool supports_color_;
+
+ /// Whether the caret is at the beginning of a blank line.
+ bool have_blank_line_;
+
+ /// Whether console is locked.
+ bool console_locked_;
+
+ /// Buffered current line while console is locked.
+ std::string line_buffer_;
+
+ /// Buffered line type while console is locked.
+ LineType line_type_;
+
+ /// Buffered console output while console is locked.
+ std::string output_buffer_;
+
+#ifdef _WIN32
+ void* console_;
+#endif
+
+ /// Print the given data to the console, or buffer it if it is locked.
+ void PrintOrBuffer(const char *data, size_t size);
+};
+
+#endif // NINJA_LINE_PRINTER_H_
diff --git a/src/load_status.h b/src/load_status.h
new file mode 100644
index 0000000..0b16b1a
--- /dev/null
+++ b/src/load_status.h
@@ -0,0 +1,24 @@
+// Copyright 2019 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_LOAD_STATUS_H_
+#define NINJA_LOAD_STATUS_H_
+
+enum LoadStatus {
+ LOAD_ERROR,
+ LOAD_SUCCESS,
+ LOAD_NOT_FOUND,
+};
+
+#endif // NINJA_LOAD_STATUS_H_
diff --git a/src/manifest_parser.cc b/src/manifest_parser.cc
new file mode 100644
index 0000000..860a8fc
--- /dev/null
+++ b/src/manifest_parser.cc
@@ -0,0 +1,424 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "manifest_parser.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <vector>
+
+#include "graph.h"
+#include "state.h"
+#include "util.h"
+#include "version.h"
+
+using namespace std;
+
+ManifestParser::ManifestParser(State* state, FileReader* file_reader,
+ ManifestParserOptions options)
+ : Parser(state, file_reader),
+ options_(options), quiet_(false) {
+ env_ = &state->bindings_;
+}
+
+bool ManifestParser::Parse(const string& filename, const string& input,
+ string* err) {
+ lexer_.Start(filename, input);
+
+ for (;;) {
+ Lexer::Token token = lexer_.ReadToken();
+ switch (token) {
+ case Lexer::POOL:
+ if (!ParsePool(err))
+ return false;
+ break;
+ case Lexer::BUILD:
+ if (!ParseEdge(err))
+ return false;
+ break;
+ case Lexer::RULE:
+ if (!ParseRule(err))
+ return false;
+ break;
+ case Lexer::DEFAULT:
+ if (!ParseDefault(err))
+ return false;
+ break;
+ case Lexer::IDENT: {
+ lexer_.UnreadToken();
+ string name;
+ EvalString let_value;
+ if (!ParseLet(&name, &let_value, err))
+ return false;
+ string value = let_value.Evaluate(env_);
+ // Check ninja_required_version immediately so we can exit
+ // before encountering any syntactic surprises.
+ if (name == "ninja_required_version")
+ CheckNinjaVersion(value);
+ env_->AddBinding(name, value);
+ break;
+ }
+ case Lexer::INCLUDE:
+ if (!ParseFileInclude(false, err))
+ return false;
+ break;
+ case Lexer::SUBNINJA:
+ if (!ParseFileInclude(true, err))
+ return false;
+ break;
+ case Lexer::ERROR: {
+ return lexer_.Error(lexer_.DescribeLastError(), err);
+ }
+ case Lexer::TEOF:
+ return true;
+ case Lexer::NEWLINE:
+ break;
+ default:
+ return lexer_.Error(string("unexpected ") + Lexer::TokenName(token),
+ err);
+ }
+ }
+ return false; // not reached
+}
+
+
+bool ManifestParser::ParsePool(string* err) {
+ string name;
+ if (!lexer_.ReadIdent(&name))
+ return lexer_.Error("expected pool name", err);
+
+ if (!ExpectToken(Lexer::NEWLINE, err))
+ return false;
+
+ if (state_->LookupPool(name) != NULL)
+ return lexer_.Error("duplicate pool '" + name + "'", err);
+
+ int depth = -1;
+
+ while (lexer_.PeekToken(Lexer::INDENT)) {
+ string key;
+ EvalString value;
+ if (!ParseLet(&key, &value, err))
+ return false;
+
+ if (key == "depth") {
+ string depth_string = value.Evaluate(env_);
+ depth = atol(depth_string.c_str());
+ if (depth < 0)
+ return lexer_.Error("invalid pool depth", err);
+ } else {
+ return lexer_.Error("unexpected variable '" + key + "'", err);
+ }
+ }
+
+ if (depth < 0)
+ return lexer_.Error("expected 'depth =' line", err);
+
+ state_->AddPool(new Pool(name, depth));
+ return true;
+}
+
+
+bool ManifestParser::ParseRule(string* err) {
+ string name;
+ if (!lexer_.ReadIdent(&name))
+ return lexer_.Error("expected rule name", err);
+
+ if (!ExpectToken(Lexer::NEWLINE, err))
+ return false;
+
+ if (env_->LookupRuleCurrentScope(name) != NULL)
+ return lexer_.Error("duplicate rule '" + name + "'", err);
+
+ Rule* rule = new Rule(name); // XXX scoped_ptr
+
+ while (lexer_.PeekToken(Lexer::INDENT)) {
+ string key;
+ EvalString value;
+ if (!ParseLet(&key, &value, err))
+ return false;
+
+ if (Rule::IsReservedBinding(key)) {
+ rule->AddBinding(key, value);
+ } else {
+ // Die on other keyvals for now; revisit if we want to add a
+ // scope here.
+ return lexer_.Error("unexpected variable '" + key + "'", err);
+ }
+ }
+
+ if (rule->bindings_["rspfile"].empty() !=
+ rule->bindings_["rspfile_content"].empty()) {
+ return lexer_.Error("rspfile and rspfile_content need to be "
+ "both specified", err);
+ }
+
+ if (rule->bindings_["command"].empty())
+ return lexer_.Error("expected 'command =' line", err);
+
+ env_->AddRule(rule);
+ return true;
+}
+
+bool ManifestParser::ParseLet(string* key, EvalString* value, string* err) {
+ if (!lexer_.ReadIdent(key))
+ return lexer_.Error("expected variable name", err);
+ if (!ExpectToken(Lexer::EQUALS, err))
+ return false;
+ if (!lexer_.ReadVarValue(value, err))
+ return false;
+ return true;
+}
+
+bool ManifestParser::ParseDefault(string* err) {
+ EvalString eval;
+ if (!lexer_.ReadPath(&eval, err))
+ return false;
+ if (eval.empty())
+ return lexer_.Error("expected target name", err);
+
+ do {
+ string path = eval.Evaluate(env_);
+ string path_err;
+ uint64_t slash_bits; // Unused because this only does lookup.
+ if (!CanonicalizePath(&path, &slash_bits, &path_err))
+ return lexer_.Error(path_err, err);
+ if (!state_->AddDefault(path, &path_err))
+ return lexer_.Error(path_err, err);
+
+ eval.Clear();
+ if (!lexer_.ReadPath(&eval, err))
+ return false;
+ } while (!eval.empty());
+
+ if (!ExpectToken(Lexer::NEWLINE, err))
+ return false;
+
+ return true;
+}
+
+bool ManifestParser::ParseEdge(string* err) {
+ vector<EvalString> ins, outs;
+
+ {
+ EvalString out;
+ if (!lexer_.ReadPath(&out, err))
+ return false;
+ while (!out.empty()) {
+ outs.push_back(out);
+
+ out.Clear();
+ if (!lexer_.ReadPath(&out, err))
+ return false;
+ }
+ }
+
+ // Add all implicit outs, counting how many as we go.
+ int implicit_outs = 0;
+ if (lexer_.PeekToken(Lexer::PIPE)) {
+ for (;;) {
+ EvalString out;
+ if (!lexer_.ReadPath(&out, err))
+ return false;
+ if (out.empty())
+ break;
+ outs.push_back(out);
+ ++implicit_outs;
+ }
+ }
+
+ if (outs.empty())
+ return lexer_.Error("expected path", err);
+
+ if (!ExpectToken(Lexer::COLON, err))
+ return false;
+
+ string rule_name;
+ if (!lexer_.ReadIdent(&rule_name))
+ return lexer_.Error("expected build command name", err);
+
+ const Rule* rule = env_->LookupRule(rule_name);
+ if (!rule)
+ return lexer_.Error("unknown build rule '" + rule_name + "'", err);
+
+ for (;;) {
+ // XXX should we require one path here?
+ EvalString in;
+ if (!lexer_.ReadPath(&in, err))
+ return false;
+ if (in.empty())
+ break;
+ ins.push_back(in);
+ }
+
+ // Add all implicit deps, counting how many as we go.
+ int implicit = 0;
+ if (lexer_.PeekToken(Lexer::PIPE)) {
+ for (;;) {
+ EvalString in;
+ if (!lexer_.ReadPath(&in, err))
+ return false;
+ if (in.empty())
+ break;
+ ins.push_back(in);
+ ++implicit;
+ }
+ }
+
+ // Add all order-only deps, counting how many as we go.
+ int order_only = 0;
+ if (lexer_.PeekToken(Lexer::PIPE2)) {
+ for (;;) {
+ EvalString in;
+ if (!lexer_.ReadPath(&in, err))
+ return false;
+ if (in.empty())
+ break;
+ ins.push_back(in);
+ ++order_only;
+ }
+ }
+
+ if (!ExpectToken(Lexer::NEWLINE, err))
+ return false;
+
+ // Bindings on edges are rare, so allocate per-edge envs only when needed.
+ bool has_indent_token = lexer_.PeekToken(Lexer::INDENT);
+ BindingEnv* env = has_indent_token ? new BindingEnv(env_) : env_;
+ while (has_indent_token) {
+ string key;
+ EvalString val;
+ if (!ParseLet(&key, &val, err))
+ return false;
+
+ env->AddBinding(key, val.Evaluate(env_));
+ has_indent_token = lexer_.PeekToken(Lexer::INDENT);
+ }
+
+ Edge* edge = state_->AddEdge(rule);
+ edge->env_ = env;
+
+ string pool_name = edge->GetBinding("pool");
+ if (!pool_name.empty()) {
+ Pool* pool = state_->LookupPool(pool_name);
+ if (pool == NULL)
+ return lexer_.Error("unknown pool name '" + pool_name + "'", err);
+ edge->pool_ = pool;
+ }
+
+ edge->outputs_.reserve(outs.size());
+ for (size_t i = 0, e = outs.size(); i != e; ++i) {
+ string path = outs[i].Evaluate(env);
+ string path_err;
+ uint64_t slash_bits;
+ if (!CanonicalizePath(&path, &slash_bits, &path_err))
+ return lexer_.Error(path_err, err);
+ if (!state_->AddOut(edge, path, slash_bits)) {
+ if (options_.dupe_edge_action_ == kDupeEdgeActionError) {
+ lexer_.Error("multiple rules generate " + path + " [-w dupbuild=err]",
+ err);
+ return false;
+ } else {
+ if (!quiet_) {
+ Warning("multiple rules generate %s. "
+ "builds involving this target will not be correct; "
+ "continuing anyway [-w dupbuild=warn]",
+ path.c_str());
+ }
+ if (e - i <= static_cast<size_t>(implicit_outs))
+ --implicit_outs;
+ }
+ }
+ }
+ if (edge->outputs_.empty()) {
+ // All outputs of the edge are already created by other edges. Don't add
+ // this edge. Do this check before input nodes are connected to the edge.
+ state_->edges_.pop_back();
+ delete edge;
+ return true;
+ }
+ edge->implicit_outs_ = implicit_outs;
+
+ edge->inputs_.reserve(ins.size());
+ for (vector<EvalString>::iterator i = ins.begin(); i != ins.end(); ++i) {
+ string path = i->Evaluate(env);
+ string path_err;
+ uint64_t slash_bits;
+ if (!CanonicalizePath(&path, &slash_bits, &path_err))
+ return lexer_.Error(path_err, err);
+ state_->AddIn(edge, path, slash_bits);
+ }
+ edge->implicit_deps_ = implicit;
+ edge->order_only_deps_ = order_only;
+
+ if (options_.phony_cycle_action_ == kPhonyCycleActionWarn &&
+ edge->maybe_phonycycle_diagnostic()) {
+ // CMake 2.8.12.x and 3.0.x incorrectly write phony build statements
+ // that reference themselves. Ninja used to tolerate these in the
+ // build graph but that has since been fixed. Filter them out to
+ // support users of those old CMake versions.
+ Node* out = edge->outputs_[0];
+ vector<Node*>::iterator new_end =
+ remove(edge->inputs_.begin(), edge->inputs_.end(), out);
+ if (new_end != edge->inputs_.end()) {
+ edge->inputs_.erase(new_end, edge->inputs_.end());
+ if (!quiet_) {
+ Warning("phony target '%s' names itself as an input; "
+ "ignoring [-w phonycycle=warn]",
+ out->path().c_str());
+ }
+ }
+ }
+
+ // Lookup, validate, and save any dyndep binding. It will be used later
+ // to load generated dependency information dynamically, but it must
+ // be one of our manifest-specified inputs.
+ string dyndep = edge->GetUnescapedDyndep();
+ if (!dyndep.empty()) {
+ uint64_t slash_bits;
+ if (!CanonicalizePath(&dyndep, &slash_bits, err))
+ return false;
+ edge->dyndep_ = state_->GetNode(dyndep, slash_bits);
+ edge->dyndep_->set_dyndep_pending(true);
+ vector<Node*>::iterator dgi =
+ std::find(edge->inputs_.begin(), edge->inputs_.end(), edge->dyndep_);
+ if (dgi == edge->inputs_.end()) {
+ return lexer_.Error("dyndep '" + dyndep + "' is not an input", err);
+ }
+ }
+
+ return true;
+}
+
+bool ManifestParser::ParseFileInclude(bool new_scope, string* err) {
+ EvalString eval;
+ if (!lexer_.ReadPath(&eval, err))
+ return false;
+ string path = eval.Evaluate(env_);
+
+ ManifestParser subparser(state_, file_reader_, options_);
+ if (new_scope) {
+ subparser.env_ = new BindingEnv(env_);
+ } else {
+ subparser.env_ = env_;
+ }
+
+ if (!subparser.Load(path, err, &lexer_))
+ return false;
+
+ if (!ExpectToken(Lexer::NEWLINE, err))
+ return false;
+
+ return true;
+}
diff --git a/src/manifest_parser.h b/src/manifest_parser.h
new file mode 100644
index 0000000..954cf46
--- /dev/null
+++ b/src/manifest_parser.h
@@ -0,0 +1,72 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_MANIFEST_PARSER_H_
+#define NINJA_MANIFEST_PARSER_H_
+
+#include "parser.h"
+
+struct BindingEnv;
+struct EvalString;
+
+enum DupeEdgeAction {
+ kDupeEdgeActionWarn,
+ kDupeEdgeActionError,
+};
+
+enum PhonyCycleAction {
+ kPhonyCycleActionWarn,
+ kPhonyCycleActionError,
+};
+
+struct ManifestParserOptions {
+ ManifestParserOptions()
+ : dupe_edge_action_(kDupeEdgeActionWarn),
+ phony_cycle_action_(kPhonyCycleActionWarn) {}
+ DupeEdgeAction dupe_edge_action_;
+ PhonyCycleAction phony_cycle_action_;
+};
+
+/// Parses .ninja files.
+struct ManifestParser : public Parser {
+ ManifestParser(State* state, FileReader* file_reader,
+ ManifestParserOptions options = ManifestParserOptions());
+
+ /// Parse a text string of input. Used by tests.
+ bool ParseTest(const std::string& input, std::string* err) {
+ quiet_ = true;
+ return Parse("input", input, err);
+ }
+
+private:
+ /// Parse a file, given its contents as a string.
+ bool Parse(const std::string& filename, const std::string& input,
+ std::string* err);
+
+ /// Parse various statement types.
+ bool ParsePool(std::string* err);
+ bool ParseRule(std::string* err);
+ bool ParseLet(std::string* key, EvalString* val, std::string* err);
+ bool ParseEdge(std::string* err);
+ bool ParseDefault(std::string* err);
+
+ /// Parse either a 'subninja' or 'include' line.
+ bool ParseFileInclude(bool new_scope, std::string* err);
+
+ BindingEnv* env_;
+ ManifestParserOptions options_;
+ bool quiet_;
+};
+
+#endif // NINJA_MANIFEST_PARSER_H_
diff --git a/src/manifest_parser_perftest.cc b/src/manifest_parser_perftest.cc
new file mode 100644
index 0000000..853d8e0
--- /dev/null
+++ b/src/manifest_parser_perftest.cc
@@ -0,0 +1,123 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Tests manifest parser performance. Expects to be run in ninja's root
+// directory.
+
+#include <numeric>
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef _WIN32
+#include "getopt.h"
+#include <direct.h>
+#elif defined(_AIX)
+#include "getopt.h"
+#include <unistd.h>
+#else
+#include <getopt.h>
+#include <unistd.h>
+#endif
+
+#include "disk_interface.h"
+#include "graph.h"
+#include "manifest_parser.h"
+#include "metrics.h"
+#include "state.h"
+#include "util.h"
+
+using namespace std;
+
+bool WriteFakeManifests(const string& dir, string* err) {
+ RealDiskInterface disk_interface;
+ TimeStamp mtime = disk_interface.Stat(dir + "/build.ninja", err);
+ if (mtime != 0) // 0 means that the file doesn't exist yet.
+ return mtime != -1;
+
+ string command = "python misc/write_fake_manifests.py " + dir;
+ printf("Creating manifest data..."); fflush(stdout);
+ int exit_code = system(command.c_str());
+ printf("done.\n");
+ if (exit_code != 0)
+ *err = "Failed to run " + command;
+ return exit_code == 0;
+}
+
+int LoadManifests(bool measure_command_evaluation) {
+ string err;
+ RealDiskInterface disk_interface;
+ State state;
+ ManifestParser parser(&state, &disk_interface);
+ if (!parser.Load("build.ninja", &err)) {
+ fprintf(stderr, "Failed to read test data: %s\n", err.c_str());
+ exit(1);
+ }
+ // Doing an empty build involves reading the manifest and evaluating all
+ // commands required for the requested targets. So include command
+ // evaluation in the perftest by default.
+ int optimization_guard = 0;
+ if (measure_command_evaluation)
+ for (size_t i = 0; i < state.edges_.size(); ++i)
+ optimization_guard += state.edges_[i]->EvaluateCommand().size();
+ return optimization_guard;
+}
+
+int main(int argc, char* argv[]) {
+ bool measure_command_evaluation = true;
+ int opt;
+ while ((opt = getopt(argc, argv, const_cast<char*>("fh"))) != -1) {
+ switch (opt) {
+ case 'f':
+ measure_command_evaluation = false;
+ break;
+ case 'h':
+ default:
+ printf("usage: manifest_parser_perftest\n"
+"\n"
+"options:\n"
+" -f only measure manifest load time, not command evaluation time\n"
+ );
+ return 1;
+ }
+ }
+
+ const char kManifestDir[] = "build/manifest_perftest";
+
+ string err;
+ if (!WriteFakeManifests(kManifestDir, &err)) {
+ fprintf(stderr, "Failed to write test data: %s\n", err.c_str());
+ return 1;
+ }
+
+ if (chdir(kManifestDir) < 0)
+ Fatal("chdir: %s", strerror(errno));
+
+ const int kNumRepetitions = 5;
+ vector<int> times;
+ for (int i = 0; i < kNumRepetitions; ++i) {
+ int64_t start = GetTimeMillis();
+ int optimization_guard = LoadManifests(measure_command_evaluation);
+ int delta = (int)(GetTimeMillis() - start);
+ printf("%dms (hash: %x)\n", delta, optimization_guard);
+ times.push_back(delta);
+ }
+
+ int min = *min_element(times.begin(), times.end());
+ int max = *max_element(times.begin(), times.end());
+ float total = accumulate(times.begin(), times.end(), 0.0f);
+ printf("min %dms max %dms avg %.1fms\n", min, max, total / times.size());
+}
diff --git a/src/manifest_parser_test.cc b/src/manifest_parser_test.cc
new file mode 100644
index 0000000..ec2eeed
--- /dev/null
+++ b/src/manifest_parser_test.cc
@@ -0,0 +1,1158 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "manifest_parser.h"
+
+#include <map>
+#include <vector>
+
+#include "graph.h"
+#include "state.h"
+#include "test.h"
+
+using namespace std;
+
+struct ParserTest : public testing::Test {
+ void AssertParse(const char* input) {
+ ManifestParser parser(&state, &fs_);
+ string err;
+ EXPECT_TRUE(parser.ParseTest(input, &err));
+ ASSERT_EQ("", err);
+ VerifyGraph(state);
+ }
+
+ State state;
+ VirtualFileSystem fs_;
+};
+
+TEST_F(ParserTest, Empty) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(""));
+}
+
+TEST_F(ParserTest, Rules) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"\n"
+"rule date\n"
+" command = date > $out\n"
+"\n"
+"build result: cat in_1.cc in-2.O\n"));
+
+ ASSERT_EQ(3u, state.bindings_.GetRules().size());
+ const Rule* rule = state.bindings_.GetRules().begin()->second;
+ EXPECT_EQ("cat", rule->name());
+ EXPECT_EQ("[cat ][$in][ > ][$out]",
+ rule->GetBinding("command")->Serialize());
+}
+
+TEST_F(ParserTest, RuleAttributes) {
+ // Check that all of the allowed rule attributes are parsed ok.
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = a\n"
+" depfile = a\n"
+" deps = a\n"
+" description = a\n"
+" generator = a\n"
+" restat = a\n"
+" rspfile = a\n"
+" rspfile_content = a\n"
+));
+}
+
+TEST_F(ParserTest, IgnoreIndentedComments) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+" #indented comment\n"
+"rule cat\n"
+" command = cat $in > $out\n"
+" #generator = 1\n"
+" restat = 1 # comment\n"
+" #comment\n"
+"build result: cat in_1.cc in-2.O\n"
+" #comment\n"));
+
+ ASSERT_EQ(2u, state.bindings_.GetRules().size());
+ const Rule* rule = state.bindings_.GetRules().begin()->second;
+ EXPECT_EQ("cat", rule->name());
+ Edge* edge = state.GetNode("result", 0)->in_edge();
+ EXPECT_TRUE(edge->GetBindingBool("restat"));
+ EXPECT_FALSE(edge->GetBindingBool("generator"));
+}
+
+TEST_F(ParserTest, IgnoreIndentedBlankLines) {
+ // the indented blanks used to cause parse errors
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+" \n"
+"rule cat\n"
+" command = cat $in > $out\n"
+" \n"
+"build result: cat in_1.cc in-2.O\n"
+" \n"
+"variable=1\n"));
+
+ // the variable must be in the top level environment
+ EXPECT_EQ("1", state.bindings_.LookupVariable("variable"));
+}
+
+TEST_F(ParserTest, ResponseFiles) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat_rsp\n"
+" command = cat $rspfile > $out\n"
+" rspfile = $rspfile\n"
+" rspfile_content = $in\n"
+"\n"
+"build out: cat_rsp in\n"
+" rspfile=out.rsp\n"));
+
+ ASSERT_EQ(2u, state.bindings_.GetRules().size());
+ const Rule* rule = state.bindings_.GetRules().begin()->second;
+ EXPECT_EQ("cat_rsp", rule->name());
+ EXPECT_EQ("[cat ][$rspfile][ > ][$out]",
+ rule->GetBinding("command")->Serialize());
+ EXPECT_EQ("[$rspfile]", rule->GetBinding("rspfile")->Serialize());
+ EXPECT_EQ("[$in]", rule->GetBinding("rspfile_content")->Serialize());
+}
+
+TEST_F(ParserTest, InNewline) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat_rsp\n"
+" command = cat $in_newline > $out\n"
+"\n"
+"build out: cat_rsp in in2\n"
+" rspfile=out.rsp\n"));
+
+ ASSERT_EQ(2u, state.bindings_.GetRules().size());
+ const Rule* rule = state.bindings_.GetRules().begin()->second;
+ EXPECT_EQ("cat_rsp", rule->name());
+ EXPECT_EQ("[cat ][$in_newline][ > ][$out]",
+ rule->GetBinding("command")->Serialize());
+
+ Edge* edge = state.edges_[0];
+ EXPECT_EQ("cat in\nin2 > out", edge->EvaluateCommand());
+}
+
+TEST_F(ParserTest, Variables) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"l = one-letter-test\n"
+"rule link\n"
+" command = ld $l $extra $with_under -o $out $in\n"
+"\n"
+"extra = -pthread\n"
+"with_under = -under\n"
+"build a: link b c\n"
+"nested1 = 1\n"
+"nested2 = $nested1/2\n"
+"build supernested: link x\n"
+" extra = $nested2/3\n"));
+
+ ASSERT_EQ(2u, state.edges_.size());
+ Edge* edge = state.edges_[0];
+ EXPECT_EQ("ld one-letter-test -pthread -under -o a b c",
+ edge->EvaluateCommand());
+ EXPECT_EQ("1/2", state.bindings_.LookupVariable("nested2"));
+
+ edge = state.edges_[1];
+ EXPECT_EQ("ld one-letter-test 1/2/3 -under -o supernested x",
+ edge->EvaluateCommand());
+}
+
+TEST_F(ParserTest, VariableScope) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"foo = bar\n"
+"rule cmd\n"
+" command = cmd $foo $in $out\n"
+"\n"
+"build inner: cmd a\n"
+" foo = baz\n"
+"build outer: cmd b\n"
+"\n" // Extra newline after build line tickles a regression.
+));
+
+ ASSERT_EQ(2u, state.edges_.size());
+ EXPECT_EQ("cmd baz a inner", state.edges_[0]->EvaluateCommand());
+ EXPECT_EQ("cmd bar b outer", state.edges_[1]->EvaluateCommand());
+}
+
+TEST_F(ParserTest, Continuation) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule link\n"
+" command = foo bar $\n"
+" baz\n"
+"\n"
+"build a: link c $\n"
+" d e f\n"));
+
+ ASSERT_EQ(2u, state.bindings_.GetRules().size());
+ const Rule* rule = state.bindings_.GetRules().begin()->second;
+ EXPECT_EQ("link", rule->name());
+ EXPECT_EQ("[foo bar baz]", rule->GetBinding("command")->Serialize());
+}
+
+TEST_F(ParserTest, Backslash) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"foo = bar\\baz\n"
+"foo2 = bar\\ baz\n"
+));
+ EXPECT_EQ("bar\\baz", state.bindings_.LookupVariable("foo"));
+ EXPECT_EQ("bar\\ baz", state.bindings_.LookupVariable("foo2"));
+}
+
+TEST_F(ParserTest, Comment) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"# this is a comment\n"
+"foo = not # a comment\n"));
+ EXPECT_EQ("not # a comment", state.bindings_.LookupVariable("foo"));
+}
+
+TEST_F(ParserTest, Dollars) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule foo\n"
+" command = ${out}bar$$baz$$$\n"
+"blah\n"
+"x = $$dollar\n"
+"build $x: foo y\n"
+));
+ EXPECT_EQ("$dollar", state.bindings_.LookupVariable("x"));
+#ifdef _WIN32
+ EXPECT_EQ("$dollarbar$baz$blah", state.edges_[0]->EvaluateCommand());
+#else
+ EXPECT_EQ("'$dollar'bar$baz$blah", state.edges_[0]->EvaluateCommand());
+#endif
+}
+
+TEST_F(ParserTest, EscapeSpaces) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule spaces\n"
+" command = something\n"
+"build foo$ bar: spaces $$one two$$$ three\n"
+));
+ EXPECT_TRUE(state.LookupNode("foo bar"));
+ EXPECT_EQ(state.edges_[0]->outputs_[0]->path(), "foo bar");
+ EXPECT_EQ(state.edges_[0]->inputs_[0]->path(), "$one");
+ EXPECT_EQ(state.edges_[0]->inputs_[1]->path(), "two$ three");
+ EXPECT_EQ(state.edges_[0]->EvaluateCommand(), "something");
+}
+
+TEST_F(ParserTest, CanonicalizeFile) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"build out: cat in/1 in//2\n"
+"build in/1: cat\n"
+"build in/2: cat\n"));
+
+ EXPECT_TRUE(state.LookupNode("in/1"));
+ EXPECT_TRUE(state.LookupNode("in/2"));
+ EXPECT_FALSE(state.LookupNode("in//1"));
+ EXPECT_FALSE(state.LookupNode("in//2"));
+}
+
+#ifdef _WIN32
+TEST_F(ParserTest, CanonicalizeFileBackslashes) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"build out: cat in\\1 in\\\\2\n"
+"build in\\1: cat\n"
+"build in\\2: cat\n"));
+
+ Node* node = state.LookupNode("in/1");;
+ EXPECT_TRUE(node);
+ EXPECT_EQ(1, node->slash_bits());
+ node = state.LookupNode("in/2");
+ EXPECT_TRUE(node);
+ EXPECT_EQ(1, node->slash_bits());
+ EXPECT_FALSE(state.LookupNode("in//1"));
+ EXPECT_FALSE(state.LookupNode("in//2"));
+}
+#endif
+
+TEST_F(ParserTest, PathVariables) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"dir = out\n"
+"build $dir/exe: cat src\n"));
+
+ EXPECT_FALSE(state.LookupNode("$dir/exe"));
+ EXPECT_TRUE(state.LookupNode("out/exe"));
+}
+
+TEST_F(ParserTest, CanonicalizePaths) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"build ./out.o: cat ./bar/baz/../foo.cc\n"));
+
+ EXPECT_FALSE(state.LookupNode("./out.o"));
+ EXPECT_TRUE(state.LookupNode("out.o"));
+ EXPECT_FALSE(state.LookupNode("./bar/baz/../foo.cc"));
+ EXPECT_TRUE(state.LookupNode("bar/foo.cc"));
+}
+
+#ifdef _WIN32
+TEST_F(ParserTest, CanonicalizePathsBackslashes) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"build ./out.o: cat ./bar/baz/../foo.cc\n"
+"build .\\out2.o: cat .\\bar/baz\\..\\foo.cc\n"
+"build .\\out3.o: cat .\\bar\\baz\\..\\foo3.cc\n"
+));
+
+ EXPECT_FALSE(state.LookupNode("./out.o"));
+ EXPECT_FALSE(state.LookupNode(".\\out2.o"));
+ EXPECT_FALSE(state.LookupNode(".\\out3.o"));
+ EXPECT_TRUE(state.LookupNode("out.o"));
+ EXPECT_TRUE(state.LookupNode("out2.o"));
+ EXPECT_TRUE(state.LookupNode("out3.o"));
+ EXPECT_FALSE(state.LookupNode("./bar/baz/../foo.cc"));
+ EXPECT_FALSE(state.LookupNode(".\\bar/baz\\..\\foo.cc"));
+ EXPECT_FALSE(state.LookupNode(".\\bar/baz\\..\\foo3.cc"));
+ Node* node = state.LookupNode("bar/foo.cc");
+ EXPECT_TRUE(node);
+ EXPECT_EQ(0, node->slash_bits());
+ node = state.LookupNode("bar/foo3.cc");
+ EXPECT_TRUE(node);
+ EXPECT_EQ(1, node->slash_bits());
+}
+#endif
+
+TEST_F(ParserTest, DuplicateEdgeWithMultipleOutputs) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"build out1 out2: cat in1\n"
+"build out1: cat in2\n"
+"build final: cat out1\n"
+));
+ // AssertParse() checks that the generated build graph is self-consistent.
+ // That's all the checking that this test needs.
+}
+
+TEST_F(ParserTest, NoDeadPointerFromDuplicateEdge) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"build out: cat in\n"
+"build out: cat in\n"
+));
+ // AssertParse() checks that the generated build graph is self-consistent.
+ // That's all the checking that this test needs.
+}
+
+TEST_F(ParserTest, DuplicateEdgeWithMultipleOutputsError) {
+ const char kInput[] =
+"rule cat\n"
+" command = cat $in > $out\n"
+"build out1 out2: cat in1\n"
+"build out1: cat in2\n"
+"build final: cat out1\n";
+ ManifestParserOptions parser_opts;
+ parser_opts.dupe_edge_action_ = kDupeEdgeActionError;
+ ManifestParser parser(&state, &fs_, parser_opts);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:5: multiple rules generate out1 [-w dupbuild=err]\n", err);
+}
+
+TEST_F(ParserTest, DuplicateEdgeInIncludedFile) {
+ fs_.Create("sub.ninja",
+ "rule cat\n"
+ " command = cat $in > $out\n"
+ "build out1 out2: cat in1\n"
+ "build out1: cat in2\n"
+ "build final: cat out1\n");
+ const char kInput[] =
+ "subninja sub.ninja\n";
+ ManifestParserOptions parser_opts;
+ parser_opts.dupe_edge_action_ = kDupeEdgeActionError;
+ ManifestParser parser(&state, &fs_, parser_opts);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("sub.ninja:5: multiple rules generate out1 [-w dupbuild=err]\n",
+ err);
+}
+
+TEST_F(ParserTest, PhonySelfReferenceIgnored) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"build a: phony a\n"
+));
+
+ Node* node = state.LookupNode("a");
+ Edge* edge = node->in_edge();
+ ASSERT_TRUE(edge->inputs_.empty());
+}
+
+TEST_F(ParserTest, PhonySelfReferenceKept) {
+ const char kInput[] =
+"build a: phony a\n";
+ ManifestParserOptions parser_opts;
+ parser_opts.phony_cycle_action_ = kPhonyCycleActionError;
+ ManifestParser parser(&state, &fs_, parser_opts);
+ string err;
+ EXPECT_TRUE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("", err);
+
+ Node* node = state.LookupNode("a");
+ Edge* edge = node->in_edge();
+ ASSERT_EQ(edge->inputs_.size(), 1);
+ ASSERT_EQ(edge->inputs_[0], node);
+}
+
+TEST_F(ParserTest, ReservedWords) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule build\n"
+" command = rule run $out\n"
+"build subninja: build include default foo.cc\n"
+"default subninja\n"));
+}
+
+TEST_F(ParserTest, Errors) {
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(string("subn", 4), &err));
+ EXPECT_EQ("input:1: expected '=', got eof\n"
+ "subn\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("foobar", &err));
+ EXPECT_EQ("input:1: expected '=', got eof\n"
+ "foobar\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("x 3", &err));
+ EXPECT_EQ("input:1: expected '=', got identifier\n"
+ "x 3\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("x = 3", &err));
+ EXPECT_EQ("input:1: unexpected EOF\n"
+ "x = 3\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("x = 3\ny 2", &err));
+ EXPECT_EQ("input:2: expected '=', got identifier\n"
+ "y 2\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("x = $", &err));
+ EXPECT_EQ("input:1: bad $-escape (literal $ must be written as $$)\n"
+ "x = $\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("x = $\n $[\n", &err));
+ EXPECT_EQ("input:2: bad $-escape (literal $ must be written as $$)\n"
+ " $[\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("x = a$\n b$\n $\n", &err));
+ EXPECT_EQ("input:4: unexpected EOF\n"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("build\n", &err));
+ EXPECT_EQ("input:1: expected path\n"
+ "build\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("build x: y z\n", &err));
+ EXPECT_EQ("input:1: unknown build rule 'y'\n"
+ "build x: y z\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("build x:: y z\n", &err));
+ EXPECT_EQ("input:1: expected build command name\n"
+ "build x:: y z\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule cat\n command = cat ok\n"
+ "build x: cat $\n :\n",
+ &err));
+ EXPECT_EQ("input:4: expected newline, got ':'\n"
+ " :\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule cat\n",
+ &err));
+ EXPECT_EQ("input:2: expected 'command =' line\n", err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule cat\n"
+ " command = echo\n"
+ "rule cat\n"
+ " command = echo\n", &err));
+ EXPECT_EQ("input:3: duplicate rule 'cat'\n"
+ "rule cat\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule cat\n"
+ " command = echo\n"
+ " rspfile = cat.rsp\n", &err));
+ EXPECT_EQ(
+ "input:4: rspfile and rspfile_content need to be both specified\n",
+ err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule cat\n"
+ " command = ${fafsd\n"
+ "foo = bar\n",
+ &err));
+ EXPECT_EQ("input:2: bad $-escape (literal $ must be written as $$)\n"
+ " command = ${fafsd\n"
+ " ^ near here"
+ , err);
+ }
+
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule cat\n"
+ " command = cat\n"
+ "build $.: cat foo\n",
+ &err));
+ EXPECT_EQ("input:3: bad $-escape (literal $ must be written as $$)\n"
+ "build $.: cat foo\n"
+ " ^ near here"
+ , err);
+ }
+
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule cat\n"
+ " command = cat\n"
+ "build $: cat foo\n",
+ &err));
+ EXPECT_EQ("input:3: expected ':', got newline ($ also escapes ':')\n"
+ "build $: cat foo\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule %foo\n",
+ &err));
+ EXPECT_EQ("input:1: expected rule name\n"
+ "rule %foo\n"
+ " ^ near here",
+ err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule cc\n"
+ " command = foo\n"
+ " othervar = bar\n",
+ &err));
+ EXPECT_EQ("input:3: unexpected variable 'othervar'\n"
+ " othervar = bar\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule cc\n command = foo\n"
+ "build $.: cc bar.cc\n",
+ &err));
+ EXPECT_EQ("input:3: bad $-escape (literal $ must be written as $$)\n"
+ "build $.: cc bar.cc\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule cc\n command = foo\n && bar",
+ &err));
+ EXPECT_EQ("input:3: expected variable name\n"
+ " && bar\n"
+ " ^ near here",
+ err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule cc\n command = foo\n"
+ "build $: cc bar.cc\n",
+ &err));
+ EXPECT_EQ("input:3: expected ':', got newline ($ also escapes ':')\n"
+ "build $: cc bar.cc\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("default\n",
+ &err));
+ EXPECT_EQ("input:1: expected target name\n"
+ "default\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("default nonexistent\n",
+ &err));
+ EXPECT_EQ("input:1: unknown target 'nonexistent'\n"
+ "default nonexistent\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule r\n command = r\n"
+ "build b: r\n"
+ "default b:\n",
+ &err));
+ EXPECT_EQ("input:4: expected newline, got ':'\n"
+ "default b:\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("default $a\n", &err));
+ EXPECT_EQ("input:1: empty path\n"
+ "default $a\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule r\n"
+ " command = r\n"
+ "build $a: r $c\n", &err));
+ // XXX the line number is wrong; we should evaluate paths in ParseEdge
+ // as we see them, not after we've read them all!
+ EXPECT_EQ("input:4: empty path\n", err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ // the indented blank line must terminate the rule
+ // this also verifies that "unexpected (token)" errors are correct
+ EXPECT_FALSE(parser.ParseTest("rule r\n"
+ " command = r\n"
+ " \n"
+ " generator = 1\n", &err));
+ EXPECT_EQ("input:4: unexpected indent\n", err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("pool\n", &err));
+ EXPECT_EQ("input:1: expected pool name\n"
+ "pool\n"
+ " ^ near here", err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("pool foo\n", &err));
+ EXPECT_EQ("input:2: expected 'depth =' line\n", err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("pool foo\n"
+ " depth = 4\n"
+ "pool foo\n", &err));
+ EXPECT_EQ("input:3: duplicate pool 'foo'\n"
+ "pool foo\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("pool foo\n"
+ " depth = -1\n", &err));
+ EXPECT_EQ("input:2: invalid pool depth\n"
+ " depth = -1\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("pool foo\n"
+ " bar = 1\n", &err));
+ EXPECT_EQ("input:2: unexpected variable 'bar'\n"
+ " bar = 1\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ // Pool names are dereferenced at edge parsing time.
+ EXPECT_FALSE(parser.ParseTest("rule run\n"
+ " command = echo\n"
+ " pool = unnamed_pool\n"
+ "build out: run in\n", &err));
+ EXPECT_EQ("input:5: unknown pool name 'unnamed_pool'\n", err);
+ }
+}
+
+TEST_F(ParserTest, MissingInput) {
+ State local_state;
+ ManifestParser parser(&local_state, &fs_);
+ string err;
+ EXPECT_FALSE(parser.Load("build.ninja", &err));
+ EXPECT_EQ("loading 'build.ninja': No such file or directory", err);
+}
+
+TEST_F(ParserTest, MultipleOutputs) {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_TRUE(parser.ParseTest("rule cc\n command = foo\n depfile = bar\n"
+ "build a.o b.o: cc c.cc\n",
+ &err));
+ EXPECT_EQ("", err);
+}
+
+TEST_F(ParserTest, MultipleOutputsWithDeps) {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+ EXPECT_TRUE(parser.ParseTest("rule cc\n command = foo\n deps = gcc\n"
+ "build a.o b.o: cc c.cc\n",
+ &err));
+ EXPECT_EQ("", err);
+}
+
+TEST_F(ParserTest, SubNinja) {
+ fs_.Create("test.ninja",
+ "var = inner\n"
+ "build $builddir/inner: varref\n");
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"builddir = some_dir/\n"
+"rule varref\n"
+" command = varref $var\n"
+"var = outer\n"
+"build $builddir/outer: varref\n"
+"subninja test.ninja\n"
+"build $builddir/outer2: varref\n"));
+ ASSERT_EQ(1u, fs_.files_read_.size());
+
+ EXPECT_EQ("test.ninja", fs_.files_read_[0]);
+ EXPECT_TRUE(state.LookupNode("some_dir/outer"));
+ // Verify our builddir setting is inherited.
+ EXPECT_TRUE(state.LookupNode("some_dir/inner"));
+
+ ASSERT_EQ(3u, state.edges_.size());
+ EXPECT_EQ("varref outer", state.edges_[0]->EvaluateCommand());
+ EXPECT_EQ("varref inner", state.edges_[1]->EvaluateCommand());
+ EXPECT_EQ("varref outer", state.edges_[2]->EvaluateCommand());
+}
+
+TEST_F(ParserTest, MissingSubNinja) {
+ ManifestParser parser(&state, &fs_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("subninja foo.ninja\n", &err));
+ EXPECT_EQ("input:1: loading 'foo.ninja': No such file or directory\n"
+ "subninja foo.ninja\n"
+ " ^ near here"
+ , err);
+}
+
+TEST_F(ParserTest, DuplicateRuleInDifferentSubninjas) {
+ // Test that rules are scoped to subninjas.
+ fs_.Create("test.ninja", "rule cat\n"
+ " command = cat\n");
+ ManifestParser parser(&state, &fs_);
+ string err;
+ EXPECT_TRUE(parser.ParseTest("rule cat\n"
+ " command = cat\n"
+ "subninja test.ninja\n", &err));
+}
+
+TEST_F(ParserTest, DuplicateRuleInDifferentSubninjasWithInclude) {
+ // Test that rules are scoped to subninjas even with includes.
+ fs_.Create("rules.ninja", "rule cat\n"
+ " command = cat\n");
+ fs_.Create("test.ninja", "include rules.ninja\n"
+ "build x : cat\n");
+ ManifestParser parser(&state, &fs_);
+ string err;
+ EXPECT_TRUE(parser.ParseTest("include rules.ninja\n"
+ "subninja test.ninja\n"
+ "build y : cat\n", &err));
+}
+
+TEST_F(ParserTest, Include) {
+ fs_.Create("include.ninja", "var = inner\n");
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"var = outer\n"
+"include include.ninja\n"));
+
+ ASSERT_EQ(1u, fs_.files_read_.size());
+ EXPECT_EQ("include.ninja", fs_.files_read_[0]);
+ EXPECT_EQ("inner", state.bindings_.LookupVariable("var"));
+}
+
+TEST_F(ParserTest, BrokenInclude) {
+ fs_.Create("include.ninja", "build\n");
+ ManifestParser parser(&state, &fs_);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("include include.ninja\n", &err));
+ EXPECT_EQ("include.ninja:1: expected path\n"
+ "build\n"
+ " ^ near here"
+ , err);
+}
+
+TEST_F(ParserTest, Implicit) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"build foo: cat bar | baz\n"));
+
+ Edge* edge = state.LookupNode("foo")->in_edge();
+ ASSERT_TRUE(edge->is_implicit(1));
+}
+
+TEST_F(ParserTest, OrderOnly) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n command = cat $in > $out\n"
+"build foo: cat bar || baz\n"));
+
+ Edge* edge = state.LookupNode("foo")->in_edge();
+ ASSERT_TRUE(edge->is_order_only(1));
+}
+
+TEST_F(ParserTest, ImplicitOutput) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"build foo | imp: cat bar\n"));
+
+ Edge* edge = state.LookupNode("imp")->in_edge();
+ ASSERT_EQ(edge->outputs_.size(), 2);
+ EXPECT_TRUE(edge->is_implicit_out(1));
+}
+
+TEST_F(ParserTest, ImplicitOutputEmpty) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"build foo | : cat bar\n"));
+
+ Edge* edge = state.LookupNode("foo")->in_edge();
+ ASSERT_EQ(edge->outputs_.size(), 1);
+ EXPECT_FALSE(edge->is_implicit_out(0));
+}
+
+TEST_F(ParserTest, ImplicitOutputDupe) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"build foo baz | foo baq foo: cat bar\n"));
+
+ Edge* edge = state.LookupNode("foo")->in_edge();
+ ASSERT_EQ(edge->outputs_.size(), 3);
+ EXPECT_FALSE(edge->is_implicit_out(0));
+ EXPECT_FALSE(edge->is_implicit_out(1));
+ EXPECT_TRUE(edge->is_implicit_out(2));
+}
+
+TEST_F(ParserTest, ImplicitOutputDupes) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"build foo foo foo | foo foo foo foo: cat bar\n"));
+
+ Edge* edge = state.LookupNode("foo")->in_edge();
+ ASSERT_EQ(edge->outputs_.size(), 1);
+ EXPECT_FALSE(edge->is_implicit_out(0));
+}
+
+TEST_F(ParserTest, NoExplicitOutput) {
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_TRUE(parser.ParseTest(
+"rule cat\n"
+" command = cat $in > $out\n"
+"build | imp : cat bar\n", &err));
+}
+
+TEST_F(ParserTest, DefaultDefault) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n command = cat $in > $out\n"
+"build a: cat foo\n"
+"build b: cat foo\n"
+"build c: cat foo\n"
+"build d: cat foo\n"));
+
+ string err;
+ EXPECT_EQ(4u, state.DefaultNodes(&err).size());
+ EXPECT_EQ("", err);
+}
+
+TEST_F(ParserTest, DefaultDefaultCycle) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n command = cat $in > $out\n"
+"build a: cat a\n"));
+
+ string err;
+ EXPECT_EQ(0u, state.DefaultNodes(&err).size());
+ EXPECT_EQ("could not determine root nodes of build graph", err);
+}
+
+TEST_F(ParserTest, DefaultStatements) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n command = cat $in > $out\n"
+"build a: cat foo\n"
+"build b: cat foo\n"
+"build c: cat foo\n"
+"build d: cat foo\n"
+"third = c\n"
+"default a b\n"
+"default $third\n"));
+
+ string err;
+ vector<Node*> nodes = state.DefaultNodes(&err);
+ EXPECT_EQ("", err);
+ ASSERT_EQ(3u, nodes.size());
+ EXPECT_EQ("a", nodes[0]->path());
+ EXPECT_EQ("b", nodes[1]->path());
+ EXPECT_EQ("c", nodes[2]->path());
+}
+
+TEST_F(ParserTest, UTF8) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule utf8\n"
+" command = true\n"
+" description = compilaci\xC3\xB3\n"));
+}
+
+TEST_F(ParserTest, CRLF) {
+ State local_state;
+ ManifestParser parser(&local_state, NULL);
+ string err;
+
+ EXPECT_TRUE(parser.ParseTest("# comment with crlf\r\n", &err));
+ EXPECT_TRUE(parser.ParseTest("foo = foo\nbar = bar\r\n", &err));
+ EXPECT_TRUE(parser.ParseTest(
+ "pool link_pool\r\n"
+ " depth = 15\r\n\r\n"
+ "rule xyz\r\n"
+ " command = something$expand \r\n"
+ " description = YAY!\r\n",
+ &err));
+}
+
+TEST_F(ParserTest, DyndepNotSpecified) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"build result: cat in\n"));
+ Edge* edge = state.GetNode("result", 0)->in_edge();
+ ASSERT_FALSE(edge->dyndep_);
+}
+
+TEST_F(ParserTest, DyndepNotInput) {
+ State lstate;
+ ManifestParser parser(&lstate, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(
+"rule touch\n"
+" command = touch $out\n"
+"build result: touch\n"
+" dyndep = notin\n",
+ &err));
+ EXPECT_EQ("input:5: dyndep 'notin' is not an input\n", err);
+}
+
+TEST_F(ParserTest, DyndepExplicitInput) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"build result: cat in\n"
+" dyndep = in\n"));
+ Edge* edge = state.GetNode("result", 0)->in_edge();
+ ASSERT_TRUE(edge->dyndep_);
+ EXPECT_TRUE(edge->dyndep_->dyndep_pending());
+ EXPECT_EQ(edge->dyndep_->path(), "in");
+}
+
+TEST_F(ParserTest, DyndepImplicitInput) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"build result: cat in | dd\n"
+" dyndep = dd\n"));
+ Edge* edge = state.GetNode("result", 0)->in_edge();
+ ASSERT_TRUE(edge->dyndep_);
+ EXPECT_TRUE(edge->dyndep_->dyndep_pending());
+ EXPECT_EQ(edge->dyndep_->path(), "dd");
+}
+
+TEST_F(ParserTest, DyndepOrderOnlyInput) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"build result: cat in || dd\n"
+" dyndep = dd\n"));
+ Edge* edge = state.GetNode("result", 0)->in_edge();
+ ASSERT_TRUE(edge->dyndep_);
+ EXPECT_TRUE(edge->dyndep_->dyndep_pending());
+ EXPECT_EQ(edge->dyndep_->path(), "dd");
+}
+
+TEST_F(ParserTest, DyndepRuleInput) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+" dyndep = $in\n"
+"build result: cat in\n"));
+ Edge* edge = state.GetNode("result", 0)->in_edge();
+ ASSERT_TRUE(edge->dyndep_);
+ EXPECT_TRUE(edge->dyndep_->dyndep_pending());
+ EXPECT_EQ(edge->dyndep_->path(), "in");
+}
diff --git a/src/metrics.cc b/src/metrics.cc
new file mode 100644
index 0000000..dbaf221
--- /dev/null
+++ b/src/metrics.cc
@@ -0,0 +1,129 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "metrics.h"
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+
+#ifndef _WIN32
+#include <sys/time.h>
+#else
+#include <windows.h>
+#endif
+
+#include <algorithm>
+
+#include "util.h"
+
+using namespace std;
+
+Metrics* g_metrics = NULL;
+
+namespace {
+
+#ifndef _WIN32
+/// Compute a platform-specific high-res timer value that fits into an int64.
+int64_t HighResTimer() {
+ timeval tv;
+ if (gettimeofday(&tv, NULL) < 0)
+ Fatal("gettimeofday: %s", strerror(errno));
+ return (int64_t)tv.tv_sec * 1000*1000 + tv.tv_usec;
+}
+
+/// Convert a delta of HighResTimer() values to microseconds.
+int64_t TimerToMicros(int64_t dt) {
+ // No conversion necessary.
+ return dt;
+}
+#else
+int64_t LargeIntegerToInt64(const LARGE_INTEGER& i) {
+ return ((int64_t)i.HighPart) << 32 | i.LowPart;
+}
+
+int64_t HighResTimer() {
+ LARGE_INTEGER counter;
+ if (!QueryPerformanceCounter(&counter))
+ Fatal("QueryPerformanceCounter: %s", GetLastErrorString().c_str());
+ return LargeIntegerToInt64(counter);
+}
+
+int64_t TimerToMicros(int64_t dt) {
+ static int64_t ticks_per_sec = 0;
+ if (!ticks_per_sec) {
+ LARGE_INTEGER freq;
+ if (!QueryPerformanceFrequency(&freq))
+ Fatal("QueryPerformanceFrequency: %s", GetLastErrorString().c_str());
+ ticks_per_sec = LargeIntegerToInt64(freq);
+ }
+
+ // dt is in ticks. We want microseconds.
+ return (dt * 1000000) / ticks_per_sec;
+}
+#endif
+
+} // anonymous namespace
+
+
+ScopedMetric::ScopedMetric(Metric* metric) {
+ metric_ = metric;
+ if (!metric_)
+ return;
+ start_ = HighResTimer();
+}
+ScopedMetric::~ScopedMetric() {
+ if (!metric_)
+ return;
+ metric_->count++;
+ int64_t dt = TimerToMicros(HighResTimer() - start_);
+ metric_->sum += dt;
+}
+
+Metric* Metrics::NewMetric(const string& name) {
+ Metric* metric = new Metric;
+ metric->name = name;
+ metric->count = 0;
+ metric->sum = 0;
+ metrics_.push_back(metric);
+ return metric;
+}
+
+void Metrics::Report() {
+ int width = 0;
+ for (vector<Metric*>::iterator i = metrics_.begin();
+ i != metrics_.end(); ++i) {
+ width = max((int)(*i)->name.size(), width);
+ }
+
+ printf("%-*s\t%-6s\t%-9s\t%s\n", width,
+ "metric", "count", "avg (us)", "total (ms)");
+ for (vector<Metric*>::iterator i = metrics_.begin();
+ i != metrics_.end(); ++i) {
+ Metric* metric = *i;
+ double total = metric->sum / (double)1000;
+ double avg = metric->sum / (double)metric->count;
+ printf("%-*s\t%-6d\t%-8.1f\t%.1f\n", width, metric->name.c_str(),
+ metric->count, avg, total);
+ }
+}
+
+uint64_t Stopwatch::Now() const {
+ return TimerToMicros(HighResTimer());
+}
+
+int64_t GetTimeMillis() {
+ return TimerToMicros(HighResTimer()) / 1000;
+}
+
diff --git a/src/metrics.h b/src/metrics.h
new file mode 100644
index 0000000..11239b5
--- /dev/null
+++ b/src/metrics.h
@@ -0,0 +1,91 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_METRICS_H_
+#define NINJA_METRICS_H_
+
+#include <string>
+#include <vector>
+
+#include "util.h" // For int64_t.
+
+/// The Metrics module is used for the debug mode that dumps timing stats of
+/// various actions. To use, see METRIC_RECORD below.
+
+/// A single metric we're tracking, like "depfile load time".
+struct Metric {
+ std::string name;
+ /// Number of times we've hit the code path.
+ int count;
+ /// Total time (in micros) we've spent on the code path.
+ int64_t sum;
+};
+
+
+/// A scoped object for recording a metric across the body of a function.
+/// Used by the METRIC_RECORD macro.
+struct ScopedMetric {
+ explicit ScopedMetric(Metric* metric);
+ ~ScopedMetric();
+
+private:
+ Metric* metric_;
+ /// Timestamp when the measurement started.
+ /// Value is platform-dependent.
+ int64_t start_;
+};
+
+/// The singleton that stores metrics and prints the report.
+struct Metrics {
+ Metric* NewMetric(const std::string& name);
+
+ /// Print a summary report to stdout.
+ void Report();
+
+private:
+ std::vector<Metric*> metrics_;
+};
+
+/// Get the current time as relative to some epoch.
+/// Epoch varies between platforms; only useful for measuring elapsed time.
+int64_t GetTimeMillis();
+
+/// A simple stopwatch which returns the time
+/// in seconds since Restart() was called.
+struct Stopwatch {
+ public:
+ Stopwatch() : started_(0) {}
+
+ /// Seconds since Restart() call.
+ double Elapsed() const {
+ return 1e-6 * static_cast<double>(Now() - started_);
+ }
+
+ void Restart() { started_ = Now(); }
+
+ private:
+ uint64_t started_;
+ uint64_t Now() const;
+};
+
+/// The primary interface to metrics. Use METRIC_RECORD("foobar") at the top
+/// of a function to get timing stats recorded for each call of the function.
+#define METRIC_RECORD(name) \
+ static Metric* metrics_h_metric = \
+ g_metrics ? g_metrics->NewMetric(name) : NULL; \
+ ScopedMetric metrics_h_scoped(metrics_h_metric);
+
+extern Metrics* g_metrics;
+
+#endif // NINJA_METRICS_H_
diff --git a/src/minidump-win32.cc b/src/minidump-win32.cc
new file mode 100644
index 0000000..9aea767
--- /dev/null
+++ b/src/minidump-win32.cc
@@ -0,0 +1,89 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifdef _MSC_VER
+
+#include <windows.h>
+#include <DbgHelp.h>
+
+#include "util.h"
+
+using namespace std;
+
+typedef BOOL (WINAPI *MiniDumpWriteDumpFunc) (
+ IN HANDLE,
+ IN DWORD,
+ IN HANDLE,
+ IN MINIDUMP_TYPE,
+ IN CONST PMINIDUMP_EXCEPTION_INFORMATION, OPTIONAL
+ IN CONST PMINIDUMP_USER_STREAM_INFORMATION, OPTIONAL
+ IN CONST PMINIDUMP_CALLBACK_INFORMATION OPTIONAL
+ );
+
+/// Creates a windows minidump in temp folder.
+void CreateWin32MiniDump(_EXCEPTION_POINTERS* pep) {
+ char temp_path[MAX_PATH];
+ GetTempPathA(sizeof(temp_path), temp_path);
+ char temp_file[MAX_PATH];
+ sprintf(temp_file, "%s\\ninja_crash_dump_%lu.dmp",
+ temp_path, GetCurrentProcessId());
+
+ // Delete any previous minidump of the same name.
+ DeleteFileA(temp_file);
+
+  // Load DbgHelp.dll dynamically, as the library is not present on all
+ // Windows versions.
+ HMODULE dbghelp = LoadLibraryA("dbghelp.dll");
+ if (dbghelp == NULL) {
+ Error("failed to create minidump: LoadLibrary('dbghelp.dll'): %s",
+ GetLastErrorString().c_str());
+ return;
+ }
+
+ MiniDumpWriteDumpFunc mini_dump_write_dump =
+ (MiniDumpWriteDumpFunc)GetProcAddress(dbghelp, "MiniDumpWriteDump");
+ if (mini_dump_write_dump == NULL) {
+ Error("failed to create minidump: GetProcAddress('MiniDumpWriteDump'): %s",
+ GetLastErrorString().c_str());
+ return;
+ }
+
+ HANDLE hFile = CreateFileA(temp_file, GENERIC_READ | GENERIC_WRITE, 0, NULL,
+ CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
+ if (hFile == NULL) {
+ Error("failed to create minidump: CreateFileA(%s): %s",
+ temp_file, GetLastErrorString().c_str());
+ return;
+ }
+
+ MINIDUMP_EXCEPTION_INFORMATION mdei;
+ mdei.ThreadId = GetCurrentThreadId();
+ mdei.ExceptionPointers = pep;
+ mdei.ClientPointers = FALSE;
+ MINIDUMP_TYPE mdt = (MINIDUMP_TYPE) (MiniDumpWithDataSegs |
+ MiniDumpWithHandleData);
+
+ BOOL rv = mini_dump_write_dump(GetCurrentProcess(), GetCurrentProcessId(),
+ hFile, mdt, (pep != 0) ? &mdei : 0, 0, 0);
+ CloseHandle(hFile);
+
+ if (!rv) {
+ Error("MiniDumpWriteDump failed: %s", GetLastErrorString().c_str());
+ return;
+ }
+
+ Warning("minidump created: %s", temp_file);
+}
+
+#endif // _MSC_VER
diff --git a/src/msvc_helper-win32.cc b/src/msvc_helper-win32.cc
new file mode 100644
index 0000000..1148ae5
--- /dev/null
+++ b/src/msvc_helper-win32.cc
@@ -0,0 +1,108 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "msvc_helper.h"
+
+#include <windows.h>
+
+#include "util.h"
+
+using namespace std;
+
+namespace {
+
+string Replace(const string& input, const string& find, const string& replace) {
+ string result = input;
+ size_t start_pos = 0;
+ while ((start_pos = result.find(find, start_pos)) != string::npos) {
+ result.replace(start_pos, find.length(), replace);
+ start_pos += replace.length();
+ }
+ return result;
+}
+
+} // anonymous namespace
+
+string EscapeForDepfile(const string& path) {
+ // Depfiles don't escape single \.
+ return Replace(path, " ", "\\ ");
+}
+
+int CLWrapper::Run(const string& command, string* output) {
+ SECURITY_ATTRIBUTES security_attributes = {};
+ security_attributes.nLength = sizeof(SECURITY_ATTRIBUTES);
+ security_attributes.bInheritHandle = TRUE;
+
+ // Must be inheritable so subprocesses can dup to children.
+ HANDLE nul =
+ CreateFileA("NUL", GENERIC_READ,
+ FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+ &security_attributes, OPEN_EXISTING, 0, NULL);
+ if (nul == INVALID_HANDLE_VALUE)
+ Fatal("couldn't open nul");
+
+ HANDLE stdout_read, stdout_write;
+ if (!CreatePipe(&stdout_read, &stdout_write, &security_attributes, 0))
+ Win32Fatal("CreatePipe");
+
+ if (!SetHandleInformation(stdout_read, HANDLE_FLAG_INHERIT, 0))
+ Win32Fatal("SetHandleInformation");
+
+ PROCESS_INFORMATION process_info = {};
+ STARTUPINFOA startup_info = {};
+ startup_info.cb = sizeof(STARTUPINFOA);
+ startup_info.hStdInput = nul;
+ startup_info.hStdError = ::GetStdHandle(STD_ERROR_HANDLE);
+ startup_info.hStdOutput = stdout_write;
+ startup_info.dwFlags |= STARTF_USESTDHANDLES;
+
+ if (!CreateProcessA(NULL, (char*)command.c_str(), NULL, NULL,
+ /* inherit handles */ TRUE, 0,
+ env_block_, NULL,
+ &startup_info, &process_info)) {
+ Win32Fatal("CreateProcess");
+ }
+
+ if (!CloseHandle(nul) ||
+ !CloseHandle(stdout_write)) {
+ Win32Fatal("CloseHandle");
+ }
+
+ // Read all output of the subprocess.
+ DWORD read_len = 1;
+ while (read_len) {
+ char buf[64 << 10];
+ read_len = 0;
+ if (!::ReadFile(stdout_read, buf, sizeof(buf), &read_len, NULL) &&
+ GetLastError() != ERROR_BROKEN_PIPE) {
+ Win32Fatal("ReadFile");
+ }
+ output->append(buf, read_len);
+ }
+
+ // Wait for it to exit and grab its exit code.
+ if (WaitForSingleObject(process_info.hProcess, INFINITE) == WAIT_FAILED)
+ Win32Fatal("WaitForSingleObject");
+ DWORD exit_code = 0;
+ if (!GetExitCodeProcess(process_info.hProcess, &exit_code))
+ Win32Fatal("GetExitCodeProcess");
+
+ if (!CloseHandle(stdout_read) ||
+ !CloseHandle(process_info.hProcess) ||
+ !CloseHandle(process_info.hThread)) {
+ Win32Fatal("CloseHandle");
+ }
+
+ return exit_code;
+}
diff --git a/src/msvc_helper.h b/src/msvc_helper.h
new file mode 100644
index 0000000..568b9f9
--- /dev/null
+++ b/src/msvc_helper.h
@@ -0,0 +1,32 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <string>
+
+std::string EscapeForDepfile(const std::string& path);
+
+/// Wraps a synchronous execution of a CL subprocess.
+struct CLWrapper {
+ CLWrapper() : env_block_(NULL) {}
+
+ /// Set the environment block (as suitable for CreateProcess) to be used
+ /// by Run().
+ void SetEnvBlock(void* env_block) { env_block_ = env_block; }
+
+ /// Start a process and gather its raw output. Returns its exit code.
+ /// Crashes (calls Fatal()) on error.
+ int Run(const std::string& command, std::string* output);
+
+ void* env_block_;
+};
diff --git a/src/msvc_helper_main-win32.cc b/src/msvc_helper_main-win32.cc
new file mode 100644
index 0000000..7d59307
--- /dev/null
+++ b/src/msvc_helper_main-win32.cc
@@ -0,0 +1,150 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "msvc_helper.h"
+
+#include <fcntl.h>
+#include <io.h>
+#include <stdio.h>
+#include <windows.h>
+
+#include "clparser.h"
+#include "util.h"
+
+#include "getopt.h"
+
+using namespace std;
+
+namespace {
+
+void Usage() {
+ printf(
+"usage: ninja -t msvc [options] -- cl.exe /showIncludes /otherArgs\n"
+"options:\n"
+" -e ENVFILE load environment block from ENVFILE as environment\n"
+" -o FILE write output dependency information to FILE.d\n"
+" -p STRING localized prefix of msvc's /showIncludes output\n"
+ );
+}
+
+void PushPathIntoEnvironment(const string& env_block) {
+ const char* as_str = env_block.c_str();
+ while (as_str[0]) {
+ if (_strnicmp(as_str, "path=", 5) == 0) {
+ _putenv(as_str);
+ return;
+ } else {
+ as_str = &as_str[strlen(as_str) + 1];
+ }
+ }
+}
+
+void WriteDepFileOrDie(const char* object_path, const CLParser& parse) {
+ string depfile_path = string(object_path) + ".d";
+ FILE* depfile = fopen(depfile_path.c_str(), "w");
+ if (!depfile) {
+ unlink(object_path);
+ Fatal("opening %s: %s", depfile_path.c_str(),
+ GetLastErrorString().c_str());
+ }
+ if (fprintf(depfile, "%s: ", object_path) < 0) {
+ unlink(object_path);
+ fclose(depfile);
+ unlink(depfile_path.c_str());
+ Fatal("writing %s", depfile_path.c_str());
+ }
+ const set<string>& headers = parse.includes_;
+ for (set<string>::const_iterator i = headers.begin();
+ i != headers.end(); ++i) {
+ if (fprintf(depfile, "%s\n", EscapeForDepfile(*i).c_str()) < 0) {
+ unlink(object_path);
+ fclose(depfile);
+ unlink(depfile_path.c_str());
+ Fatal("writing %s", depfile_path.c_str());
+ }
+ }
+ fclose(depfile);
+}
+
+} // anonymous namespace
+
+int MSVCHelperMain(int argc, char** argv) {
+ const char* output_filename = NULL;
+ const char* envfile = NULL;
+
+ const option kLongOptions[] = {
+ { "help", no_argument, NULL, 'h' },
+ { NULL, 0, NULL, 0 }
+ };
+ int opt;
+ string deps_prefix;
+ while ((opt = getopt_long(argc, argv, "e:o:p:h", kLongOptions, NULL)) != -1) {
+ switch (opt) {
+ case 'e':
+ envfile = optarg;
+ break;
+ case 'o':
+ output_filename = optarg;
+ break;
+ case 'p':
+ deps_prefix = optarg;
+ break;
+ case 'h':
+ default:
+ Usage();
+ return 0;
+ }
+ }
+
+ string env;
+ if (envfile) {
+ string err;
+ if (ReadFile(envfile, &env, &err) != 0)
+ Fatal("couldn't open %s: %s", envfile, err.c_str());
+ PushPathIntoEnvironment(env);
+ }
+
+ char* command = GetCommandLineA();
+ command = strstr(command, " -- ");
+ if (!command) {
+ Fatal("expected command line to end with \" -- command args\"");
+ }
+ command += 4;
+
+ CLWrapper cl;
+ if (!env.empty())
+ cl.SetEnvBlock((void*)env.data());
+ string output;
+ int exit_code = cl.Run(command, &output);
+
+ if (output_filename) {
+ CLParser parser;
+ string err;
+ if (!parser.Parse(output, deps_prefix, &output, &err))
+ Fatal("%s\n", err.c_str());
+ WriteDepFileOrDie(output_filename, parser);
+ }
+
+ if (output.empty())
+ return exit_code;
+
+  // CLWrapper's output already has \r\n line endings, make sure the C runtime
+ // doesn't expand this to \r\r\n.
+ _setmode(_fileno(stdout), _O_BINARY);
+ // Avoid printf and C strings, since the actual output might contain null
+ // bytes like UTF-16 does (yuck).
+ fwrite(&output[0], 1, output.size(), stdout);
+
+ return exit_code;
+}
diff --git a/src/msvc_helper_test.cc b/src/msvc_helper_test.cc
new file mode 100644
index 0000000..d9e2ee6
--- /dev/null
+++ b/src/msvc_helper_test.cc
@@ -0,0 +1,41 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "msvc_helper.h"
+
+#include "test.h"
+#include "util.h"
+
+using namespace std;
+
+TEST(EscapeForDepfileTest, SpacesInFilename) {
+ ASSERT_EQ("sub\\some\\ sdk\\foo.h",
+ EscapeForDepfile("sub\\some sdk\\foo.h"));
+}
+
+TEST(MSVCHelperTest, EnvBlock) {
+ char env_block[] = "foo=bar\0";
+ CLWrapper cl;
+ cl.SetEnvBlock(env_block);
+ string output;
+ cl.Run("cmd /c \"echo foo is %foo%", &output);
+ ASSERT_EQ("foo is bar\r\n", output);
+}
+
+TEST(MSVCHelperTest, NoReadOfStderr) {
+ CLWrapper cl;
+ string output;
+ cl.Run("cmd /c \"echo to stdout&& echo to stderr 1>&2", &output);
+ ASSERT_EQ("to stdout\r\n", output);
+}
diff --git a/src/ninja.cc b/src/ninja.cc
new file mode 100644
index 0000000..471a023
--- /dev/null
+++ b/src/ninja.cc
@@ -0,0 +1,1457 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <errno.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <cstdlib>
+
+#ifdef _WIN32
+#include "getopt.h"
+#include <direct.h>
+#include <windows.h>
+#elif defined(_AIX)
+#include "getopt.h"
+#include <unistd.h>
+#else
+#include <getopt.h>
+#include <unistd.h>
+#endif
+
+#include "browse.h"
+#include "build.h"
+#include "build_log.h"
+#include "deps_log.h"
+#include "clean.h"
+#include "debug_flags.h"
+#include "disk_interface.h"
+#include "graph.h"
+#include "graphviz.h"
+#include "manifest_parser.h"
+#include "metrics.h"
+#include "state.h"
+#include "util.h"
+#include "version.h"
+
+using namespace std;
+
+#ifdef _MSC_VER
+// Defined in msvc_helper_main-win32.cc.
+int MSVCHelperMain(int argc, char** argv);
+
+// Defined in minidump-win32.cc.
+void CreateWin32MiniDump(_EXCEPTION_POINTERS* pep);
+#endif
+
+namespace {
+
+struct Tool;
+
+/// Command-line options.
+struct Options {
+ /// Build file to load.
+ const char* input_file;
+
+ /// Directory to change into before running.
+ const char* working_dir;
+
+ /// Tool to run rather than building.
+ const Tool* tool;
+
+ /// Whether duplicate rules for one target should warn or print an error.
+ bool dupe_edges_should_err;
+
+ /// Whether phony cycles should warn or print an error.
+ bool phony_cycle_should_err;
+};
+
+/// The Ninja main() loads up a series of data structures; various tools need
+/// to poke into these, so store them as fields on an object.
+struct NinjaMain : public BuildLogUser {
+ NinjaMain(const char* ninja_command, const BuildConfig& config) :
+ ninja_command_(ninja_command), config_(config) {}
+
+ /// Command line used to run Ninja.
+ const char* ninja_command_;
+
+ /// Build configuration set from flags (e.g. parallelism).
+ const BuildConfig& config_;
+
+ /// Loaded state (rules, nodes).
+ State state_;
+
+ /// Functions for accessing the disk.
+ RealDiskInterface disk_interface_;
+
+ /// The build directory, used for storing the build log etc.
+ string build_dir_;
+
+ BuildLog build_log_;
+ DepsLog deps_log_;
+
+ /// The type of functions that are the entry points to tools (subcommands).
+ typedef int (NinjaMain::*ToolFunc)(const Options*, int, char**);
+
+ /// Get the Node for a given command-line path, handling features like
+ /// spell correction.
+ Node* CollectTarget(const char* cpath, string* err);
+
+ /// CollectTarget for all command-line arguments, filling in \a targets.
+ bool CollectTargetsFromArgs(int argc, char* argv[],
+ vector<Node*>* targets, string* err);
+
+ // The various subcommands, run via "-t XXX".
+ int ToolGraph(const Options* options, int argc, char* argv[]);
+ int ToolQuery(const Options* options, int argc, char* argv[]);
+ int ToolDeps(const Options* options, int argc, char* argv[]);
+ int ToolBrowse(const Options* options, int argc, char* argv[]);
+ int ToolMSVC(const Options* options, int argc, char* argv[]);
+ int ToolTargets(const Options* options, int argc, char* argv[]);
+ int ToolCommands(const Options* options, int argc, char* argv[]);
+ int ToolClean(const Options* options, int argc, char* argv[]);
+ int ToolCleanDead(const Options* options, int argc, char* argv[]);
+ int ToolCompilationDatabase(const Options* options, int argc, char* argv[]);
+ int ToolRecompact(const Options* options, int argc, char* argv[]);
+ int ToolRestat(const Options* options, int argc, char* argv[]);
+ int ToolUrtle(const Options* options, int argc, char** argv);
+ int ToolRules(const Options* options, int argc, char* argv[]);
+
+ /// Open the build log.
+ /// @return LOAD_ERROR on error.
+ bool OpenBuildLog(bool recompact_only = false);
+
+ /// Open the deps log: load it, then open for writing.
+ /// @return LOAD_ERROR on error.
+ bool OpenDepsLog(bool recompact_only = false);
+
+ /// Ensure the build directory exists, creating it if necessary.
+ /// @return false on error.
+ bool EnsureBuildDirExists();
+
+ /// Rebuild the manifest, if necessary.
+ /// Fills in \a err on error.
+ /// @return true if the manifest was rebuilt.
+ bool RebuildManifest(const char* input_file, string* err);
+
+ /// Build the targets listed on the command line.
+ /// @return an exit code.
+ int RunBuild(int argc, char** argv);
+
+ /// Dump the output requested by '-d stats'.
+ void DumpMetrics();
+
+ virtual bool IsPathDead(StringPiece s) const {
+ Node* n = state_.LookupNode(s);
+ if (n && n->in_edge())
+ return false;
+ // Just checking n isn't enough: If an old output is both in the build log
+ // and in the deps log, it will have a Node object in state_. (It will also
+ // have an in edge if one of its inputs is another output that's in the deps
+    // log, but having a deps edge produce an output that's input to another deps
+ // edge is rare, and the first recompaction will delete all old outputs from
+ // the deps log, and then a second recompaction will clear the build log,
+ // which seems good enough for this corner case.)
+ // Do keep entries around for files which still exist on disk, for
+ // generators that want to use this information.
+ string err;
+ TimeStamp mtime = disk_interface_.Stat(s.AsString(), &err);
+ if (mtime == -1)
+ Error("%s", err.c_str()); // Log and ignore Stat() errors.
+ return mtime == 0;
+ }
+};
+
+/// Subtools, accessible via "-t foo".
+struct Tool {
+ /// Short name of the tool.
+ const char* name;
+
+ /// Description (shown in "-t list").
+ const char* desc;
+
+ /// When to run the tool.
+ enum {
+ /// Run after parsing the command-line flags and potentially changing
+ /// the current working directory (as early as possible).
+ RUN_AFTER_FLAGS,
+
+ /// Run after loading build.ninja.
+ RUN_AFTER_LOAD,
+
+ /// Run after loading the build/deps logs.
+ RUN_AFTER_LOGS,
+ } when;
+
+ /// Implementation of the tool.
+ NinjaMain::ToolFunc func;
+};
+
+/// Print usage information.
+void Usage(const BuildConfig& config) {
+ fprintf(stderr,
+"usage: ninja [options] [targets...]\n"
+"\n"
+"if targets are unspecified, builds the 'default' target (see manual).\n"
+"\n"
+"options:\n"
+" --version print ninja version (\"%s\")\n"
+" -v, --verbose show all command lines while building\n"
+"\n"
+" -C DIR change to DIR before doing anything else\n"
+" -f FILE specify input build file [default=build.ninja]\n"
+"\n"
+" -j N run N jobs in parallel (0 means infinity) [default=%d on this system]\n"
+" -k N keep going until N jobs fail (0 means infinity) [default=1]\n"
+" -l N do not start new jobs if the load average is greater than N\n"
+" -n dry run (don't run commands but act like they succeeded)\n"
+"\n"
+" -d MODE enable debugging (use '-d list' to list modes)\n"
+" -t TOOL run a subtool (use '-t list' to list subtools)\n"
+" terminates toplevel options; further flags are passed to the tool\n"
+" -w FLAG adjust warnings (use '-w list' to list warnings)\n",
+ kNinjaVersion, config.parallelism);
+}
+
+/// Choose a default value for the -j (parallelism) flag.
+int GuessParallelism() {
+ switch (int processors = GetProcessorCount()) {
+ case 0:
+ case 1:
+ return 2;
+ case 2:
+ return 3;
+ default:
+ return processors + 2;
+ }
+}
+
/// Rebuild the build manifest, if necessary.
/// Returns true if the manifest was rebuilt.
/// On a false return, \a err is non-empty only for real errors; "manifest
/// not in the graph" and "already up to date" are not errors.
bool NinjaMain::RebuildManifest(const char* input_file, string* err) {
  string path = input_file;
  uint64_t slash_bits;  // Unused because this path is only used for lookup.
  if (!CanonicalizePath(&path, &slash_bits, err))
    return false;
  // If the manifest has no build statement for itself there is nothing to do.
  Node* node = state_.LookupNode(path);
  if (!node)
    return false;

  Builder builder(&state_, config_, &build_log_, &deps_log_, &disk_interface_);
  if (!builder.AddTarget(node, err))
    return false;

  if (builder.AlreadyUpToDate())
    return false;  // Not an error, but we didn't rebuild.

  if (!builder.Build(err))
    return false;

  // The manifest was only rebuilt if it is now dirty (it may have been cleaned
  // by a restat).
  if (!node->dirty()) {
    // Reset the state to prevent problems like
    // https://github.com/ninja-build/ninja/issues/874
    state_.Reset();
    return false;
  }

  return true;
}
+
/// Resolve a command-line target string \a cpath to a Node in the graph.
/// Supports the "foo.cc^" suffix syntax meaning "the first output built
/// from foo.cc". On failure returns NULL and fills \a err, possibly with a
/// spelling suggestion.
Node* NinjaMain::CollectTarget(const char* cpath, string* err) {
  string path = cpath;
  uint64_t slash_bits;
  if (!CanonicalizePath(&path, &slash_bits, err))
    return NULL;

  // Special syntax: "foo.cc^" means "the first output of foo.cc".
  bool first_dependent = false;
  if (!path.empty() && path[path.size() - 1] == '^') {
    path.resize(path.size() - 1);
    first_dependent = true;
  }

  Node* node = state_.LookupNode(path);
  if (node) {
    if (first_dependent) {
      if (node->out_edges().empty()) {
        *err = "'" + path + "' has no out edge";
        return NULL;
      }
      Edge* edge = node->out_edges()[0];
      // An edge with no outputs indicates a corrupt graph; fail hard with a
      // dump rather than returning a bogus node.
      if (edge->outputs_.empty()) {
        edge->Dump();
        Fatal("edge has no outputs");
      }
      node = edge->outputs_[0];
    }
    return node;
  } else {
    // Report the path as the user wrote it (undo slash canonicalization).
    *err =
        "unknown target '" + Node::PathDecanonicalized(path, slash_bits) + "'";
    if (path == "clean") {
      *err += ", did you mean 'ninja -t clean'?";
    } else if (path == "help") {
      *err += ", did you mean 'ninja -h'?";
    } else {
      Node* suggestion = state_.SpellcheckNode(path);
      if (suggestion) {
        *err += ", did you mean '" + suggestion->path() + "'?";
      }
    }
    return NULL;
  }
}
+
+bool NinjaMain::CollectTargetsFromArgs(int argc, char* argv[],
+ vector<Node*>* targets, string* err) {
+ if (argc == 0) {
+ *targets = state_.DefaultNodes(err);
+ return err->empty();
+ }
+
+ for (int i = 0; i < argc; ++i) {
+ Node* node = CollectTarget(argv[i], err);
+ if (node == NULL)
+ return false;
+ targets->push_back(node);
+ }
+ return true;
+}
+
+int NinjaMain::ToolGraph(const Options* options, int argc, char* argv[]) {
+ vector<Node*> nodes;
+ string err;
+ if (!CollectTargetsFromArgs(argc, argv, &nodes, &err)) {
+ Error("%s", err.c_str());
+ return 1;
+ }
+
+ GraphViz graph(&state_, &disk_interface_);
+ graph.Start();
+ for (vector<Node*>::const_iterator n = nodes.begin(); n != nodes.end(); ++n)
+ graph.AddTarget(*n);
+ graph.Finish();
+
+ return 0;
+}
+
/// Subtool: print the inputs and outputs of each named target.
int NinjaMain::ToolQuery(const Options* options, int argc, char* argv[]) {
  if (argc == 0) {
    Error("expected a target to query");
    return 1;
  }

  DyndepLoader dyndep_loader(&state_, &disk_interface_);

  for (int i = 0; i < argc; ++i) {
    string err;
    Node* node = CollectTarget(argv[i], &err);
    if (!node) {
      Error("%s", err.c_str());
      return 1;
    }

    printf("%s:\n", node->path().c_str());
    if (Edge* edge = node->in_edge()) {
      // Pull in dyndep-discovered dependencies so the listing is complete;
      // a load failure only warns, the static part is still printed.
      if (edge->dyndep_ && edge->dyndep_->dyndep_pending()) {
        if (!dyndep_loader.LoadDyndeps(edge->dyndep_, &err)) {
          Warning("%s\n", err.c_str());
        }
      }
      printf("  input: %s\n", edge->rule_->name().c_str());
      for (int in = 0; in < (int)edge->inputs_.size(); in++) {
        // Prefix implicit inputs with "| " and order-only inputs with "|| ",
        // mirroring the manifest syntax.
        const char* label = "";
        if (edge->is_implicit(in))
          label = "| ";
        else if (edge->is_order_only(in))
          label = "|| ";
        printf("    %s%s\n", label, edge->inputs_[in]->path().c_str());
      }
    }
    printf("  outputs:\n");
    for (vector<Edge*>::const_iterator edge = node->out_edges().begin();
         edge != node->out_edges().end(); ++edge) {
      for (vector<Node*>::iterator out = (*edge)->outputs_.begin();
           out != (*edge)->outputs_.end(); ++out) {
        printf("    %s\n", (*out)->path().c_str());
      }
    }
  }
  return 0;
}
+
#if defined(NINJA_HAVE_BROWSE)
/// Subtool: serve an interactive dependency-graph browser.
/// RunBrowsePython() replaces/occupies the process on success, so reaching
/// the return statement means it failed to launch.
int NinjaMain::ToolBrowse(const Options* options, int argc, char* argv[]) {
  RunBrowsePython(&state_, ninja_command_, options->input_file, argc, argv);
  // If we get here, the browse failed.
  return 1;
}
#else
/// Stub used when the browse tool was not compiled in.
int NinjaMain::ToolBrowse(const Options*, int, char**) {
  Fatal("browse tool not supported on this platform");
  return 1;
}
#endif
+
#if defined(_MSC_VER)
/// Subtool: delegate to the MSVC cl.exe build helper.
int NinjaMain::ToolMSVC(const Options* options, int argc, char* argv[]) {
  // Reset getopt: push one argument onto the front of argv, reset optind.
  // The helper expects argv[0] to be the program name, so step argv back to
  // reuse the "-t" token as a dummy argv[0].
  argc++;
  argv--;
  optind = 0;
  return MSVCHelperMain(argc, argv);
}
#endif
+
/// Recursively print \a nodes and (up to \a depth levels) the inputs that
/// produce them, indenting one level per recursion. A depth <= 0 means
/// unlimited depth.
int ToolTargetsList(const vector<Node*>& nodes, int depth, int indent) {
  for (vector<Node*>::const_iterator n = nodes.begin();
       n != nodes.end();
       ++n) {
    for (int i = 0; i < indent; ++i)
      printf("  ");
    const char* target = (*n)->path().c_str();
    if ((*n)->in_edge()) {
      printf("%s: %s\n", target, (*n)->in_edge()->rule_->name().c_str());
      // depth <= 0 disables the limit; otherwise stop once it counts down.
      if (depth > 1 || depth <= 0)
        ToolTargetsList((*n)->in_edge()->inputs_, depth - 1, indent + 1);
    } else {
      // Source file: no producing edge, so no rule name to show.
      printf("%s\n", target);
    }
  }
  return 0;
}
+
+int ToolTargetsSourceList(State* state) {
+ for (vector<Edge*>::iterator e = state->edges_.begin();
+ e != state->edges_.end(); ++e) {
+ for (vector<Node*>::iterator inps = (*e)->inputs_.begin();
+ inps != (*e)->inputs_.end(); ++inps) {
+ if (!(*inps)->in_edge())
+ printf("%s\n", (*inps)->path().c_str());
+ }
+ }
+ return 0;
+}
+
+int ToolTargetsList(State* state, const string& rule_name) {
+ set<string> rules;
+
+ // Gather the outputs.
+ for (vector<Edge*>::iterator e = state->edges_.begin();
+ e != state->edges_.end(); ++e) {
+ if ((*e)->rule_->name() == rule_name) {
+ for (vector<Node*>::iterator out_node = (*e)->outputs_.begin();
+ out_node != (*e)->outputs_.end(); ++out_node) {
+ rules.insert((*out_node)->path());
+ }
+ }
+ }
+
+ // Print them.
+ for (set<string>::const_iterator i = rules.begin();
+ i != rules.end(); ++i) {
+ printf("%s\n", (*i).c_str());
+ }
+
+ return 0;
+}
+
+int ToolTargetsList(State* state) {
+ for (vector<Edge*>::iterator e = state->edges_.begin();
+ e != state->edges_.end(); ++e) {
+ for (vector<Node*>::iterator out_node = (*e)->outputs_.begin();
+ out_node != (*e)->outputs_.end(); ++out_node) {
+ printf("%s: %s\n",
+ (*out_node)->path().c_str(),
+ (*e)->rule_->name().c_str());
+ }
+ }
+ return 0;
+}
+
/// Subtool: dump the deps-log entries for the given targets, or for every
/// node with a live deps entry when no targets are given.
int NinjaMain::ToolDeps(const Options* options, int argc, char** argv) {
  vector<Node*> nodes;
  if (argc == 0) {
    for (vector<Node*>::const_iterator ni = deps_log_.nodes().begin();
         ni != deps_log_.nodes().end(); ++ni) {
      if (deps_log_.IsDepsEntryLiveFor(*ni))
        nodes.push_back(*ni);
    }
  } else {
    string err;
    if (!CollectTargetsFromArgs(argc, argv, &nodes, &err)) {
      Error("%s", err.c_str());
      return 1;
    }
  }

  RealDiskInterface disk_interface;
  for (vector<Node*>::iterator it = nodes.begin(), end = nodes.end();
       it != end; ++it) {
    DepsLog::Deps* deps = deps_log_.GetDeps(*it);
    if (!deps) {
      printf("%s: deps not found\n", (*it)->path().c_str());
      continue;
    }

    string err;
    TimeStamp mtime = disk_interface.Stat((*it)->path(), &err);
    if (mtime == -1)
      Error("%s", err.c_str());  // Log and ignore Stat() errors.
    // NOTE(review): on Stat() failure mtime is -1, which falls through to the
    // "VALID" branch below -- confirm that is the intended report.
    printf("%s: #deps %d, deps mtime %" PRId64 " (%s)\n",
           (*it)->path().c_str(), deps->node_count, deps->mtime,
           (!mtime || mtime > deps->mtime ? "STALE":"VALID"));
    for (int i = 0; i < deps->node_count; ++i)
      printf("    %s\n", deps->nodes[i]->path().c_str());
    printf("\n");
  }

  return 0;
}
+
/// Subtool: list targets. Modes: "rule [name]" (outputs of a rule, or all
/// source files when no rule is given), "depth [N]" (tree from the roots,
/// default depth 1), and "all" (every output with its rule).
int NinjaMain::ToolTargets(const Options* options, int argc, char* argv[]) {
  int depth = 1;
  if (argc >= 1) {
    string mode = argv[0];
    if (mode == "rule") {
      string rule;
      if (argc > 1)
        rule = argv[1];
      if (rule.empty())
        return ToolTargetsSourceList(&state_);
      else
        return ToolTargetsList(&state_, rule);
    } else if (mode == "depth") {
      // atoi() yields 0 on garbage, which means "unlimited depth" below.
      if (argc > 1)
        depth = atoi(argv[1]);
    } else if (mode == "all") {
      return ToolTargetsList(&state_);
    } else {
      const char* suggestion =
          SpellcheckString(mode.c_str(), "rule", "depth", "all", NULL);
      if (suggestion) {
        Error("unknown target tool mode '%s', did you mean '%s'?",
              mode.c_str(), suggestion);
      } else {
        Error("unknown target tool mode '%s'", mode.c_str());
      }
      return 1;
    }
  }

  // Default (and "depth") mode: print the tree from the root targets.
  string err;
  vector<Node*> root_nodes = state_.RootNodes(&err);
  if (err.empty()) {
    return ToolTargetsList(root_nodes, depth, 0);
  } else {
    Error("%s", err.c_str());
    return 1;
  }
}
+
/// Subtool: list every rule defined at the top-level scope, optionally (-d)
/// with its description binding.
int NinjaMain::ToolRules(const Options* options, int argc, char* argv[]) {
  // Parse options.

  // The rules tool uses getopt, and expects argv[0] to contain the name of
  // the tool, i.e. "rules".
  argc++;
  argv--;

  bool print_description = false;

  optind = 1;
  int opt;
  while ((opt = getopt(argc, argv, const_cast<char*>("hd"))) != -1) {
    switch (opt) {
    case 'd':
      print_description = true;
      break;
    case 'h':
    default:
      printf("usage: ninja -t rules [options]\n"
             "\n"
             "options:\n"
             "  -d     also print the description of the rule\n"
             "  -h     print this message\n"
             );
    return 1;
    }
  }
  argv += optind;
  argc -= optind;

  // Print rules

  typedef map<string, const Rule*> Rules;
  const Rules& rules = state_.bindings_.GetRules();
  for (Rules::const_iterator i = rules.begin(); i != rules.end(); ++i) {
    printf("%s", i->first.c_str());
    if (print_description) {
      const Rule* rule = i->second;
      // The description is unparsed (not evaluated): it may reference
      // per-edge variables that have no value at rule scope.
      const EvalString* description = rule->GetBinding("description");
      if (description != NULL) {
        printf(": %s", description->Unparse().c_str());
      }
    }
    printf("\n");
  }
  return 0;
}
+
enum PrintCommandMode { PCM_Single, PCM_All };
/// Print the command line of \a edge; in PCM_All mode, first recurse into
/// the edges producing its inputs so commands appear in dependency order.
/// \a seen de-duplicates edges reachable along multiple paths.
void PrintCommands(Edge* edge, set<Edge*>* seen, PrintCommandMode mode) {
  if (!edge)
    return;
  if (!seen->insert(edge).second)
    return;

  if (mode == PCM_All) {
    for (vector<Node*>::iterator in = edge->inputs_.begin();
         in != edge->inputs_.end(); ++in)
      PrintCommands((*in)->in_edge(), seen, mode);
  }

  // Phony edges have no command to print.
  if (!edge->is_phony())
    puts(edge->EvaluateCommand().c_str());
}
+
/// Subtool: print the commands required to rebuild the given targets.
int NinjaMain::ToolCommands(const Options* options, int argc, char* argv[]) {
  // The commands tool uses getopt, and expects argv[0] to contain the name
  // of the tool, i.e. "commands".
  ++argc;
  --argv;

  PrintCommandMode mode = PCM_All;

  optind = 1;
  int opt;
  while ((opt = getopt(argc, argv, const_cast<char*>("hs"))) != -1) {
    switch (opt) {
    case 's':
      mode = PCM_Single;
      break;
    case 'h':
    default:
      printf("usage: ninja -t commands [options] [targets]\n"
"\n"
"options:\n"
"  -s     only print the final command to build [target], not the whole chain\n"
             );
    return 1;
    }
  }
  argv += optind;
  argc -= optind;

  vector<Node*> nodes;
  string err;
  if (!CollectTargetsFromArgs(argc, argv, &nodes, &err)) {
    Error("%s", err.c_str());
    return 1;
  }

  set<Edge*> seen;
  for (vector<Node*>::iterator in = nodes.begin(); in != nodes.end(); ++in)
    PrintCommands((*in)->in_edge(), &seen, mode);

  return 0;
}
+
/// Subtool: remove built files. With targets, cleans those targets (or, with
/// -r, the outputs of the named rules); without targets, cleans everything,
/// including generator outputs only when -g is given.
int NinjaMain::ToolClean(const Options* options, int argc, char* argv[]) {
  // The clean tool uses getopt, and expects argv[0] to contain the name of
  // the tool, i.e. "clean".
  argc++;
  argv--;

  bool generator = false;
  bool clean_rules = false;

  optind = 1;
  int opt;
  while ((opt = getopt(argc, argv, const_cast<char*>("hgr"))) != -1) {
    switch (opt) {
    case 'g':
      generator = true;
      break;
    case 'r':
      clean_rules = true;
      break;
    case 'h':
    default:
      printf("usage: ninja -t clean [options] [targets]\n"
"\n"
"options:\n"
"  -g     also clean files marked as ninja generator output\n"
"  -r     interpret targets as a list of rules to clean instead\n"
             );
    return 1;
    }
  }
  argv += optind;
  argc -= optind;

  if (clean_rules && argc == 0) {
    Error("expected a rule to clean");
    return 1;
  }

  Cleaner cleaner(&state_, config_, &disk_interface_);
  if (argc >= 1) {
    if (clean_rules)
      return cleaner.CleanRules(argc, argv);
    else
      return cleaner.CleanTargets(argc, argv);
  } else {
    return cleaner.CleanAll(generator);
  }
}
+
/// Subtool: remove outputs recorded in the build log that the current
/// manifest no longer produces.
int NinjaMain::ToolCleanDead(const Options* options, int argc, char* argv[]) {
  Cleaner cleaner(&state_, config_, &disk_interface_);
  return cleaner.CleanDead(build_log_.entries());
}
+
/// Write \a str to stdout as the body of a JSON string literal, escaping
/// backslashes and double quotes.
/// NOTE(review): control characters (e.g. embedded newlines) are not
/// escaped here, which would yield invalid JSON if they ever appear in a
/// command -- confirm whether inputs can contain them.
void EncodeJSONString(const char *str) {
  for (const char* p = str; *p; ++p) {
    if (*p == '"' || *p == '\\')
      putchar('\\');
    putchar(*p);
  }
}
+
enum EvaluateCommandMode {
  ECM_NORMAL,
  ECM_EXPAND_RSPFILE
};
/// Return the edge's command line; in ECM_EXPAND_RSPFILE mode, inline the
/// response-file contents in place of the "@rspfile" reference (with
/// newlines flattened to spaces) so the command is self-contained.
std::string EvaluateCommandWithRspfile(const Edge* edge,
                                       const EvaluateCommandMode mode) {
  string command = edge->EvaluateCommand();
  if (mode == ECM_NORMAL)
    return command;

  string rspfile = edge->GetUnescapedRspfile();
  if (rspfile.empty())
    return command;

  // Only expand when the rspfile appears as "@rspfile"; index == 0 means
  // there is no room for a preceding '@', so leave the command untouched.
  size_t index = command.find(rspfile);
  if (index == 0 || index == string::npos || command[index - 1] != '@')
    return command;

  // Flatten the response file contents onto one line.
  string rspfile_content = edge->GetBinding("rspfile_content");
  size_t newline_index = 0;
  while ((newline_index = rspfile_content.find('\n', newline_index)) !=
         string::npos) {
    rspfile_content.replace(newline_index, 1, 1, ' ');
    ++newline_index;
  }
  // Replace "@" plus the filename (hence index - 1 and length + 1).
  command.replace(index - 1, rspfile.length() + 1, rspfile_content);
  return command;
}
+
/// Print one compile_commands.json entry for \a edge to stdout.
/// Assumes the edge has at least one input (the caller filters empty-input
/// edges). NOTE(review): also indexes outputs_[0] unchecked -- presumably
/// every real edge has an output; confirm against the graph invariants.
void printCompdb(const char* const directory, const Edge* const edge,
                 const EvaluateCommandMode eval_mode) {
  printf("\n  {\n    \"directory\": \"");
  EncodeJSONString(directory);
  printf("\",\n    \"command\": \"");
  EncodeJSONString(EvaluateCommandWithRspfile(edge, eval_mode).c_str());
  printf("\",\n    \"file\": \"");
  EncodeJSONString(edge->inputs_[0]->path().c_str());
  printf("\",\n    \"output\": \"");
  EncodeJSONString(edge->outputs_[0]->path().c_str());
  printf("\"\n  }");
}
+
/// Subtool: dump a JSON compilation database (compile_commands.json format)
/// to stdout, for all edges or only those built by the named rules.
int NinjaMain::ToolCompilationDatabase(const Options* options, int argc,
                                       char* argv[]) {
  // The compdb tool uses getopt, and expects argv[0] to contain the name of
  // the tool, i.e. "compdb".
  argc++;
  argv--;

  EvaluateCommandMode eval_mode = ECM_NORMAL;

  optind = 1;
  int opt;
  while ((opt = getopt(argc, argv, const_cast<char*>("hx"))) != -1) {
    switch(opt) {
      case 'x':
        eval_mode = ECM_EXPAND_RSPFILE;
        break;

      case 'h':
      default:
        printf(
            "usage: ninja -t compdb [options] [rules]\n"
            "\n"
            "options:\n"
            "  -x     expand @rspfile style response file invocations\n"
            );
        return 1;
    }
  }
  argv += optind;
  argc -= optind;

  bool first = true;
  vector<char> cwd;
  char* success = NULL;

  // Grow the buffer until getcwd() fits; any error other than ERANGE is
  // fatal for this tool.
  do {
    cwd.resize(cwd.size() + 1024);
    errno = 0;
    success = getcwd(&cwd[0], cwd.size());
  } while (!success && errno == ERANGE);
  if (!success) {
    Error("cannot determine working directory: %s", strerror(errno));
    return 1;
  }

  putchar('[');
  for (vector<Edge*>::iterator e = state_.edges_.begin();
       e != state_.edges_.end(); ++e) {
    // Edges without inputs have no "file" to report; skip them.
    if ((*e)->inputs_.empty())
      continue;
    if (argc == 0) {
      // `first` tracks whether a comma separator is needed before the entry.
      if (!first) {
        putchar(',');
      }
      printCompdb(&cwd[0], *e, eval_mode);
      first = false;
    } else {
      for (int i = 0; i != argc; ++i) {
        if ((*e)->rule_->name() == argv[i]) {
          if (!first) {
            putchar(',');
          }
          printCompdb(&cwd[0], *e, eval_mode);
          first = false;
        }
      }
    }
  }

  puts("\n]");
  return 0;
}
+
/// Subtool: rewrite the build and deps logs in compact form (dropping dead
/// records) without building anything.
int NinjaMain::ToolRecompact(const Options* options, int argc, char* argv[]) {
  if (!EnsureBuildDirExists())
    return 1;

  // recompact_only stops after rewriting the logs instead of opening them
  // for appending.
  if (OpenBuildLog(/*recompact_only=*/true) == LOAD_ERROR ||
      OpenDepsLog(/*recompact_only=*/true) == LOAD_ERROR)
    return 1;

  return 0;
}
+
/// Subtool: refresh the recorded mtimes in the build log for the given
/// outputs (or all outputs), then reopen the log for writing.
/// NOTE(review): the load/warn/reopen sequence largely duplicates
/// OpenBuildLog(); consider sharing it if both ever change.
int NinjaMain::ToolRestat(const Options* options, int argc, char* argv[]) {
  // The restat tool uses getopt, and expects argv[0] to contain the name of the
  // tool, i.e. "restat"
  argc++;
  argv--;

  optind = 1;
  int opt;
  while ((opt = getopt(argc, argv, const_cast<char*>("h"))) != -1) {
    switch (opt) {
    case 'h':
    default:
      printf("usage: ninja -t restat [outputs]\n");
      return 1;
    }
  }
  argv += optind;
  argc -= optind;

  if (!EnsureBuildDirExists())
    return 1;

  string log_path = ".ninja_log";
  if (!build_dir_.empty())
    log_path = build_dir_ + "/" + log_path;

  string err;
  const LoadStatus status = build_log_.Load(log_path, &err);
  if (status == LOAD_ERROR) {
    Error("loading build log %s: %s", log_path.c_str(), err.c_str());
    return EXIT_FAILURE;
  }
  if (status == LOAD_NOT_FOUND) {
    // Nothing to restat, ignore this
    return EXIT_SUCCESS;
  }
  if (!err.empty()) {
    // Hack: Load() can return a warning via err by returning LOAD_SUCCESS.
    Warning("%s", err.c_str());
    err.clear();
  }

  bool success = build_log_.Restat(log_path, disk_interface_, argc, argv, &err);
  if (!success) {
    Error("failed recompaction: %s", err.c_str());
    return EXIT_FAILURE;
  }

  // Reopen for appending so subsequent builds keep logging (skipped in dry
  // run, which never writes the log).
  if (!config_.dry_run) {
    if (!build_log_.OpenForWrite(log_path, *this, &err)) {
      Error("opening build log: %s", err.c_str());
      return EXIT_FAILURE;
    }
  }

  return EXIT_SUCCESS;
}
+
/// Easter-egg subtool: print run-length-encoded ASCII art.
/// In the encoding, a run of digits accumulates a repeat count and any other
/// character is emitted max(count, 1) times.
int NinjaMain::ToolUrtle(const Options* options, int argc, char** argv) {
  // RLE encoded.
  const char* urtle =
" 13 ,3;2!2;\n8 ,;<11!;\n5 `'<10!(2`'2!\n11 ,6;, `\\. `\\9 .,c13$ec,.\n6 "
",2;11!>; `. ,;!2> .e8$2\".2 \"?7$e.\n <:<8!'` 2.3,.2` ,3!' ;,(?7\";2!2'<"
"; `?6$PF ,;,\n2 `'4!8;<!3'`2 3! ;,`'2`2'3!;4!`2.`!;2 3,2 .<!2'`).\n5 3`5"
"'2`9 `!2 `4!><3;5! J2$b,`!>;2!:2!`,d?b`!>\n26 `'-;,(<9!> $F3 )3.:!.2 d\""
"2 ) !>\n30 7`2'<3!- \"=-='5 .2 `2-=\",!>\n25 .ze9$er2 .,cd16$bc.'\n22 .e"
"14$,26$.\n21 z45$c .\n20 J50$c\n20 14$P\"`?34$b\n20 14$ dbc `2\"?22$?7$c"
"\n20 ?18$c.6 4\"8?4\" c8$P\n9 .2,.8 \"20$c.3 ._14 J9$\n .2,2c9$bec,.2 `?"
"21$c.3`4%,3%,3 c8$P\"\n22$c2 2\"?21$bc2,.2` .2,c7$P2\",cb\n23$b bc,.2\"2"
"?14$2F2\"5?2\",J5$P\" ,zd3$\n24$ ?$3?%3 `2\"2?12$bcucd3$P3\"2 2=7$\n23$P"
"\" ,3;<5!>2;,. `4\"6?2\"2 ,9;, `\"?2$\n";
  int count = 0;
  for (const char* p = urtle; *p; p++) {
    if ('0' <= *p && *p <= '9') {
      // Extend the pending decimal repeat count.
      count = count*10 + *p - '0';
    } else {
      for (int i = 0; i < max(count, 1); ++i)
        printf("%c", *p);
      count = 0;
    }
  }
  return 0;
}
+
/// Look up the subtool named \a tool_name.
/// Returns the matching Tool, or NULL if Ninja should exit (after printing
/// the tool listing for "list", or a fatal error for an unknown name).
const Tool* ChooseTool(const string& tool_name) {
  // NULL-name entry terminates the table; urtle's NULL desc hides it from
  // the "list" output.
  static const Tool kTools[] = {
    { "browse", "browse dependency graph in a web browser",
      Tool::RUN_AFTER_LOAD, &NinjaMain::ToolBrowse },
#if defined(_MSC_VER)
    { "msvc", "build helper for MSVC cl.exe (EXPERIMENTAL)",
      Tool::RUN_AFTER_FLAGS, &NinjaMain::ToolMSVC },
#endif
    { "clean", "clean built files",
      Tool::RUN_AFTER_LOAD, &NinjaMain::ToolClean },
    { "commands", "list all commands required to rebuild given targets",
      Tool::RUN_AFTER_LOAD, &NinjaMain::ToolCommands },
    { "deps", "show dependencies stored in the deps log",
      Tool::RUN_AFTER_LOGS, &NinjaMain::ToolDeps },
    { "graph", "output graphviz dot file for targets",
      Tool::RUN_AFTER_LOAD, &NinjaMain::ToolGraph },
    { "query", "show inputs/outputs for a path",
      Tool::RUN_AFTER_LOGS, &NinjaMain::ToolQuery },
    { "targets",  "list targets by their rule or depth in the DAG",
      Tool::RUN_AFTER_LOAD, &NinjaMain::ToolTargets },
    { "compdb",  "dump JSON compilation database to stdout",
      Tool::RUN_AFTER_LOAD, &NinjaMain::ToolCompilationDatabase },
    { "recompact",  "recompacts ninja-internal data structures",
      Tool::RUN_AFTER_LOAD, &NinjaMain::ToolRecompact },
    { "restat",  "restats all outputs in the build log",
      Tool::RUN_AFTER_FLAGS, &NinjaMain::ToolRestat },
    { "rules",  "list all rules",
      Tool::RUN_AFTER_LOAD, &NinjaMain::ToolRules },
    { "cleandead",  "clean built files that are no longer produced by the manifest",
      Tool::RUN_AFTER_LOGS, &NinjaMain::ToolCleanDead },
    { "urtle", NULL,
      Tool::RUN_AFTER_FLAGS, &NinjaMain::ToolUrtle },
    { NULL, NULL, Tool::RUN_AFTER_FLAGS, NULL }
  };

  if (tool_name == "list") {
    printf("ninja subtools:\n");
    for (const Tool* tool = &kTools[0]; tool->name; ++tool) {
      if (tool->desc)
        printf("%10s  %s\n", tool->name, tool->desc);
    }
    return NULL;
  }

  for (const Tool* tool = &kTools[0]; tool->name; ++tool) {
    if (tool->name == tool_name)
      return tool;
  }

  // Unknown tool: try to offer a spelling suggestion before dying.
  vector<const char*> words;
  for (const Tool* tool = &kTools[0]; tool->name; ++tool)
    words.push_back(tool->name);
  const char* suggestion = SpellcheckStringV(tool_name, words);
  if (suggestion) {
    Fatal("unknown tool '%s', did you mean '%s'?",
          tool_name.c_str(), suggestion);
  } else {
    Fatal("unknown tool '%s'", tool_name.c_str());
  }
  return NULL;  // Not reached.
}
+
/// Enable a debugging mode. Returns false if Ninja should exit instead
/// of continuing (after printing the list or an error).
bool DebugEnable(const string& name) {
  if (name == "list") {
    printf("debugging modes:\n"
"  stats        print operation counts/timing info\n"
"  explain      explain what caused a command to execute\n"
"  keepdepfile  don't delete depfiles after they're read by ninja\n"
"  keeprsp      don't delete @response files on success\n"
#ifdef _WIN32
"  nostatcache  don't batch stat() calls per directory and cache them\n"
#endif
"multiple modes can be enabled via -d FOO -d BAR\n");
    return false;
  } else if (name == "stats") {
    // Allocating g_metrics is what turns metric collection on elsewhere.
    g_metrics = new Metrics;
    return true;
  } else if (name == "explain") {
    g_explaining = true;
    return true;
  } else if (name == "keepdepfile") {
    g_keep_depfile = true;
    return true;
  } else if (name == "keeprsp") {
    g_keep_rsp = true;
    return true;
  } else if (name == "nostatcache") {
    g_experimental_statcache = false;
    return true;
  } else {
    const char* suggestion =
        SpellcheckString(name.c_str(),
                         "stats", "explain", "keepdepfile", "keeprsp",
                         "nostatcache", NULL);
    if (suggestion) {
      Error("unknown debug setting '%s', did you mean '%s'?",
            name.c_str(), suggestion);
    } else {
      Error("unknown debug setting '%s'", name.c_str());
    }
    return false;
  }
}
+
/// Set a warning flag. Returns false if Ninja should exit instead of
/// continuing (after printing the list or an error).
bool WarningEnable(const string& name, Options* options) {
  if (name == "list") {
    printf("warning flags:\n"
"  dupbuild={err,warn}  multiple build lines for one target\n"
"  phonycycle={err,warn}  phony build statement references itself\n"
    );
    return false;
  } else if (name == "dupbuild=err") {
    options->dupe_edges_should_err = true;
    return true;
  } else if (name == "dupbuild=warn") {
    options->dupe_edges_should_err = false;
    return true;
  } else if (name == "phonycycle=err") {
    options->phony_cycle_should_err = true;
    return true;
  } else if (name == "phonycycle=warn") {
    options->phony_cycle_should_err = false;
    return true;
  } else if (name == "depfilemulti=err" ||
             name == "depfilemulti=warn") {
    // Retired flag: accepted (so old invocations keep working) but warned
    // about, with no effect.
    Warning("deprecated warning 'depfilemulti'");
    return true;
  } else {
    const char* suggestion =
        SpellcheckString(name.c_str(), "dupbuild=err", "dupbuild=warn",
                         "phonycycle=err", "phonycycle=warn", NULL);
    if (suggestion) {
      Error("unknown warning flag '%s', did you mean '%s'?",
            name.c_str(), suggestion);
    } else {
      Error("unknown warning flag '%s'", name.c_str());
    }
    return false;
  }
}
+
/// Open the build log: load it and (unless \a recompact_only or dry run)
/// open it for writing.
/// @return false on error.
bool NinjaMain::OpenBuildLog(bool recompact_only) {
  string log_path = ".ninja_log";
  if (!build_dir_.empty())
    log_path = build_dir_ + "/" + log_path;

  string err;
  const LoadStatus status = build_log_.Load(log_path, &err);
  if (status == LOAD_ERROR) {
    Error("loading build log %s: %s", log_path.c_str(), err.c_str());
    return false;
  }
  if (!err.empty()) {
    // Hack: Load() can return a warning via err by returning LOAD_SUCCESS.
    Warning("%s", err.c_str());
    err.clear();
  }

  if (recompact_only) {
    // A missing log is fine for recompaction: there is nothing to rewrite.
    if (status == LOAD_NOT_FOUND) {
      return true;
    }
    bool success = build_log_.Recompact(log_path, *this, &err);
    if (!success)
      Error("failed recompaction: %s", err.c_str());
    return success;
  }

  // Dry runs must not touch the log on disk.
  if (!config_.dry_run) {
    if (!build_log_.OpenForWrite(log_path, *this, &err)) {
      Error("opening build log: %s", err.c_str());
      return false;
    }
  }

  return true;
}
+
/// Open the deps log: load it, then open for writing.
/// Mirrors OpenBuildLog(): \a recompact_only rewrites the log and stops,
/// and dry runs never open for writing.
/// @return false on error.
bool NinjaMain::OpenDepsLog(bool recompact_only) {
  string path = ".ninja_deps";
  if (!build_dir_.empty())
    path = build_dir_ + "/" + path;

  string err;
  const LoadStatus status = deps_log_.Load(path, &state_, &err);
  if (status == LOAD_ERROR) {
    Error("loading deps log %s: %s", path.c_str(), err.c_str());
    return false;
  }
  if (!err.empty()) {
    // Hack: Load() can return a warning via err by returning LOAD_SUCCESS.
    Warning("%s", err.c_str());
    err.clear();
  }

  if (recompact_only) {
    // A missing log is fine for recompaction: there is nothing to rewrite.
    if (status == LOAD_NOT_FOUND) {
      return true;
    }
    bool success = deps_log_.Recompact(path, &err);
    if (!success)
      Error("failed recompaction: %s", err.c_str());
    return success;
  }

  if (!config_.dry_run) {
    if (!deps_log_.OpenForWrite(path, &err)) {
      Error("opening deps log: %s", err.c_str());
      return false;
    }
  }

  return true;
}
+
/// Dump '-d stats' timing info plus hash-table load statistics.
/// Callers guard this with a g_metrics check; it dereferences g_metrics
/// unconditionally.
void NinjaMain::DumpMetrics() {
  g_metrics->Report();

  printf("\n");
  int count = (int)state_.paths_.size();
  int buckets = (int)state_.paths_.bucket_count();
  printf("path->node hash load %.2f (%d entries / %d buckets)\n",
         count / (double) buckets, count, buckets);
}
+
/// Read the "builddir" binding and, unless this is a dry run, create that
/// directory. @return false only on a real creation failure (an existing
/// directory is fine).
bool NinjaMain::EnsureBuildDirExists() {
  build_dir_ = state_.bindings_.LookupVariable("builddir");
  if (!build_dir_.empty() && !config_.dry_run) {
    // "dir/." forces creation of `dir` itself, not just its parents.
    if (!disk_interface_.MakeDirs(build_dir_ + "/.") && errno != EEXIST) {
      Error("creating build directory %s: %s",
            build_dir_.c_str(), strerror(errno));
      return false;
    }
  }
  return true;
}
+
/// Perform the actual build for the given command-line targets.
/// Returns a process exit code: 0 on success (including nothing to do),
/// 2 when interrupted by the user, 1 for any other failure.
int NinjaMain::RunBuild(int argc, char** argv) {
  string err;
  vector<Node*> targets;
  if (!CollectTargetsFromArgs(argc, argv, &targets, &err)) {
    Error("%s", err.c_str());
    return 1;
  }

  // Stat caching is only safe while computing dirtiness; see below.
  disk_interface_.AllowStatCache(g_experimental_statcache);

  Builder builder(&state_, config_, &build_log_, &deps_log_, &disk_interface_);
  for (size_t i = 0; i < targets.size(); ++i) {
    if (!builder.AddTarget(targets[i], &err)) {
      if (!err.empty()) {
        Error("%s", err.c_str());
        return 1;
      } else {
        // Added a target that is already up-to-date; not really
        // an error.
      }
    }
  }

  // Make sure restat rules do not see stale timestamps.
  disk_interface_.AllowStatCache(false);

  if (builder.AlreadyUpToDate()) {
    printf("ninja: no work to do.\n");
    return 0;
  }

  if (!builder.Build(&err)) {
    printf("ninja: build stopped: %s.\n", err.c_str());
    // Distinguish user interruption (^C) with exit code 2.
    if (err.find("interrupted by user") != string::npos) {
      return 2;
    }
    return 1;
  }

  return 0;
}
+
#ifdef _MSC_VER

/// This handler processes fatal crashes that you can't catch
/// Test example: C++ exception in a stack-unwind-block
/// Real-world example: ninja launched a compiler to process a tricky
/// C++ input file. The compiler got itself into a state where it
/// generated 3 GB of output and caused ninja to crash.
void TerminateHandler() {
  // Capture a minidump before dying so the crash can be analyzed.
  CreateWin32MiniDump(NULL);
  Fatal("terminate handler called");
}

/// On Windows, we want to prevent error dialogs in case of exceptions.
/// This function handles the exception, and writes a minidump.
/// Installed as the __except filter in main(); always swallows the
/// exception (EXCEPTION_EXECUTE_HANDLER) after dumping.
int ExceptionFilter(unsigned int code, struct _EXCEPTION_POINTERS *ep) {
  Error("exception: 0x%X", code);  // e.g. EXCEPTION_ACCESS_VIOLATION
  fflush(stderr);
  CreateWin32MiniDump(ep);
  return EXCEPTION_EXECUTE_HANDLER;
}

#endif  // _MSC_VER
+
/// Parse argv for command-line options.
/// Returns an exit code, or -1 if Ninja should continue.
/// On the -1 path, *argc/*argv are advanced past the consumed flags so the
/// remainder is the target list (or the subtool's arguments).
int ReadFlags(int* argc, char*** argv,
              Options* options, BuildConfig* config) {
  config->parallelism = GuessParallelism();

  // OPT_VERSION has no short-option equivalent; 1 is outside the char range.
  enum { OPT_VERSION = 1 };
  const option kLongOptions[] = {
    { "help", no_argument, NULL, 'h' },
    { "version", no_argument, NULL, OPT_VERSION },
    { "verbose", no_argument, NULL, 'v' },
    { NULL, 0, NULL, 0 }
  };

  int opt;
  // Stop at the first -t: everything after it belongs to the subtool.
  while (!options->tool &&
         (opt = getopt_long(*argc, *argv, "d:f:j:k:l:nt:vw:C:h", kLongOptions,
                            NULL)) != -1) {
    switch (opt) {
      case 'd':
        if (!DebugEnable(optarg))
          return 1;
        break;
      case 'f':
        options->input_file = optarg;
        break;
      case 'j': {
        char* end;
        int value = strtol(optarg, &end, 10);
        if (*end != 0 || value < 0)
          Fatal("invalid -j parameter");

        // We want to run N jobs in parallel. For N = 0, INT_MAX
        // is close enough to infinite for most sane builds.
        config->parallelism = value > 0 ? value : INT_MAX;
        break;
      }
      case 'k': {
        char* end;
        int value = strtol(optarg, &end, 10);
        if (*end != 0)
          Fatal("-k parameter not numeric; did you mean -k 0?");

        // We want to go until N jobs fail, which means we should allow
        // N failures and then stop.  For N <= 0, INT_MAX is close enough
        // to infinite for most sane builds.
        config->failures_allowed = value > 0 ? value : INT_MAX;
        break;
      }
      case 'l': {
        char* end;
        double value = strtod(optarg, &end);
        if (end == optarg)
          Fatal("-l parameter not numeric: did you mean -l 0.0?");
        config->max_load_average = value;
        break;
      }
      case 'n':
        config->dry_run = true;
        break;
      case 't':
        // A NULL tool here means ChooseTool() already printed a listing.
        options->tool = ChooseTool(optarg);
        if (!options->tool)
          return 0;
        break;
      case 'v':
        config->verbosity = BuildConfig::VERBOSE;
        break;
      case 'w':
        if (!WarningEnable(optarg, options))
          return 1;
        break;
      case 'C':
        options->working_dir = optarg;
        break;
      case OPT_VERSION:
        printf("%s\n", kNinjaVersion);
        return 0;
      case 'h':
      default:
        Usage(*config);
        return 1;
    }
  }
  // Hand the unconsumed arguments back to the caller.
  *argv += optind;
  *argc -= optind;

  return -1;
}
+
/// The real entry point: parse flags, optionally chdir, dispatch subtools,
/// and otherwise load the manifest and build, re-parsing the manifest when
/// the build regenerates it. Never returns; always terminates via exit().
NORETURN void real_main(int argc, char** argv) {
  // Use exit() instead of return in this function to avoid potentially
  // expensive cleanup when destructing NinjaMain.
  BuildConfig config;
  Options options = {};
  options.input_file = "build.ninja";
  options.dupe_edges_should_err = true;

  // Line-buffer stdout so build output interleaves sanely when redirected.
  setvbuf(stdout, NULL, _IOLBF, BUFSIZ);
  const char* ninja_command = argv[0];

  int exit_code = ReadFlags(&argc, &argv, &options, &config);
  if (exit_code >= 0)
    exit(exit_code);

  if (options.working_dir) {
    // The formatting of this string, complete with funny quotes, is
    // so Emacs can properly identify that the cwd has changed for
    // subsequent commands.
    // Don't print this if a tool is being used, so that tool output
    // can be piped into a file without this string showing up.
    if (!options.tool)
      printf("ninja: Entering directory `%s'\n", options.working_dir);
    if (chdir(options.working_dir) < 0) {
      Fatal("chdir to '%s' - %s", options.working_dir, strerror(errno));
    }
  }

  if (options.tool && options.tool->when == Tool::RUN_AFTER_FLAGS) {
    // None of the RUN_AFTER_FLAGS actually use a NinjaMain, but it's needed
    // by other tools.
    NinjaMain ninja(ninja_command, config);
    exit((ninja.*options.tool->func)(&options, argc, argv));
  }

  // Limit number of rebuilds, to prevent infinite loops.
  const int kCycleLimit = 100;
  for (int cycle = 1; cycle <= kCycleLimit; ++cycle) {
    // Fresh state each cycle: the manifest may have changed completely.
    NinjaMain ninja(ninja_command, config);

    ManifestParserOptions parser_opts;
    if (options.dupe_edges_should_err) {
      parser_opts.dupe_edge_action_ = kDupeEdgeActionError;
    }
    if (options.phony_cycle_should_err) {
      parser_opts.phony_cycle_action_ = kPhonyCycleActionError;
    }
    ManifestParser parser(&ninja.state_, &ninja.disk_interface_, parser_opts);
    string err;
    if (!parser.Load(options.input_file, &err)) {
      Error("%s", err.c_str());
      exit(1);
    }

    if (options.tool && options.tool->when == Tool::RUN_AFTER_LOAD)
      exit((ninja.*options.tool->func)(&options, argc, argv));

    if (!ninja.EnsureBuildDirExists())
      exit(1);

    if (!ninja.OpenBuildLog() || !ninja.OpenDepsLog())
      exit(1);

    if (options.tool && options.tool->when == Tool::RUN_AFTER_LOGS)
      exit((ninja.*options.tool->func)(&options, argc, argv));

    // Attempt to rebuild the manifest before building anything else
    if (ninja.RebuildManifest(options.input_file, &err)) {
      // In dry_run mode the regeneration will succeed without changing the
      // manifest forever. Better to return immediately.
      if (config.dry_run)
        exit(0);
      // Start the build over with the new manifest.
      continue;
    } else if (!err.empty()) {
      Error("rebuilding '%s': %s", options.input_file, err.c_str());
      exit(1);
    }

    int result = ninja.RunBuild(argc, argv);
    if (g_metrics)
      ninja.DumpMetrics();
    exit(result);
  }

  Error("manifest '%s' still dirty after %d tries\n",
      options.input_file, kCycleLimit);
  exit(1);
}
+
+} // anonymous namespace
+
/// Thin wrapper around real_main(); on MSVC it installs crash handlers and
/// runs inside structured exception handling so crashes produce a minidump
/// instead of an error dialog.
int main(int argc, char** argv) {
#if defined(_MSC_VER)
  // Set a handler to catch crashes not caught by the __try..__except
  // block (e.g. an exception in a stack-unwind-block).
  std::set_terminate(TerminateHandler);
  __try {
    // Running inside __try ... __except suppresses any Windows error
    // dialogs for errors such as bad_alloc.
    real_main(argc, argv);
  }
  __except(ExceptionFilter(GetExceptionCode(), GetExceptionInformation())) {
    // Common error situations return exitCode=1. 2 was chosen to
    // indicate a more serious problem.
    return 2;
  }
#else
  real_main(argc, argv);
#endif
}
diff --git a/src/ninja_test.cc b/src/ninja_test.cc
new file mode 100644
index 0000000..b40e176
--- /dev/null
+++ b/src/ninja_test.cc
@@ -0,0 +1,162 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#ifdef _WIN32
+#include "getopt.h"
+#elif defined(_AIX)
+#include "getopt.h"
+#include <unistd.h>
+#else
+#include <getopt.h>
+#endif
+
+#include "test.h"
+#include "line_printer.h"
+
+using namespace std;
+
+struct RegisteredTest {
+ testing::Test* (*factory)();
+ const char *name;
+ bool should_run;
+};
+// This can't be a vector because tests call RegisterTest from static
+// initializers and the order static initializers run in isn't specified. So
+// the vector constructor isn't guaranteed to run before all of the
+// RegisterTest() calls.
+static RegisteredTest tests[10000];
+testing::Test* g_current_test;
+static int ntests;
+static LinePrinter printer;
+
+void RegisterTest(testing::Test* (*factory)(), const char* name) {
+ tests[ntests].factory = factory;
+ tests[ntests++].name = name;
+}
+
+namespace {
+string StringPrintf(const char* format, ...) {
+ const int N = 1024;
+ char buf[N];
+
+ va_list ap;
+ va_start(ap, format);
+ vsnprintf(buf, N, format, ap);
+ va_end(ap);
+
+ return buf;
+}
+
+void Usage() {
+ fprintf(stderr,
+"usage: ninja_tests [options]\n"
+"\n"
+"options:\n"
+"  --gtest_filter=POSITIVE_PATTERN[-NEGATIVE_PATTERN]\n"
+" Run tests whose names match the positive but not the negative pattern.\n"
+" '*' matches any substring. (gtest's ':', '?' are not implemented).\n");
+}
+
+bool PatternMatchesString(const char* pattern, const char* str) {
+ switch (*pattern) {
+ case '\0':
+ case '-': return *str == '\0';
+ case '*': return (*str != '\0' && PatternMatchesString(pattern, str + 1)) ||
+ PatternMatchesString(pattern + 1, str);
+ default: return *pattern == *str &&
+ PatternMatchesString(pattern + 1, str + 1);
+ }
+}
+
+bool TestMatchesFilter(const char* test, const char* filter) {
+ // Split --gtest_filter at '-' into positive and negative filters.
+ const char* const dash = strchr(filter, '-');
+ const char* pos = dash == filter ? "*" : filter; //Treat '-test1' as '*-test1'
+ const char* neg = dash ? dash + 1 : "";
+ return PatternMatchesString(pos, test) && !PatternMatchesString(neg, test);
+}
+
+bool ReadFlags(int* argc, char*** argv, const char** test_filter) {
+ enum { OPT_GTEST_FILTER = 1 };
+ const option kLongOptions[] = {
+ { "gtest_filter", required_argument, NULL, OPT_GTEST_FILTER },
+ { NULL, 0, NULL, 0 }
+ };
+
+ int opt;
+ while ((opt = getopt_long(*argc, *argv, "h", kLongOptions, NULL)) != -1) {
+ switch (opt) {
+ case OPT_GTEST_FILTER:
+ if (strchr(optarg, '?') == NULL && strchr(optarg, ':') == NULL) {
+ *test_filter = optarg;
+ break;
+ } // else fall through.
+ default:
+ Usage();
+ return false;
+ }
+ }
+ *argv += optind;
+ *argc -= optind;
+ return true;
+}
+
+} // namespace
+
+bool testing::Test::Check(bool condition, const char* file, int line,
+ const char* error) {
+ if (!condition) {
+ printer.PrintOnNewLine(
+ StringPrintf("*** Failure in %s:%d\n%s\n", file, line, error));
+ failed_ = true;
+ }
+ return condition;
+}
+
+int main(int argc, char **argv) {
+ int tests_started = 0;
+
+ const char* test_filter = "*";
+ if (!ReadFlags(&argc, &argv, &test_filter))
+ return 1;
+
+ int nactivetests = 0;
+ for (int i = 0; i < ntests; i++)
+ if ((tests[i].should_run = TestMatchesFilter(tests[i].name, test_filter)))
+ ++nactivetests;
+
+ bool passed = true;
+ for (int i = 0; i < ntests; i++) {
+ if (!tests[i].should_run) continue;
+
+ ++tests_started;
+ testing::Test* test = tests[i].factory();
+ printer.Print(
+ StringPrintf("[%d/%d] %s", tests_started, nactivetests, tests[i].name),
+ LinePrinter::ELIDE);
+ test->SetUp();
+ test->Run();
+ test->TearDown();
+ if (test->Failed())
+ passed = false;
+ delete test;
+ }
+
+ printer.PrintOnNewLine(passed ? "passed\n" : "failed\n");
+ return passed ? EXIT_SUCCESS : EXIT_FAILURE;
+}
diff --git a/src/parser.cc b/src/parser.cc
new file mode 100644
index 0000000..756922d
--- /dev/null
+++ b/src/parser.cc
@@ -0,0 +1,53 @@
+// Copyright 2018 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "parser.h"
+
+#include "disk_interface.h"
+#include "metrics.h"
+
+using namespace std;
+
+bool Parser::Load(const string& filename, string* err, Lexer* parent) {
+ METRIC_RECORD(".ninja parse");
+ string contents;
+ string read_err;
+ if (file_reader_->ReadFile(filename, &contents, &read_err) !=
+ FileReader::Okay) {
+ *err = "loading '" + filename + "': " + read_err;
+ if (parent)
+ parent->Error(string(*err), err);
+ return false;
+ }
+
+ // The lexer needs a nul byte at the end of its input, to know when it's done.
+ // It takes a StringPiece, and StringPiece's string constructor uses
+ // string::data(). data()'s return value isn't guaranteed to be
+ // null-terminated (although in practice - libc++, libstdc++, msvc's stl --
+ // it is, and C++11 demands that too), so add an explicit nul byte.
+ contents.resize(contents.size() + 1);
+
+ return Parse(filename, contents, err);
+}
+
+bool Parser::ExpectToken(Lexer::Token expected, string* err) {
+ Lexer::Token token = lexer_.ReadToken();
+ if (token != expected) {
+ string message = string("expected ") + Lexer::TokenName(expected);
+ message += string(", got ") + Lexer::TokenName(token);
+ message += Lexer::TokenErrorHint(expected);
+ return lexer_.Error(message, err);
+ }
+ return true;
+}
diff --git a/src/parser.h b/src/parser.h
new file mode 100644
index 0000000..011fad8
--- /dev/null
+++ b/src/parser.h
@@ -0,0 +1,48 @@
+// Copyright 2018 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_PARSER_H_
+#define NINJA_PARSER_H_
+
+#include <string>
+
+#include "lexer.h"
+
+struct FileReader;
+struct State;
+
+/// Base class for parsers.
+struct Parser {
+ Parser(State* state, FileReader* file_reader)
+ : state_(state), file_reader_(file_reader) {}
+
+ /// Load and parse a file.
+ bool Load(const std::string& filename, std::string* err, Lexer* parent = NULL);
+
+protected:
+ /// If the next token is not \a expected, produce an error string
+ /// saying "expected foo, got bar".
+ bool ExpectToken(Lexer::Token expected, std::string* err);
+
+ State* state_;
+ FileReader* file_reader_;
+ Lexer lexer_;
+
+private:
+ /// Parse a file, given its contents as a string.
+ virtual bool Parse(const std::string& filename, const std::string& input,
+ std::string* err) = 0;
+};
+
+#endif // NINJA_PARSER_H_
diff --git a/src/state.cc b/src/state.cc
new file mode 100644
index 0000000..d3a9e29
--- /dev/null
+++ b/src/state.cc
@@ -0,0 +1,214 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "state.h"
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "edit_distance.h"
+#include "graph.h"
+#include "metrics.h"
+#include "util.h"
+
+using namespace std;
+
+void Pool::EdgeScheduled(const Edge& edge) {
+ if (depth_ != 0)
+ current_use_ += edge.weight();
+}
+
+void Pool::EdgeFinished(const Edge& edge) {
+ if (depth_ != 0)
+ current_use_ -= edge.weight();
+}
+
+void Pool::DelayEdge(Edge* edge) {
+ assert(depth_ != 0);
+ delayed_.insert(edge);
+}
+
+void Pool::RetrieveReadyEdges(set<Edge*>* ready_queue) {
+ DelayedEdges::iterator it = delayed_.begin();
+ while (it != delayed_.end()) {
+ Edge* edge = *it;
+ if (current_use_ + edge->weight() > depth_)
+ break;
+ ready_queue->insert(edge);
+ EdgeScheduled(*edge);
+ ++it;
+ }
+ delayed_.erase(delayed_.begin(), it);
+}
+
+void Pool::Dump() const {
+ printf("%s (%d/%d) ->\n", name_.c_str(), current_use_, depth_);
+ for (DelayedEdges::const_iterator it = delayed_.begin();
+ it != delayed_.end(); ++it)
+ {
+ printf("\t");
+ (*it)->Dump();
+ }
+}
+
+// static
+bool Pool::WeightedEdgeCmp(const Edge* a, const Edge* b) {
+ if (!a) return b;
+ if (!b) return false;
+ int weight_diff = a->weight() - b->weight();
+ return ((weight_diff < 0) || (weight_diff == 0 && a < b));
+}
+
+Pool State::kDefaultPool("", 0);
+Pool State::kConsolePool("console", 1);
+const Rule State::kPhonyRule("phony");
+
+State::State() {
+ bindings_.AddRule(&kPhonyRule);
+ AddPool(&kDefaultPool);
+ AddPool(&kConsolePool);
+}
+
+void State::AddPool(Pool* pool) {
+ assert(LookupPool(pool->name()) == NULL);
+ pools_[pool->name()] = pool;
+}
+
+Pool* State::LookupPool(const string& pool_name) {
+ map<string, Pool*>::iterator i = pools_.find(pool_name);
+ if (i == pools_.end())
+ return NULL;
+ return i->second;
+}
+
+Edge* State::AddEdge(const Rule* rule) {
+ Edge* edge = new Edge();
+ edge->rule_ = rule;
+ edge->pool_ = &State::kDefaultPool;
+ edge->env_ = &bindings_;
+ edges_.push_back(edge);
+ return edge;
+}
+
+Node* State::GetNode(StringPiece path, uint64_t slash_bits) {
+ Node* node = LookupNode(path);
+ if (node)
+ return node;
+ node = new Node(path.AsString(), slash_bits);
+ paths_[node->path()] = node;
+ return node;
+}
+
+Node* State::LookupNode(StringPiece path) const {
+ METRIC_RECORD("lookup node");
+ Paths::const_iterator i = paths_.find(path);
+ if (i != paths_.end())
+ return i->second;
+ return NULL;
+}
+
+Node* State::SpellcheckNode(const string& path) {
+ const bool kAllowReplacements = true;
+ const int kMaxValidEditDistance = 3;
+
+ int min_distance = kMaxValidEditDistance + 1;
+ Node* result = NULL;
+ for (Paths::iterator i = paths_.begin(); i != paths_.end(); ++i) {
+ int distance = EditDistance(
+ i->first, path, kAllowReplacements, kMaxValidEditDistance);
+ if (distance < min_distance && i->second) {
+ min_distance = distance;
+ result = i->second;
+ }
+ }
+ return result;
+}
+
+void State::AddIn(Edge* edge, StringPiece path, uint64_t slash_bits) {
+ Node* node = GetNode(path, slash_bits);
+ edge->inputs_.push_back(node);
+ node->AddOutEdge(edge);
+}
+
+bool State::AddOut(Edge* edge, StringPiece path, uint64_t slash_bits) {
+ Node* node = GetNode(path, slash_bits);
+ if (node->in_edge())
+ return false;
+ edge->outputs_.push_back(node);
+ node->set_in_edge(edge);
+ return true;
+}
+
+bool State::AddDefault(StringPiece path, string* err) {
+ Node* node = LookupNode(path);
+ if (!node) {
+ *err = "unknown target '" + path.AsString() + "'";
+ return false;
+ }
+ defaults_.push_back(node);
+ return true;
+}
+
+vector<Node*> State::RootNodes(string* err) const {
+ vector<Node*> root_nodes;
+ // Search for nodes with no output.
+ for (vector<Edge*>::const_iterator e = edges_.begin();
+ e != edges_.end(); ++e) {
+ for (vector<Node*>::const_iterator out = (*e)->outputs_.begin();
+ out != (*e)->outputs_.end(); ++out) {
+ if ((*out)->out_edges().empty())
+ root_nodes.push_back(*out);
+ }
+ }
+
+ if (!edges_.empty() && root_nodes.empty())
+ *err = "could not determine root nodes of build graph";
+
+ return root_nodes;
+}
+
+vector<Node*> State::DefaultNodes(string* err) const {
+ return defaults_.empty() ? RootNodes(err) : defaults_;
+}
+
+void State::Reset() {
+ for (Paths::iterator i = paths_.begin(); i != paths_.end(); ++i)
+ i->second->ResetState();
+ for (vector<Edge*>::iterator e = edges_.begin(); e != edges_.end(); ++e) {
+ (*e)->outputs_ready_ = false;
+ (*e)->deps_loaded_ = false;
+ (*e)->mark_ = Edge::VisitNone;
+ }
+}
+
+void State::Dump() {
+ for (Paths::iterator i = paths_.begin(); i != paths_.end(); ++i) {
+ Node* node = i->second;
+ printf("%s %s [id:%d]\n",
+ node->path().c_str(),
+ node->status_known() ? (node->dirty() ? "dirty" : "clean")
+ : "unknown",
+ node->id());
+ }
+ if (!pools_.empty()) {
+ printf("resource_pools:\n");
+ for (map<string, Pool*>::const_iterator it = pools_.begin();
+ it != pools_.end(); ++it)
+ {
+ if (!it->second->name().empty()) {
+ it->second->Dump();
+ }
+ }
+ }
+}
diff --git a/src/state.h b/src/state.h
new file mode 100644
index 0000000..f553ed4
--- /dev/null
+++ b/src/state.h
@@ -0,0 +1,130 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_STATE_H_
+#define NINJA_STATE_H_
+
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "eval_env.h"
+#include "hash_map.h"
+#include "util.h"
+
+struct Edge;
+struct Node;
+struct Rule;
+
+/// A pool for delayed edges.
+/// Pools are scoped to a State. Edges within a State will share Pools. A Pool
+/// will keep a count of the total 'weight' of the currently scheduled edges. If
+/// a Plan attempts to schedule an Edge which would cause the total weight to
+/// exceed the depth of the Pool, the Pool will enqueue the Edge instead of
+/// allowing the Plan to schedule it. The Pool will relinquish queued Edges when
+/// the total scheduled weight diminishes enough (i.e. when a scheduled edge
+/// completes).
+struct Pool {
+ Pool(const std::string& name, int depth)
+ : name_(name), current_use_(0), depth_(depth), delayed_(&WeightedEdgeCmp) {}
+
+ // A depth of 0 is infinite
+ bool is_valid() const { return depth_ >= 0; }
+ int depth() const { return depth_; }
+ const std::string& name() const { return name_; }
+ int current_use() const { return current_use_; }
+
+ /// true if the Pool might delay this edge
+ bool ShouldDelayEdge() const { return depth_ != 0; }
+
+ /// informs this Pool that the given edge is committed to be run.
+ /// Pool will count this edge as using resources from this pool.
+ void EdgeScheduled(const Edge& edge);
+
+ /// informs this Pool that the given edge is no longer runnable, and should
+ /// relinquish its resources back to the pool
+ void EdgeFinished(const Edge& edge);
+
+ /// adds the given edge to this Pool to be delayed.
+ void DelayEdge(Edge* edge);
+
+ /// Pool will add zero or more edges to the ready_queue
+ void RetrieveReadyEdges(std::set<Edge*>* ready_queue);
+
+ /// Dump the Pool and its edges (useful for debugging).
+ void Dump() const;
+
+ private:
+ std::string name_;
+
+ /// |current_use_| is the total of the weights of the edges which are
+ /// currently scheduled in the Plan (i.e. the edges in Plan::ready_).
+ int current_use_;
+ int depth_;
+
+ static bool WeightedEdgeCmp(const Edge* a, const Edge* b);
+
+ typedef std::set<Edge*,bool(*)(const Edge*, const Edge*)> DelayedEdges;
+ DelayedEdges delayed_;
+};
+
+/// Global state (file status) for a single run.
+struct State {
+ static Pool kDefaultPool;
+ static Pool kConsolePool;
+ static const Rule kPhonyRule;
+
+ State();
+
+ void AddPool(Pool* pool);
+ Pool* LookupPool(const std::string& pool_name);
+
+ Edge* AddEdge(const Rule* rule);
+
+ Node* GetNode(StringPiece path, uint64_t slash_bits);
+ Node* LookupNode(StringPiece path) const;
+ Node* SpellcheckNode(const std::string& path);
+
+ void AddIn(Edge* edge, StringPiece path, uint64_t slash_bits);
+ bool AddOut(Edge* edge, StringPiece path, uint64_t slash_bits);
+ bool AddDefault(StringPiece path, std::string* error);
+
+ /// Reset state. Keeps all nodes and edges, but restores them to the
+ /// state where we haven't yet examined the disk for dirty state.
+ void Reset();
+
+ /// Dump the nodes and Pools (useful for debugging).
+ void Dump();
+
+ /// @return the root node(s) of the graph. (Root nodes have no output edges).
+  /// @param error where to write the error message if something went wrong.
+ std::vector<Node*> RootNodes(std::string* error) const;
+ std::vector<Node*> DefaultNodes(std::string* error) const;
+
+ /// Mapping of path -> Node.
+ typedef ExternalStringHashMap<Node*>::Type Paths;
+ Paths paths_;
+
+ /// All the pools used in the graph.
+ std::map<std::string, Pool*> pools_;
+
+ /// All the edges of the graph.
+ std::vector<Edge*> edges_;
+
+ BindingEnv bindings_;
+ std::vector<Node*> defaults_;
+};
+
+#endif // NINJA_STATE_H_
diff --git a/src/state_test.cc b/src/state_test.cc
new file mode 100644
index 0000000..96469f9
--- /dev/null
+++ b/src/state_test.cc
@@ -0,0 +1,48 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "graph.h"
+#include "state.h"
+#include "test.h"
+
+using namespace std;
+
+namespace {
+
+TEST(State, Basic) {
+ State state;
+
+ EvalString command;
+ command.AddText("cat ");
+ command.AddSpecial("in");
+ command.AddText(" > ");
+ command.AddSpecial("out");
+
+ Rule* rule = new Rule("cat");
+ rule->AddBinding("command", command);
+ state.bindings_.AddRule(rule);
+
+ Edge* edge = state.AddEdge(rule);
+ state.AddIn(edge, "in1", 0);
+ state.AddIn(edge, "in2", 0);
+ state.AddOut(edge, "out", 0);
+
+ EXPECT_EQ("cat in1 in2 > out", edge->EvaluateCommand());
+
+ EXPECT_FALSE(state.GetNode("in1", 0)->dirty());
+ EXPECT_FALSE(state.GetNode("in2", 0)->dirty());
+ EXPECT_FALSE(state.GetNode("out", 0)->dirty());
+}
+
+} // namespace
diff --git a/src/string_piece.h b/src/string_piece.h
new file mode 100644
index 0000000..1c0bee6
--- /dev/null
+++ b/src/string_piece.h
@@ -0,0 +1,70 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_STRINGPIECE_H_
+#define NINJA_STRINGPIECE_H_
+
+#include <string>
+
+#include <string.h>
+
+/// StringPiece represents a slice of a string whose memory is managed
+/// externally. It is useful for reducing the number of std::strings
+/// we need to allocate.
+struct StringPiece {
+ typedef const char* const_iterator;
+
+ StringPiece() : str_(NULL), len_(0) {}
+
+ /// The constructors intentionally allow for implicit conversions.
+ StringPiece(const std::string& str) : str_(str.data()), len_(str.size()) {}
+ StringPiece(const char* str) : str_(str), len_(strlen(str)) {}
+
+ StringPiece(const char* str, size_t len) : str_(str), len_(len) {}
+
+ bool operator==(const StringPiece& other) const {
+ return len_ == other.len_ && memcmp(str_, other.str_, len_) == 0;
+ }
+
+ bool operator!=(const StringPiece& other) const {
+ return !(*this == other);
+ }
+
+ /// Convert the slice into a full-fledged std::string, copying the
+ /// data into a new string.
+ std::string AsString() const {
+ return len_ ? std::string(str_, len_) : std::string();
+ }
+
+ const_iterator begin() const {
+ return str_;
+ }
+
+ const_iterator end() const {
+ return str_ + len_;
+ }
+
+ char operator[](size_t pos) const {
+ return str_[pos];
+ }
+
+ size_t size() const {
+ return len_;
+ }
+
+ const char* str_;
+ size_t len_;
+};
+
+#endif // NINJA_STRINGPIECE_H_
diff --git a/src/string_piece_util.cc b/src/string_piece_util.cc
new file mode 100644
index 0000000..69513f5
--- /dev/null
+++ b/src/string_piece_util.cc
@@ -0,0 +1,78 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "string_piece_util.h"
+
+#include <algorithm>
+#include <string>
+#include <vector>
+using namespace std;
+
+vector<StringPiece> SplitStringPiece(StringPiece input, char sep) {
+ vector<StringPiece> elems;
+ elems.reserve(count(input.begin(), input.end(), sep) + 1);
+
+ StringPiece::const_iterator pos = input.begin();
+
+ for (;;) {
+ const char* next_pos = find(pos, input.end(), sep);
+ if (next_pos == input.end()) {
+ elems.push_back(StringPiece(pos, input.end() - pos));
+ break;
+ }
+ elems.push_back(StringPiece(pos, next_pos - pos));
+ pos = next_pos + 1;
+ }
+
+ return elems;
+}
+
+string JoinStringPiece(const vector<StringPiece>& list, char sep) {
+ if (list.empty()) {
+ return "";
+ }
+
+ string ret;
+
+ {
+ size_t cap = list.size() - 1;
+ for (size_t i = 0; i < list.size(); ++i) {
+ cap += list[i].len_;
+ }
+ ret.reserve(cap);
+ }
+
+ for (size_t i = 0; i < list.size(); ++i) {
+ if (i != 0) {
+ ret += sep;
+ }
+ ret.append(list[i].str_, list[i].len_);
+ }
+
+ return ret;
+}
+
+bool EqualsCaseInsensitiveASCII(StringPiece a, StringPiece b) {
+ if (a.len_ != b.len_) {
+ return false;
+ }
+
+ for (size_t i = 0; i < a.len_; ++i) {
+ if (ToLowerASCII(a.str_[i]) != ToLowerASCII(b.str_[i])) {
+ return false;
+ }
+ }
+
+ return true;
+}
diff --git a/src/string_piece_util.h b/src/string_piece_util.h
new file mode 100644
index 0000000..28470f1
--- /dev/null
+++ b/src/string_piece_util.h
@@ -0,0 +1,33 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_STRINGPIECE_UTIL_H_
+#define NINJA_STRINGPIECE_UTIL_H_
+
+#include <string>
+#include <vector>
+
+#include "string_piece.h"
+
+std::vector<StringPiece> SplitStringPiece(StringPiece input, char sep);
+
+std::string JoinStringPiece(const std::vector<StringPiece>& list, char sep);
+
+inline char ToLowerASCII(char c) {
+ return (c >= 'A' && c <= 'Z') ? (c + ('a' - 'A')) : c;
+}
+
+bool EqualsCaseInsensitiveASCII(StringPiece a, StringPiece b);
+
+#endif // NINJA_STRINGPIECE_UTIL_H_
diff --git a/src/string_piece_util_test.cc b/src/string_piece_util_test.cc
new file mode 100644
index 0000000..61586dd
--- /dev/null
+++ b/src/string_piece_util_test.cc
@@ -0,0 +1,131 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "string_piece_util.h"
+
+#include "test.h"
+
+using namespace std;
+
+TEST(StringPieceUtilTest, SplitStringPiece) {
+ {
+ string input("a:b:c");
+ vector<StringPiece> list = SplitStringPiece(input, ':');
+
+ EXPECT_EQ(list.size(), 3);
+
+ EXPECT_EQ(list[0], "a");
+ EXPECT_EQ(list[1], "b");
+ EXPECT_EQ(list[2], "c");
+ }
+
+ {
+ string empty;
+ vector<StringPiece> list = SplitStringPiece(empty, ':');
+
+ EXPECT_EQ(list.size(), 1);
+
+ EXPECT_EQ(list[0], "");
+ }
+
+ {
+ string one("a");
+ vector<StringPiece> list = SplitStringPiece(one, ':');
+
+ EXPECT_EQ(list.size(), 1);
+
+ EXPECT_EQ(list[0], "a");
+ }
+
+ {
+ string sep_only(":");
+ vector<StringPiece> list = SplitStringPiece(sep_only, ':');
+
+ EXPECT_EQ(list.size(), 2);
+
+ EXPECT_EQ(list[0], "");
+ EXPECT_EQ(list[1], "");
+ }
+
+ {
+ string sep(":a:b:c:");
+ vector<StringPiece> list = SplitStringPiece(sep, ':');
+
+ EXPECT_EQ(list.size(), 5);
+
+ EXPECT_EQ(list[0], "");
+ EXPECT_EQ(list[1], "a");
+ EXPECT_EQ(list[2], "b");
+ EXPECT_EQ(list[3], "c");
+ EXPECT_EQ(list[4], "");
+ }
+}
+
+TEST(StringPieceUtilTest, JoinStringPiece) {
+ {
+ string input("a:b:c");
+ vector<StringPiece> list = SplitStringPiece(input, ':');
+
+ EXPECT_EQ("a:b:c", JoinStringPiece(list, ':'));
+ EXPECT_EQ("a/b/c", JoinStringPiece(list, '/'));
+ }
+
+ {
+ string empty;
+ vector<StringPiece> list = SplitStringPiece(empty, ':');
+
+ EXPECT_EQ("", JoinStringPiece(list, ':'));
+ }
+
+ {
+ vector<StringPiece> empty_list;
+
+ EXPECT_EQ("", JoinStringPiece(empty_list, ':'));
+ }
+
+ {
+ string one("a");
+ vector<StringPiece> single_list = SplitStringPiece(one, ':');
+
+ EXPECT_EQ("a", JoinStringPiece(single_list, ':'));
+ }
+
+ {
+ string sep(":a:b:c:");
+ vector<StringPiece> list = SplitStringPiece(sep, ':');
+
+ EXPECT_EQ(":a:b:c:", JoinStringPiece(list, ':'));
+ }
+}
+
+TEST(StringPieceUtilTest, ToLowerASCII) {
+ EXPECT_EQ('a', ToLowerASCII('A'));
+ EXPECT_EQ('z', ToLowerASCII('Z'));
+ EXPECT_EQ('a', ToLowerASCII('a'));
+ EXPECT_EQ('z', ToLowerASCII('z'));
+ EXPECT_EQ('/', ToLowerASCII('/'));
+ EXPECT_EQ('1', ToLowerASCII('1'));
+}
+
+TEST(StringPieceUtilTest, EqualsCaseInsensitiveASCII) {
+ EXPECT_TRUE(EqualsCaseInsensitiveASCII("abc", "abc"));
+ EXPECT_TRUE(EqualsCaseInsensitiveASCII("abc", "ABC"));
+ EXPECT_TRUE(EqualsCaseInsensitiveASCII("abc", "aBc"));
+ EXPECT_TRUE(EqualsCaseInsensitiveASCII("AbC", "aBc"));
+ EXPECT_TRUE(EqualsCaseInsensitiveASCII("", ""));
+
+ EXPECT_FALSE(EqualsCaseInsensitiveASCII("a", "ac"));
+ EXPECT_FALSE(EqualsCaseInsensitiveASCII("/", "\\"));
+ EXPECT_FALSE(EqualsCaseInsensitiveASCII("1", "10"));
+}
diff --git a/src/subprocess-posix.cc b/src/subprocess-posix.cc
new file mode 100644
index 0000000..8e78540
--- /dev/null
+++ b/src/subprocess-posix.cc
@@ -0,0 +1,368 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "subprocess.h"
+
+#include <sys/select.h>
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/wait.h>
+#include <spawn.h>
+
+#if defined(USE_PPOLL)
+#include <poll.h>
+#else
+#include <sys/select.h>
+#endif
+
+extern char** environ;
+
+#include "util.h"
+
+using namespace std;
+
+// Construct an idle subprocess: no pipe (fd_ == -1) and no child (pid_ == -1)
+// until Start() is called.
+Subprocess::Subprocess(bool use_console) : fd_(-1), pid_(-1),
+                                           use_console_(use_console) {
+}
+
+Subprocess::~Subprocess() {
+  // Close our read end of the output pipe if it is still open.
+  if (fd_ >= 0)
+    close(fd_);
+  // Reap child if forgotten.
+  if (pid_ != -1)
+    Finish();
+}
+
+// Spawn `command` via "/bin/sh -c" using posix_spawn(), with the child's
+// stdout/stderr redirected into a pipe whose read end becomes fd_.  Every
+// setup failure is Fatal(), so a true return means the spawn succeeded.
+bool Subprocess::Start(SubprocessSet* set, const string& command) {
+  int output_pipe[2];
+  if (pipe(output_pipe) < 0)
+    Fatal("pipe: %s", strerror(errno));
+  fd_ = output_pipe[0];
+#if !defined(USE_PPOLL)
+  // If available, we use ppoll in DoWork(); otherwise we use pselect
+  // and so must avoid overly-large FDs.
+  if (fd_ >= static_cast<int>(FD_SETSIZE))
+    Fatal("pipe: %s", strerror(EMFILE));
+#endif // !USE_PPOLL
+  SetCloseOnExec(fd_);
+
+  posix_spawn_file_actions_t action;
+  int err = posix_spawn_file_actions_init(&action);
+  if (err != 0)
+    Fatal("posix_spawn_file_actions_init: %s", strerror(err));
+
+  // The child has no use for our read end of the pipe.
+  err = posix_spawn_file_actions_addclose(&action, output_pipe[0]);
+  if (err != 0)
+    Fatal("posix_spawn_file_actions_addclose: %s", strerror(err));
+
+  posix_spawnattr_t attr;
+  err = posix_spawnattr_init(&attr);
+  if (err != 0)
+    Fatal("posix_spawnattr_init: %s", strerror(err));
+
+  short flags = 0;
+
+  // Give the child the signal mask we saved before blocking SIGINT et al.
+  flags |= POSIX_SPAWN_SETSIGMASK;
+  err = posix_spawnattr_setsigmask(&attr, &set->old_mask_);
+  if (err != 0)
+    Fatal("posix_spawnattr_setsigmask: %s", strerror(err));
+  // Signals which are set to be caught in the calling process image are set to
+  // default action in the new process image, so no explicit
+  // POSIX_SPAWN_SETSIGDEF parameter is needed.
+
+  if (!use_console_) {
+    // Put the child in its own process group, so ctrl-c won't reach it.
+    flags |= POSIX_SPAWN_SETPGROUP;
+    // No need to posix_spawnattr_setpgroup(&attr, 0), it's the default.
+
+    // Open /dev/null over stdin.
+    err = posix_spawn_file_actions_addopen(&action, 0, "/dev/null", O_RDONLY,
+          0);
+    if (err != 0) {
+      Fatal("posix_spawn_file_actions_addopen: %s", strerror(err));
+    }
+
+    // Route the child's stdout and stderr into our pipe's write end.
+    err = posix_spawn_file_actions_adddup2(&action, output_pipe[1], 1);
+    if (err != 0)
+      Fatal("posix_spawn_file_actions_adddup2: %s", strerror(err));
+    err = posix_spawn_file_actions_adddup2(&action, output_pipe[1], 2);
+    if (err != 0)
+      Fatal("posix_spawn_file_actions_adddup2: %s", strerror(err));
+    err = posix_spawn_file_actions_addclose(&action, output_pipe[1]);
+    if (err != 0)
+      Fatal("posix_spawn_file_actions_addclose: %s", strerror(err));
+    // In the console case, output_pipe is still inherited by the child and
+    // closed when the subprocess finishes, which then notifies ninja.
+  }
+#ifdef POSIX_SPAWN_USEVFORK
+  flags |= POSIX_SPAWN_USEVFORK;
+#endif
+
+  err = posix_spawnattr_setflags(&attr, flags);
+  if (err != 0)
+    Fatal("posix_spawnattr_setflags: %s", strerror(err));
+
+  const char* spawned_args[] = { "/bin/sh", "-c", command.c_str(), NULL };
+  err = posix_spawn(&pid_, "/bin/sh", &action, &attr,
+        const_cast<char**>(spawned_args), environ);
+  if (err != 0)
+    Fatal("posix_spawn: %s", strerror(err));
+
+  err = posix_spawnattr_destroy(&attr);
+  if (err != 0)
+    Fatal("posix_spawnattr_destroy: %s", strerror(err));
+  err = posix_spawn_file_actions_destroy(&action);
+  if (err != 0)
+    Fatal("posix_spawn_file_actions_destroy: %s", strerror(err));
+
+  // The write end now belongs solely to the child; keep only the read end.
+  close(output_pipe[1]);
+  return true;
+}
+
+// Called when fd_ is readable: drain available output into buf_.  A read of
+// zero bytes means the child closed its end of the pipe, so close ours and
+// set fd_ to -1, which makes Done() true.
+void Subprocess::OnPipeReady() {
+  char buf[4 << 10];
+  ssize_t len = read(fd_, buf, sizeof(buf));
+  if (len > 0) {
+    buf_.append(buf, len);
+  } else {
+    if (len < 0)
+      Fatal("read: %s", strerror(errno));
+    close(fd_);
+    fd_ = -1;
+  }
+}
+
+// Reap the child with waitpid() and map its status to an ExitStatus:
+// exit code 0 -> ExitSuccess; killed by SIGINT/SIGTERM/SIGHUP ->
+// ExitInterrupted; everything else -> ExitFailure.
+ExitStatus Subprocess::Finish() {
+  assert(pid_ != -1);
+  int status;
+  if (waitpid(pid_, &status, 0) < 0)
+    Fatal("waitpid(%d): %s", pid_, strerror(errno));
+  pid_ = -1;
+
+#ifdef _AIX
+  if (WIFEXITED(status) && WEXITSTATUS(status) & 0x80) {
+    // Map the shell's exit code used for signal failure (128 + signal) to the
+    // status code expected by AIX WIFSIGNALED and WTERMSIG macros which, unlike
+    // other systems, uses a different bit layout.
+    int signal = WEXITSTATUS(status) & 0x7f;
+    status = (signal << 16) | signal;
+  }
+#endif
+
+  if (WIFEXITED(status)) {
+    int exit = WEXITSTATUS(status);
+    if (exit == 0)
+      return ExitSuccess;
+  } else if (WIFSIGNALED(status)) {
+    if (WTERMSIG(status) == SIGINT || WTERMSIG(status) == SIGTERM
+        || WTERMSIG(status) == SIGHUP)
+      return ExitInterrupted;
+  }
+  return ExitFailure;
+}
+
+// The subprocess is done once its output pipe has been closed (see
+// OnPipeReady()); the exit status is collected later by Finish().
+bool Subprocess::Done() const {
+  return fd_ == -1;
+}
+
+// Output accumulated so far (stdout and stderr interleaved).
+const string& Subprocess::GetOutput() const {
+  return buf_;
+}
+
+// Signal number that interrupted us; 0 means no interruption.  Written by
+// the signal handler below.
+int SubprocessSet::interrupted_;
+
+void SubprocessSet::SetInterruptedFlag(int signum) {
+  interrupted_ = signum;
+}
+
+// SIGINT/SIGTERM/SIGHUP are blocked except while waiting inside DoWork(), so
+// an interruption may be pending rather than delivered; record the first one
+// found, checking in SIGINT, SIGTERM, SIGHUP order.
+void SubprocessSet::HandlePendingInterruption() {
+  sigset_t pending;
+  sigemptyset(&pending);
+  if (sigpending(&pending) == -1) {
+    perror("ninja: sigpending");
+    return;
+  }
+  if (sigismember(&pending, SIGINT))
+    interrupted_ = SIGINT;
+  else if (sigismember(&pending, SIGTERM))
+    interrupted_ = SIGTERM;
+  else if (sigismember(&pending, SIGHUP))
+    interrupted_ = SIGHUP;
+}
+
+// Block SIGINT/SIGTERM/SIGHUP (they are atomically unblocked only while
+// waiting inside ppoll/pselect in DoWork()) and install SetInterruptedFlag,
+// saving the previous mask and handlers for the destructor to restore.
+SubprocessSet::SubprocessSet() {
+  sigset_t set;
+  sigemptyset(&set);
+  sigaddset(&set, SIGINT);
+  sigaddset(&set, SIGTERM);
+  sigaddset(&set, SIGHUP);
+  if (sigprocmask(SIG_BLOCK, &set, &old_mask_) < 0)
+    Fatal("sigprocmask: %s", strerror(errno));
+
+  struct sigaction act;
+  memset(&act, 0, sizeof(act));
+  act.sa_handler = SetInterruptedFlag;
+  if (sigaction(SIGINT, &act, &old_int_act_) < 0)
+    Fatal("sigaction: %s", strerror(errno));
+  if (sigaction(SIGTERM, &act, &old_term_act_) < 0)
+    Fatal("sigaction: %s", strerror(errno));
+  if (sigaction(SIGHUP, &act, &old_hup_act_) < 0)
+    Fatal("sigaction: %s", strerror(errno));
+}
+
+// Kill any still-running subprocesses, then restore the original signal
+// handlers and mask.
+SubprocessSet::~SubprocessSet() {
+  Clear();
+
+  if (sigaction(SIGINT, &old_int_act_, 0) < 0)
+    Fatal("sigaction: %s", strerror(errno));
+  if (sigaction(SIGTERM, &old_term_act_, 0) < 0)
+    Fatal("sigaction: %s", strerror(errno));
+  if (sigaction(SIGHUP, &old_hup_act_, 0) < 0)
+    Fatal("sigaction: %s", strerror(errno));
+  if (sigprocmask(SIG_SETMASK, &old_mask_, 0) < 0)
+    Fatal("sigprocmask: %s", strerror(errno));
+}
+
+// Start `command` and track the new subprocess in running_.  Returns NULL on
+// Start() failure (Start() currently Fatal()s rather than returning false).
+Subprocess *SubprocessSet::Add(const string& command, bool use_console) {
+  Subprocess *subprocess = new Subprocess(use_console);
+  if (!subprocess->Start(this, command)) {
+    delete subprocess;
+    return 0;
+  }
+  running_.push_back(subprocess);
+  return subprocess;
+}
+
+#ifdef USE_PPOLL
+// Wait (interruptibly, via ppoll with the pre-block signal mask) for output
+// from any running subprocess and dispatch it; finished subprocesses move to
+// finished_.  Returns true if we were interrupted by a signal.
+bool SubprocessSet::DoWork() {
+  vector<pollfd> fds;
+  nfds_t nfds = 0;
+
+  for (vector<Subprocess*>::iterator i = running_.begin();
+       i != running_.end(); ++i) {
+    int fd = (*i)->fd_;
+    if (fd < 0)
+      continue;
+    pollfd pfd = { fd, POLLIN | POLLPRI, 0 };
+    fds.push_back(pfd);
+    ++nfds;
+  }
+
+  interrupted_ = 0;
+  // NOTE(review): &fds.front() is undefined if fds is empty; callers
+  // currently only invoke DoWork() with pollable subprocesses -- TODO confirm.
+  int ret = ppoll(&fds.front(), nfds, NULL, &old_mask_);
+  if (ret == -1) {
+    if (errno != EINTR) {
+      perror("ninja: ppoll");
+      return false;
+    }
+    return IsInterrupted();
+  }
+
+  HandlePendingInterruption();
+  if (IsInterrupted())
+    return true;
+
+  nfds_t cur_nfd = 0;
+  for (vector<Subprocess*>::iterator i = running_.begin();
+       i != running_.end(); ) {
+    int fd = (*i)->fd_;
+    if (fd < 0) {
+      // Entries without a pollfd have no result to check.  The previous code
+      // executed a bare `continue` here, which never advanced `i` and would
+      // spin forever if this branch were ever taken.
+      ++i;
+      continue;
+    }
+    assert(fd == fds[cur_nfd].fd);
+    if (fds[cur_nfd++].revents) {
+      (*i)->OnPipeReady();
+      if ((*i)->Done()) {
+        finished_.push(*i);
+        i = running_.erase(i);
+        continue;
+      }
+    }
+    ++i;
+  }
+
+  return IsInterrupted();
+}
+
+#else // !defined(USE_PPOLL)
+// pselect()-based fallback with the same contract as the ppoll() variant;
+// limited to fds below FD_SETSIZE (enforced in Subprocess::Start()).
+bool SubprocessSet::DoWork() {
+  fd_set set;
+  int nfds = 0;
+  FD_ZERO(&set);
+
+  for (vector<Subprocess*>::iterator i = running_.begin();
+       i != running_.end(); ++i) {
+    int fd = (*i)->fd_;
+    if (fd >= 0) {
+      FD_SET(fd, &set);
+      if (nfds < fd+1)
+        nfds = fd+1;
+    }
+  }
+
+  interrupted_ = 0;
+  int ret = pselect(nfds, &set, 0, 0, 0, &old_mask_);
+  if (ret == -1) {
+    if (errno != EINTR) {
+      perror("ninja: pselect");
+      return false;
+    }
+    return IsInterrupted();
+  }
+
+  HandlePendingInterruption();
+  if (IsInterrupted())
+    return true;
+
+  // Dispatch readable pipes; subprocesses that finished move to finished_.
+  for (vector<Subprocess*>::iterator i = running_.begin();
+       i != running_.end(); ) {
+    int fd = (*i)->fd_;
+    if (fd >= 0 && FD_ISSET(fd, &set)) {
+      (*i)->OnPipeReady();
+      if ((*i)->Done()) {
+        finished_.push(*i);
+        i = running_.erase(i);
+        continue;
+      }
+    }
+    ++i;
+  }
+
+  return IsInterrupted();
+}
+#endif // !defined(USE_PPOLL)
+
+// Pop the next finished subprocess, or NULL if none are waiting.
+Subprocess* SubprocessSet::NextFinished() {
+  if (finished_.empty())
+    return NULL;
+  Subprocess* subproc = finished_.front();
+  finished_.pop();
+  return subproc;
+}
+
+// Forward the interrupting signal to every running non-console process group
+// (kill with a negative pid signals the whole group), then delete all
+// running subprocesses.
+void SubprocessSet::Clear() {
+  for (vector<Subprocess*>::iterator i = running_.begin();
+       i != running_.end(); ++i)
+    // Since the foreground process is in our process group, it will receive
+    // the interruption signal (i.e. SIGINT or SIGTERM) at the same time as us.
+    if (!(*i)->use_console_)
+      kill(-(*i)->pid_, interrupted_);
+  for (vector<Subprocess*>::iterator i = running_.begin();
+       i != running_.end(); ++i)
+    delete *i;
+  running_.clear();
+}
diff --git a/src/subprocess-win32.cc b/src/subprocess-win32.cc
new file mode 100644
index 0000000..ff3baac
--- /dev/null
+++ b/src/subprocess-win32.cc
@@ -0,0 +1,307 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "subprocess.h"
+
+#include <assert.h>
+#include <stdio.h>
+
+#include <algorithm>
+
+#include "util.h"
+
+using namespace std;
+
+// Construct an idle subprocess.  pipe_ is explicitly initialized to NULL:
+// the original left it unset until SetupPipe(), so destroying a Subprocess
+// whose Start() never ran would read an uninitialized handle below.
+Subprocess::Subprocess(bool use_console) : child_(NULL), pipe_(NULL),
+                                           overlapped_(), is_reading_(false),
+                                           use_console_(use_console) {
+}
+
+Subprocess::~Subprocess() {
+  // Close our end of the output pipe if it is still open.
+  if (pipe_) {
+    if (!CloseHandle(pipe_))
+      Win32Fatal("CloseHandle");
+  }
+  // Reap child if forgotten.
+  if (child_)
+    Finish();
+}
+
+// Create the parent side of the output pipe: an overlapped named pipe bound
+// to the set's I/O completion port (keyed by `this`), with an asynchronous
+// ConnectNamedPipe already issued.  Returns an inheritable duplicate of the
+// write end for use by the child process.
+HANDLE Subprocess::SetupPipe(HANDLE ioport) {
+  // Pipe name is unique per process and per Subprocess instance.
+  char pipe_name[100];
+  snprintf(pipe_name, sizeof(pipe_name),
+           "\\\\.\\pipe\\ninja_pid%lu_sp%p", GetCurrentProcessId(), this);
+
+  pipe_ = ::CreateNamedPipeA(pipe_name,
+                             PIPE_ACCESS_INBOUND | FILE_FLAG_OVERLAPPED,
+                             PIPE_TYPE_BYTE,
+                             PIPE_UNLIMITED_INSTANCES,
+                             0, 0, INFINITE, NULL);
+  if (pipe_ == INVALID_HANDLE_VALUE)
+    Win32Fatal("CreateNamedPipe");
+
+  if (!CreateIoCompletionPort(pipe_, ioport, (ULONG_PTR)this, 0))
+    Win32Fatal("CreateIoCompletionPort");
+
+  memset(&overlapped_, 0, sizeof(overlapped_));
+  if (!ConnectNamedPipe(pipe_, &overlapped_) &&
+      GetLastError() != ERROR_IO_PENDING) {
+    Win32Fatal("ConnectNamedPipe");
+  }
+
+  // Get the write end of the pipe as a handle inheritable across processes.
+  HANDLE output_write_handle =
+      CreateFileA(pipe_name, GENERIC_WRITE, 0, NULL, OPEN_EXISTING, 0, NULL);
+  HANDLE output_write_child;
+  if (!DuplicateHandle(GetCurrentProcess(), output_write_handle,
+                       GetCurrentProcess(), &output_write_child,
+                       0, TRUE, DUPLICATE_SAME_ACCESS)) {
+    Win32Fatal("DuplicateHandle");
+  }
+  CloseHandle(output_write_handle);
+
+  return output_write_child;
+}
+
+// Launch `command` directly via CreateProcess, wiring the child's
+// stdout/stderr to the overlapped pipe created by SetupPipe().  A missing
+// executable (ERROR_FILE_NOT_FOUND) is reported as a normal build failure
+// rather than a fatal error.
+bool Subprocess::Start(SubprocessSet* set, const string& command) {
+  HANDLE child_pipe = SetupPipe(set->ioport_);
+
+  SECURITY_ATTRIBUTES security_attributes;
+  memset(&security_attributes, 0, sizeof(SECURITY_ATTRIBUTES));
+  security_attributes.nLength = sizeof(SECURITY_ATTRIBUTES);
+  security_attributes.bInheritHandle = TRUE;
+  // Must be inheritable so subprocesses can dup to children.
+  HANDLE nul =
+      CreateFileA("NUL", GENERIC_READ,
+                  FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+                  &security_attributes, OPEN_EXISTING, 0, NULL);
+  if (nul == INVALID_HANDLE_VALUE)
+    Fatal("couldn't open nul");
+
+  STARTUPINFOA startup_info;
+  memset(&startup_info, 0, sizeof(startup_info));
+  startup_info.cb = sizeof(STARTUPINFO);
+  if (!use_console_) {
+    startup_info.dwFlags = STARTF_USESTDHANDLES;
+    startup_info.hStdInput = nul;
+    startup_info.hStdOutput = child_pipe;
+    startup_info.hStdError = child_pipe;
+  }
+  // In the console case, child_pipe is still inherited by the child and closed
+  // when the subprocess finishes, which then notifies ninja.
+
+  PROCESS_INFORMATION process_info;
+  memset(&process_info, 0, sizeof(process_info));
+
+  // Ninja handles ctrl-c, except for subprocesses in console pools.
+  DWORD process_flags = use_console_ ? 0 : CREATE_NEW_PROCESS_GROUP;
+
+  // Do not prepend 'cmd /c' on Windows, this breaks command
+  // lines greater than 8,191 chars.
+  if (!CreateProcessA(NULL, (char*)command.c_str(), NULL, NULL,
+                      /* inherit handles */ TRUE, process_flags,
+                      NULL, NULL,
+                      &startup_info, &process_info)) {
+    DWORD error = GetLastError();
+    if (error == ERROR_FILE_NOT_FOUND) {
+      // File (program) not found error is treated as a normal build
+      // action failure.
+      if (child_pipe)
+        CloseHandle(child_pipe);
+      CloseHandle(pipe_);
+      CloseHandle(nul);
+      pipe_ = NULL;
+      // child_ is already NULL;
+      buf_ = "CreateProcess failed: The system cannot find the file "
+          "specified.\n";
+      return true;
+    } else {
+      fprintf(stderr, "\nCreateProcess failed. Command attempted:\n\"%s\"\n",
+              command.c_str());
+      const char* hint = NULL;
+      // ERROR_INVALID_PARAMETER means the command line was formatted
+      // incorrectly. This can be caused by a command line being too long or
+      // leading whitespace in the command. Give extra context for this case.
+      if (error == ERROR_INVALID_PARAMETER) {
+        if (command.length() > 0 && (command[0] == ' ' || command[0] == '\t'))
+          hint = "command contains leading whitespace";
+        else
+          hint = "is the command line too long?";
+      }
+      Win32Fatal("CreateProcess", hint);
+    }
+  }
+
+  // Close pipe channel only used by the child.
+  if (child_pipe)
+    CloseHandle(child_pipe);
+  CloseHandle(nul);
+
+  // Keep only the process handle; the primary-thread handle is not needed.
+  CloseHandle(process_info.hThread);
+  child_ = process_info.hProcess;
+
+  return true;
+}
+
+// Completion-port callback: harvest bytes from the previous overlapped
+// operation (if it was a read) and queue the next read.  ERROR_BROKEN_PIPE
+// means the child closed its end; closing pipe_ then makes Done() true.
+void Subprocess::OnPipeReady() {
+  DWORD bytes;
+  if (!GetOverlappedResult(pipe_, &overlapped_, &bytes, TRUE)) {
+    if (GetLastError() == ERROR_BROKEN_PIPE) {
+      CloseHandle(pipe_);
+      pipe_ = NULL;
+      return;
+    }
+    Win32Fatal("GetOverlappedResult");
+  }
+
+  // The first completion is the ConnectNamedPipe() issued in SetupPipe(),
+  // not a read; is_reading_ distinguishes the two.
+  if (is_reading_ && bytes)
+    buf_.append(overlapped_buf_, bytes);
+
+  memset(&overlapped_, 0, sizeof(overlapped_));
+  is_reading_ = true;
+  if (!::ReadFile(pipe_, overlapped_buf_, sizeof(overlapped_buf_),
+                  &bytes, &overlapped_)) {
+    if (GetLastError() == ERROR_BROKEN_PIPE) {
+      CloseHandle(pipe_);
+      pipe_ = NULL;
+      return;
+    }
+    if (GetLastError() != ERROR_IO_PENDING)
+      Win32Fatal("ReadFile");
+  }
+
+  // Even if we read any bytes in the readfile call, we'll enter this
+  // function again later and get them at that point.
+}
+
+// Wait for the child, translate its exit code (CONTROL_C_EXIT maps to
+// ExitInterrupted), and release the process handle.
+ExitStatus Subprocess::Finish() {
+  if (!child_)
+    return ExitFailure;
+
+  // TODO: add error handling for all of these.
+  WaitForSingleObject(child_, INFINITE);
+
+  DWORD exit_code = 0;
+  GetExitCodeProcess(child_, &exit_code);
+
+  CloseHandle(child_);
+  child_ = NULL;
+
+  return exit_code == 0              ? ExitSuccess :
+         exit_code == CONTROL_C_EXIT ? ExitInterrupted :
+         ExitFailure;
+}
+
+// Done once the pipe has been closed (child exited, or CreateProcess could
+// not find the file -- see Start()).
+bool Subprocess::Done() const {
+  return pipe_ == NULL;
+}
+
+// Output accumulated so far (stdout and stderr interleaved).
+const string& Subprocess::GetOutput() const {
+  return buf_;
+}
+
+// Completion port shared by all subprocesses; static so the console ctrl
+// handler (which has no instance pointer) can post to it.
+HANDLE SubprocessSet::ioport_;
+
+SubprocessSet::SubprocessSet() {
+  ioport_ = ::CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1);
+  if (!ioport_)
+    Win32Fatal("CreateIoCompletionPort");
+  if (!SetConsoleCtrlHandler(NotifyInterrupted, TRUE))
+    Win32Fatal("SetConsoleCtrlHandler");
+}
+
+SubprocessSet::~SubprocessSet() {
+  Clear();
+
+  SetConsoleCtrlHandler(NotifyInterrupted, FALSE);
+  CloseHandle(ioport_);
+}
+
+// Console ctrl handler: on ctrl-c/ctrl-break, wake DoWork() by posting a
+// completion packet with a NULL key, which DoWork() treats as interruption.
+BOOL WINAPI SubprocessSet::NotifyInterrupted(DWORD dwCtrlType) {
+  if (dwCtrlType == CTRL_C_EVENT || dwCtrlType == CTRL_BREAK_EVENT) {
+    if (!PostQueuedCompletionStatus(ioport_, 0, 0, NULL))
+      Win32Fatal("PostQueuedCompletionStatus");
+    return TRUE;
+  }
+
+  return FALSE;
+}
+
+// Start `command`.  If CreateProcess could not find the file, Start() leaves
+// child_ NULL but still returns true; such a subprocess is already "done"
+// and goes straight to finished_.
+Subprocess *SubprocessSet::Add(const string& command, bool use_console) {
+  Subprocess *subprocess = new Subprocess(use_console);
+  if (!subprocess->Start(this, command)) {
+    delete subprocess;
+    return 0;
+  }
+  if (subprocess->child_)
+    running_.push_back(subprocess);
+  else
+    finished_.push(subprocess);
+  return subprocess;
+}
+
+// Block on the completion port for one event: either pipe I/O for some
+// subprocess (key != NULL) or an interruption packet posted by
+// NotifyInterrupted (key == NULL).  Returns true if interrupted.
+bool SubprocessSet::DoWork() {
+  DWORD bytes_read;
+  Subprocess* subproc;
+  OVERLAPPED* overlapped;
+
+  if (!GetQueuedCompletionStatus(ioport_, &bytes_read, (PULONG_PTR)&subproc,
+                                 &overlapped, INFINITE)) {
+    // A broken pipe still carries a valid completion key; other failures
+    // are fatal.
+    if (GetLastError() != ERROR_BROKEN_PIPE)
+      Win32Fatal("GetQueuedCompletionStatus");
+  }
+
+  if (!subproc) // A NULL subproc indicates that we were interrupted and is
+                // delivered by NotifyInterrupted above.
+    return true;
+
+  subproc->OnPipeReady();
+
+  if (subproc->Done()) {
+    // Move the subprocess from running_ to finished_.
+    vector<Subprocess*>::iterator end =
+        remove(running_.begin(), running_.end(), subproc);
+    if (running_.end() != end) {
+      finished_.push(subproc);
+      running_.resize(end - running_.begin());
+    }
+  }
+
+  return false;
+}
+
+// Pop the next finished subprocess, or NULL if none are waiting.
+Subprocess* SubprocessSet::NextFinished() {
+  if (finished_.empty())
+    return NULL;
+  Subprocess* subproc = finished_.front();
+  finished_.pop();
+  return subproc;
+}
+
+// Send ctrl-break to every running non-console child, then delete all
+// running subprocesses.
+void SubprocessSet::Clear() {
+  for (vector<Subprocess*>::iterator i = running_.begin();
+       i != running_.end(); ++i) {
+    // Since the foreground process is in our process group, it will receive a
+    // CTRL_C_EVENT or CTRL_BREAK_EVENT at the same time as us.
+    if ((*i)->child_ && !(*i)->use_console_) {
+      if (!GenerateConsoleCtrlEvent(CTRL_BREAK_EVENT,
+                                    GetProcessId((*i)->child_))) {
+        Win32Fatal("GenerateConsoleCtrlEvent");
+      }
+    }
+  }
+  for (vector<Subprocess*>::iterator i = running_.begin();
+       i != running_.end(); ++i)
+    delete *i;
+  running_.clear();
+}
diff --git a/src/subprocess.h b/src/subprocess.h
new file mode 100644
index 0000000..9e3d2ee
--- /dev/null
+++ b/src/subprocess.h
@@ -0,0 +1,113 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_SUBPROCESS_H_
+#define NINJA_SUBPROCESS_H_
+
+#include <string>
+#include <vector>
+#include <queue>
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <signal.h>
+#endif
+
+// ppoll() exists on FreeBSD, but only on newer versions.
+#ifdef __FreeBSD__
+# include <sys/param.h>
+# if defined USE_PPOLL && __FreeBSD_version < 1002000
+# undef USE_PPOLL
+# endif
+#endif
+
+#include "exit_status.h"
+
+/// Subprocess wraps a single async subprocess. It is entirely
+/// passive: it expects the caller to notify it when its fds are ready
+/// for reading, as well as call Finish() to reap the child once done()
+/// is true.
+struct Subprocess {
+  ~Subprocess();
+
+  /// Returns ExitSuccess on successful process exit, ExitInterrupted if
+  /// the process was interrupted, ExitFailure if it otherwise failed.
+  ExitStatus Finish();
+
+  /// True once the child's output pipe has been closed; Finish() may then
+  /// be called to collect the exit status.
+  bool Done() const;
+
+  /// Output (stdout and stderr interleaved) collected so far.
+  const std::string& GetOutput() const;
+
+ private:
+  Subprocess(bool use_console);
+  bool Start(struct SubprocessSet* set, const std::string& command);
+  void OnPipeReady();
+
+  /// Accumulated subprocess output.
+  std::string buf_;
+
+#ifdef _WIN32
+  /// Set up pipe_ as the parent-side pipe of the subprocess; return the
+  /// other end of the pipe, usable in the child process.
+  HANDLE SetupPipe(HANDLE ioport);
+
+  HANDLE child_;
+  HANDLE pipe_;
+  OVERLAPPED overlapped_;
+  char overlapped_buf_[4 << 10];
+  bool is_reading_;
+#else
+  int fd_;     // Read end of the child's output pipe; -1 once closed.
+  pid_t pid_;  // Child process id; -1 once reaped.
+#endif
+  bool use_console_;
+
+  friend struct SubprocessSet;
+};
+
+/// SubprocessSet runs a ppoll/pselect() loop around a set of Subprocesses.
+/// DoWork() waits for any state change in subprocesses; finished_
+/// is a queue of subprocesses as they finish.
+struct SubprocessSet {
+  SubprocessSet();
+  ~SubprocessSet();
+
+  /// Start `command`; the returned Subprocess is tracked by this set.
+  Subprocess* Add(const std::string& command, bool use_console = false);
+  /// Wait for one state change; returns true if interrupted.
+  bool DoWork();
+  /// Pop the next finished subprocess, or NULL if none are waiting.
+  Subprocess* NextFinished();
+  /// Kill and release all running subprocesses.
+  void Clear();
+
+  std::vector<Subprocess*> running_;
+  std::queue<Subprocess*> finished_;
+
+#ifdef _WIN32
+  static BOOL WINAPI NotifyInterrupted(DWORD dwCtrlType);
+  static HANDLE ioport_;
+#else
+  static void SetInterruptedFlag(int signum);
+  static void HandlePendingInterruption();
+  /// Store the signal number that causes the interruption.
+  /// 0 if no interruption has occurred.
+  static int interrupted_;
+
+  static bool IsInterrupted() { return interrupted_ != 0; }
+
+  struct sigaction old_int_act_;
+  struct sigaction old_term_act_;
+  struct sigaction old_hup_act_;
+  sigset_t old_mask_;
+#endif
+};
+
+#endif // NINJA_SUBPROCESS_H_
diff --git a/src/subprocess_test.cc b/src/subprocess_test.cc
new file mode 100644
index 0000000..073fe86
--- /dev/null
+++ b/src/subprocess_test.cc
@@ -0,0 +1,263 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "subprocess.h"
+
+#include "test.h"
+
+#ifndef _WIN32
+// SetWithLots need setrlimit.
+#include <stdio.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <unistd.h>
+#endif
+
+using namespace std;
+
+namespace {
+
+#ifdef _WIN32
+const char* kSimpleCommand = "cmd /c dir \\";
+#else
+const char* kSimpleCommand = "ls /";
+#endif
+
+struct SubprocessTest : public testing::Test {
+ SubprocessSet subprocs_;
+};
+
+} // anonymous namespace
+
+// Run a command that fails and emits to stderr.
+TEST_F(SubprocessTest, BadCommandStderr) {
+  Subprocess* subproc = subprocs_.Add("cmd /c ninja_no_such_command");
+  ASSERT_NE((Subprocess *) 0, subproc);
+
+  while (!subproc->Done()) {
+    // Pretend we discovered that stderr was ready for writing.
+    subprocs_.DoWork();
+  }
+
+  // The shell runs but the command inside it fails: failing exit status and
+  // some error text captured in the output.
+  EXPECT_EQ(ExitFailure, subproc->Finish());
+  EXPECT_NE("", subproc->GetOutput());
+}
+
+// Run a command that does not exist
+TEST_F(SubprocessTest, NoSuchCommand) {
+  Subprocess* subproc = subprocs_.Add("ninja_no_such_command");
+  ASSERT_NE((Subprocess *) 0, subproc);
+
+  while (!subproc->Done()) {
+    // Pretend we discovered that stderr was ready for writing.
+    subprocs_.DoWork();
+  }
+
+  EXPECT_EQ(ExitFailure, subproc->Finish());
+  EXPECT_NE("", subproc->GetOutput());
+#ifdef _WIN32
+  // On Windows the message is synthesized by Subprocess::Start() when
+  // CreateProcess reports ERROR_FILE_NOT_FOUND.
+  ASSERT_EQ("CreateProcess failed: The system cannot find the file "
+            "specified.\n", subproc->GetOutput());
+#endif
+}
+
+#ifndef _WIN32
+
+// "kill -INT $$" makes the shell signal itself, so the child dies by SIGINT
+// and Finish() should report ExitInterrupted.
+TEST_F(SubprocessTest, InterruptChild) {
+  Subprocess* subproc = subprocs_.Add("kill -INT $$");
+  ASSERT_NE((Subprocess *) 0, subproc);
+
+  while (!subproc->Done()) {
+    subprocs_.DoWork();
+  }
+
+  EXPECT_EQ(ExitInterrupted, subproc->Finish());
+}
+
+// Signalling the parent (the test process) should make DoWork() return
+// true; "sleep 1" keeps the child alive long enough for that to happen.
+TEST_F(SubprocessTest, InterruptParent) {
+  Subprocess* subproc = subprocs_.Add("kill -INT $PPID ; sleep 1");
+  ASSERT_NE((Subprocess *) 0, subproc);
+
+  while (!subproc->Done()) {
+    bool interrupted = subprocs_.DoWork();
+    if (interrupted)
+      return;
+  }
+
+  ASSERT_FALSE("We should have been interrupted");
+}
+
+// Same pair of scenarios for SIGTERM.
+TEST_F(SubprocessTest, InterruptChildWithSigTerm) {
+  Subprocess* subproc = subprocs_.Add("kill -TERM $$");
+  ASSERT_NE((Subprocess *) 0, subproc);
+
+  while (!subproc->Done()) {
+    subprocs_.DoWork();
+  }
+
+  EXPECT_EQ(ExitInterrupted, subproc->Finish());
+}
+
+TEST_F(SubprocessTest, InterruptParentWithSigTerm) {
+  Subprocess* subproc = subprocs_.Add("kill -TERM $PPID ; sleep 1");
+  ASSERT_NE((Subprocess *) 0, subproc);
+
+  while (!subproc->Done()) {
+    bool interrupted = subprocs_.DoWork();
+    if (interrupted)
+      return;
+  }
+
+  ASSERT_FALSE("We should have been interrupted");
+}
+
+// Same pair of scenarios for SIGHUP.
+TEST_F(SubprocessTest, InterruptChildWithSigHup) {
+  Subprocess* subproc = subprocs_.Add("kill -HUP $$");
+  ASSERT_NE((Subprocess *) 0, subproc);
+
+  while (!subproc->Done()) {
+    subprocs_.DoWork();
+  }
+
+  EXPECT_EQ(ExitInterrupted, subproc->Finish());
+}
+
+TEST_F(SubprocessTest, InterruptParentWithSigHup) {
+  Subprocess* subproc = subprocs_.Add("kill -HUP $PPID ; sleep 1");
+  ASSERT_NE((Subprocess *) 0, subproc);
+
+  while (!subproc->Done()) {
+    bool interrupted = subprocs_.DoWork();
+    if (interrupted)
+      return;
+  }
+
+  ASSERT_FALSE("We should have been interrupted");
+}
+
+// A console-pool subprocess keeps our terminal: verify that all three
+// standard streams are still a tty inside it.
+TEST_F(SubprocessTest, Console) {
+  // Skip test if we don't have the console ourselves.
+  if (isatty(0) && isatty(1) && isatty(2)) {
+    Subprocess* subproc =
+        subprocs_.Add("test -t 0 -a -t 1 -a -t 2", /*use_console=*/true);
+    ASSERT_NE((Subprocess*)0, subproc);
+
+    while (!subproc->Done()) {
+      subprocs_.DoWork();
+    }
+
+    EXPECT_EQ(ExitSuccess, subproc->Finish());
+  }
+}
+
+#endif
+
+TEST_F(SubprocessTest, SetWithSingle) {
+  Subprocess* subproc = subprocs_.Add(kSimpleCommand);
+  ASSERT_NE((Subprocess *) 0, subproc);
+
+  while (!subproc->Done()) {
+    subprocs_.DoWork();
+  }
+  ASSERT_EQ(ExitSuccess, subproc->Finish());
+  ASSERT_NE("", subproc->GetOutput());
+
+  ASSERT_EQ(1u, subprocs_.finished_.size());
+}
+
+// Run three commands concurrently and check that running_/finished_ are
+// maintained correctly and each command's output is captured separately.
+TEST_F(SubprocessTest, SetWithMulti) {
+  Subprocess* processes[3];
+  const char* kCommands[3] = {
+    kSimpleCommand,
+#ifdef _WIN32
+    "cmd /c echo hi",
+    "cmd /c time /t",
+#else
+    "id -u",
+    "pwd",
+#endif
+  };
+
+  for (int i = 0; i < 3; ++i) {
+    processes[i] = subprocs_.Add(kCommands[i]);
+    ASSERT_NE((Subprocess *) 0, processes[i]);
+  }
+
+  ASSERT_EQ(3u, subprocs_.running_.size());
+  for (int i = 0; i < 3; ++i) {
+    ASSERT_FALSE(processes[i]->Done());
+    ASSERT_EQ("", processes[i]->GetOutput());
+  }
+
+  while (!processes[0]->Done() || !processes[1]->Done() ||
+         !processes[2]->Done()) {
+    ASSERT_GT(subprocs_.running_.size(), 0u);
+    subprocs_.DoWork();
+  }
+
+  ASSERT_EQ(0u, subprocs_.running_.size());
+  ASSERT_EQ(3u, subprocs_.finished_.size());
+
+  for (int i = 0; i < 3; ++i) {
+    ASSERT_EQ(ExitSuccess, processes[i]->Finish());
+    ASSERT_NE("", processes[i]->GetOutput());
+    delete processes[i];
+  }
+}
+
+#if defined(USE_PPOLL)
+// Spawn more subprocesses than FD_SETSIZE to prove the ppoll() path is not
+// limited the way pselect() is.
+TEST_F(SubprocessTest, SetWithLots) {
+  // Arbitrary big number; needs to be over 1024 to confirm we're no longer
+  // hostage to pselect.
+  const unsigned kNumProcs = 1025;
+
+  // Make sure [ulimit -n] isn't going to stop us from working.
+  rlimit rlim;
+  ASSERT_EQ(0, getrlimit(RLIMIT_NOFILE, &rlim));
+  if (rlim.rlim_cur < kNumProcs) {
+    // rlim_t is not `unsigned long` on every platform, so cast explicitly
+    // for the %lu conversion instead of passing it through varargs as-is.
+    printf("Raise [ulimit -n] above %u (currently %lu) to make this test go\n",
+           kNumProcs, (unsigned long)rlim.rlim_cur);
+    return;
+  }
+
+  vector<Subprocess*> procs;
+  for (size_t i = 0; i < kNumProcs; ++i) {
+    Subprocess* subproc = subprocs_.Add("/bin/echo");
+    ASSERT_NE((Subprocess *) 0, subproc);
+    procs.push_back(subproc);
+  }
+  while (!subprocs_.running_.empty())
+    subprocs_.DoWork();
+  for (size_t i = 0; i < procs.size(); ++i) {
+    ASSERT_EQ(ExitSuccess, procs[i]->Finish());
+    ASSERT_NE("", procs[i]->GetOutput());
+  }
+  ASSERT_EQ(kNumProcs, subprocs_.finished_.size());
+}
+#endif // USE_PPOLL
+
+// TODO: this test could work on Windows, just not sure how to simply
+// read stdin.
+#ifndef _WIN32
+// Verify that a command that attempts to read stdin correctly thinks
+// that stdin is closed: "cat -" should see EOF immediately (stdin is
+// /dev/null, see Subprocess::Start()) and exit successfully.
+TEST_F(SubprocessTest, ReadStdin) {
+  Subprocess* subproc = subprocs_.Add("cat -");
+  while (!subproc->Done()) {
+    subprocs_.DoWork();
+  }
+  ASSERT_EQ(ExitSuccess, subproc->Finish());
+  ASSERT_EQ(1u, subprocs_.finished_.size());
+}
+#endif // !_WIN32
diff --git a/src/test.cc b/src/test.cc
new file mode 100644
index 0000000..11b1c9e
--- /dev/null
+++ b/src/test.cc
@@ -0,0 +1,237 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifdef _WIN32
+#include <direct.h> // Has to be before util.h is included.
+#endif
+
+#include "test.h"
+
+#include <algorithm>
+
+#include <errno.h>
+#include <stdlib.h>
+#ifdef _WIN32
+#include <windows.h>
+#include <io.h>
+#else
+#include <unistd.h>
+#endif
+
+#include "build_log.h"
+#include "graph.h"
+#include "manifest_parser.h"
+#include "util.h"
+
+#ifdef _AIX
+extern "C" {
+ // GCC "helpfully" strips the definition of mkdtemp out on AIX.
+ // The function is still present, so if we define it ourselves
+ // it will work perfectly fine.
+ extern char* mkdtemp(char* name_template);
+}
+#endif
+
+using namespace std;
+
+namespace {
+
+#ifdef _WIN32
+/// Windows has no mkdtemp. Implement it in terms of _mktemp_s.
+char* mkdtemp(char* name_template) {
+ int err = _mktemp_s(name_template, strlen(name_template) + 1);
+ if (err < 0) {
+ perror("_mktemp_s");
+ return NULL;
+ }
+
+ err = _mkdir(name_template);
+ if (err < 0) {
+ perror("mkdir");
+ return NULL;
+ }
+
+ return name_template;
+}
+#endif // _WIN32
+
+string GetSystemTempDir() {
+#ifdef _WIN32
+ char buf[1024];
+ if (!GetTempPath(sizeof(buf), buf))
+ return "";
+ return buf;
+#else
+ const char* tempdir = getenv("TMPDIR");
+ if (tempdir)
+ return tempdir;
+ return "/tmp";
+#endif
+}
+
+} // anonymous namespace
+
+StateTestWithBuiltinRules::StateTestWithBuiltinRules() {
+ AddCatRule(&state_);
+}
+
+void StateTestWithBuiltinRules::AddCatRule(State* state) {
+ AssertParse(state,
+"rule cat\n"
+" command = cat $in > $out\n");
+}
+
+Node* StateTestWithBuiltinRules::GetNode(const string& path) {
+ EXPECT_FALSE(strpbrk(path.c_str(), "/\\"));
+ return state_.GetNode(path, 0);
+}
+
+void AssertParse(State* state, const char* input,
+ ManifestParserOptions opts) {
+ ManifestParser parser(state, NULL, opts);
+ string err;
+ EXPECT_TRUE(parser.ParseTest(input, &err));
+ ASSERT_EQ("", err);
+ VerifyGraph(*state);
+}
+
+void AssertHash(const char* expected, uint64_t actual) {
+ ASSERT_EQ(BuildLog::LogEntry::HashCommand(expected), actual);
+}
+
+void VerifyGraph(const State& state) {
+ for (vector<Edge*>::const_iterator e = state.edges_.begin();
+ e != state.edges_.end(); ++e) {
+ // All edges need at least one output.
+ EXPECT_FALSE((*e)->outputs_.empty());
+ // Check that the edge's inputs have the edge as out-edge.
+ for (vector<Node*>::const_iterator in_node = (*e)->inputs_.begin();
+ in_node != (*e)->inputs_.end(); ++in_node) {
+ const vector<Edge*>& out_edges = (*in_node)->out_edges();
+ EXPECT_NE(find(out_edges.begin(), out_edges.end(), *e),
+ out_edges.end());
+ }
+ // Check that the edge's outputs have the edge as in-edge.
+ for (vector<Node*>::const_iterator out_node = (*e)->outputs_.begin();
+ out_node != (*e)->outputs_.end(); ++out_node) {
+ EXPECT_EQ((*out_node)->in_edge(), *e);
+ }
+ }
+
+ // The union of all in- and out-edges of each nodes should be exactly edges_.
+ set<const Edge*> node_edge_set;
+ for (State::Paths::const_iterator p = state.paths_.begin();
+ p != state.paths_.end(); ++p) {
+ const Node* n = p->second;
+ if (n->in_edge())
+ node_edge_set.insert(n->in_edge());
+ node_edge_set.insert(n->out_edges().begin(), n->out_edges().end());
+ }
+ set<const Edge*> edge_set(state.edges_.begin(), state.edges_.end());
+ EXPECT_EQ(node_edge_set, edge_set);
+}
+
+void VirtualFileSystem::Create(const string& path,
+ const string& contents) {
+ files_[path].mtime = now_;
+ files_[path].contents = contents;
+ files_created_.insert(path);
+}
+
+TimeStamp VirtualFileSystem::Stat(const string& path, string* err) const {
+ FileMap::const_iterator i = files_.find(path);
+ if (i != files_.end()) {
+ *err = i->second.stat_error;
+ return i->second.mtime;
+ }
+ return 0;
+}
+
+bool VirtualFileSystem::WriteFile(const string& path, const string& contents) {
+ Create(path, contents);
+ return true;
+}
+
+bool VirtualFileSystem::MakeDir(const string& path) {
+ directories_made_.push_back(path);
+ return true; // success
+}
+
+FileReader::Status VirtualFileSystem::ReadFile(const string& path,
+ string* contents,
+ string* err) {
+ files_read_.push_back(path);
+ FileMap::iterator i = files_.find(path);
+ if (i != files_.end()) {
+ *contents = i->second.contents;
+ return Okay;
+ }
+ *err = strerror(ENOENT);
+ return NotFound;
+}
+
+int VirtualFileSystem::RemoveFile(const string& path) {
+ if (find(directories_made_.begin(), directories_made_.end(), path)
+ != directories_made_.end())
+ return -1;
+ FileMap::iterator i = files_.find(path);
+ if (i != files_.end()) {
+ files_.erase(i);
+ files_removed_.insert(path);
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+void ScopedTempDir::CreateAndEnter(const string& name) {
+ // First change into the system temp dir and save it for cleanup.
+ start_dir_ = GetSystemTempDir();
+ if (start_dir_.empty())
+ Fatal("couldn't get system temp dir");
+ if (chdir(start_dir_.c_str()) < 0)
+ Fatal("chdir: %s", strerror(errno));
+
+ // Create a temporary subdirectory of that.
+ char name_template[1024];
+ strcpy(name_template, name.c_str());
+ strcat(name_template, "-XXXXXX");
+ char* tempname = mkdtemp(name_template);
+ if (!tempname)
+ Fatal("mkdtemp: %s", strerror(errno));
+ temp_dir_name_ = tempname;
+
+ // chdir into the new temporary directory.
+ if (chdir(temp_dir_name_.c_str()) < 0)
+ Fatal("chdir: %s", strerror(errno));
+}
+
+void ScopedTempDir::Cleanup() {
+ if (temp_dir_name_.empty())
+ return; // Something went wrong earlier.
+
+ // Move out of the directory we're about to clobber.
+ if (chdir(start_dir_.c_str()) < 0)
+ Fatal("chdir: %s", strerror(errno));
+
+#ifdef _WIN32
+ string command = "rmdir /s /q " + temp_dir_name_;
+#else
+ string command = "rm -rf " + temp_dir_name_;
+#endif
+ if (system(command.c_str()) < 0)
+ Fatal("system: %s", strerror(errno));
+
+ temp_dir_name_.clear();
+}
diff --git a/src/test.h b/src/test.h
new file mode 100644
index 0000000..4552c34
--- /dev/null
+++ b/src/test.h
@@ -0,0 +1,185 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_TEST_H_
+#define NINJA_TEST_H_
+
+#include "disk_interface.h"
+#include "manifest_parser.h"
+#include "state.h"
+#include "util.h"
+
+// A tiny testing framework inspired by googletest, but much simpler and
+// faster to compile. It supports most things commonly used from googletest. The
+// most noticeable things missing: EXPECT_* and ASSERT_* don't support
+// streaming notes to them with operator<<, and for failing tests the lhs and
+// rhs are not printed. That's so that this header does not have to include
+// sstream, which slows down building ninja_test almost 20%.
+namespace testing {
+class Test {
+ bool failed_;
+ int assertion_failures_;
+ public:
+ Test() : failed_(false), assertion_failures_(0) {}
+ virtual ~Test() {}
+ virtual void SetUp() {}
+ virtual void TearDown() {}
+ virtual void Run() = 0;
+
+ bool Failed() const { return failed_; }
+ int AssertionFailures() const { return assertion_failures_; }
+ void AddAssertionFailure() { assertion_failures_++; }
+ bool Check(bool condition, const char* file, int line, const char* error);
+};
+}
+
+void RegisterTest(testing::Test* (*)(), const char*);
+
+extern testing::Test* g_current_test;
+#define TEST_F_(x, y, name) \
+ struct y : public x { \
+ static testing::Test* Create() { return g_current_test = new y; } \
+ virtual void Run(); \
+ }; \
+ struct Register##y { \
+ Register##y() { RegisterTest(y::Create, name); } \
+ }; \
+ Register##y g_register_##y; \
+ void y::Run()
+
+#define TEST_F(x, y) TEST_F_(x, x##y, #x "." #y)
+#define TEST(x, y) TEST_F_(testing::Test, x##y, #x "." #y)
+
+#define EXPECT_EQ(a, b) \
+ g_current_test->Check(a == b, __FILE__, __LINE__, #a " == " #b)
+#define EXPECT_NE(a, b) \
+ g_current_test->Check(a != b, __FILE__, __LINE__, #a " != " #b)
+#define EXPECT_GT(a, b) \
+ g_current_test->Check(a > b, __FILE__, __LINE__, #a " > " #b)
+#define EXPECT_LT(a, b) \
+ g_current_test->Check(a < b, __FILE__, __LINE__, #a " < " #b)
+#define EXPECT_GE(a, b) \
+ g_current_test->Check(a >= b, __FILE__, __LINE__, #a " >= " #b)
+#define EXPECT_LE(a, b) \
+ g_current_test->Check(a <= b, __FILE__, __LINE__, #a " <= " #b)
+#define EXPECT_TRUE(a) \
+ g_current_test->Check(static_cast<bool>(a), __FILE__, __LINE__, #a)
+#define EXPECT_FALSE(a) \
+ g_current_test->Check(!static_cast<bool>(a), __FILE__, __LINE__, #a)
+
+#define ASSERT_EQ(a, b) \
+ if (!EXPECT_EQ(a, b)) { g_current_test->AddAssertionFailure(); return; }
+#define ASSERT_NE(a, b) \
+ if (!EXPECT_NE(a, b)) { g_current_test->AddAssertionFailure(); return; }
+#define ASSERT_GT(a, b) \
+ if (!EXPECT_GT(a, b)) { g_current_test->AddAssertionFailure(); return; }
+#define ASSERT_LT(a, b) \
+ if (!EXPECT_LT(a, b)) { g_current_test->AddAssertionFailure(); return; }
+#define ASSERT_GE(a, b) \
+ if (!EXPECT_GE(a, b)) { g_current_test->AddAssertionFailure(); return; }
+#define ASSERT_LE(a, b) \
+ if (!EXPECT_LE(a, b)) { g_current_test->AddAssertionFailure(); return; }
+#define ASSERT_TRUE(a) \
+ if (!EXPECT_TRUE(a)) { g_current_test->AddAssertionFailure(); return; }
+#define ASSERT_FALSE(a) \
+ if (!EXPECT_FALSE(a)) { g_current_test->AddAssertionFailure(); return; }
+#define ASSERT_NO_FATAL_FAILURE(a) \
+ { \
+ int fail_count = g_current_test->AssertionFailures(); \
+ a; \
+ if (fail_count != g_current_test->AssertionFailures()) { \
+ g_current_test->AddAssertionFailure(); \
+ return; \
+ } \
+ }
+
+// Support utilities for tests.
+
+struct Node;
+
+/// A base test fixture that includes a State object with a
+/// builtin "cat" rule.
+struct StateTestWithBuiltinRules : public testing::Test {
+ StateTestWithBuiltinRules();
+
+ /// Add a "cat" rule to \a state. Used by some tests; it's
+ /// otherwise done by the ctor to state_.
+ void AddCatRule(State* state);
+
+ /// Short way to get a Node by its path from state_.
+ Node* GetNode(const std::string& path);
+
+ State state_;
+};
+
+void AssertParse(State* state, const char* input,
+ ManifestParserOptions = ManifestParserOptions());
+void AssertHash(const char* expected, uint64_t actual);
+void VerifyGraph(const State& state);
+
+/// An implementation of DiskInterface that uses an in-memory representation
+/// of disk state. It also logs file accesses and directory creations
+/// so it can be used by tests to verify disk access patterns.
+struct VirtualFileSystem : public DiskInterface {
+ VirtualFileSystem() : now_(1) {}
+
+ /// "Create" a file with contents.
+ void Create(const std::string& path, const std::string& contents);
+
+ /// Tick "time" forwards; subsequent file operations will be newer than
+ /// previous ones.
+ int Tick() {
+ return ++now_;
+ }
+
+ // DiskInterface
+ virtual TimeStamp Stat(const std::string& path, std::string* err) const;
+ virtual bool WriteFile(const std::string& path, const std::string& contents);
+ virtual bool MakeDir(const std::string& path);
+ virtual Status ReadFile(const std::string& path, std::string* contents,
+ std::string* err);
+ virtual int RemoveFile(const std::string& path);
+
+ /// An entry for a single in-memory file.
+ struct Entry {
+ int mtime;
+ std::string stat_error; // If mtime is -1.
+ std::string contents;
+ };
+
+ std::vector<std::string> directories_made_;
+ std::vector<std::string> files_read_;
+ typedef std::map<std::string, Entry> FileMap;
+ FileMap files_;
+ std::set<std::string> files_removed_;
+ std::set<std::string> files_created_;
+
+ /// A simple fake timestamp for file operations.
+ int now_;
+};
+
+struct ScopedTempDir {
+ /// Create a temporary directory and chdir into it.
+ void CreateAndEnter(const std::string& name);
+
+ /// Clean up the temporary directory.
+ void Cleanup();
+
+ /// The temp directory containing our dir.
+ std::string start_dir_;
+ /// The subdirectory name for our dir, or empty if it hasn't been set up.
+ std::string temp_dir_name_;
+};
+
+#endif // NINJA_TEST_H_
diff --git a/src/timestamp.h b/src/timestamp.h
new file mode 100644
index 0000000..6a7ccd0
--- /dev/null
+++ b/src/timestamp.h
@@ -0,0 +1,33 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_TIMESTAMP_H_
+#define NINJA_TIMESTAMP_H_
+
+#ifdef _WIN32
+#include "win32port.h"
+#else
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+#include <inttypes.h>
+#endif
+
+// When considering file modification times we only care to compare
+// them against one another -- we never convert them to an absolute
+// real time. On POSIX we use timespec (seconds&nanoseconds since epoch)
+// and on Windows we use a different value. Both fit in an int64.
+typedef int64_t TimeStamp;
+
+#endif // NINJA_TIMESTAMP_H_
diff --git a/src/util.cc b/src/util.cc
new file mode 100644
index 0000000..c76f730
--- /dev/null
+++ b/src/util.cc
@@ -0,0 +1,634 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "util.h"
+
+#ifdef __CYGWIN__
+#include <windows.h>
+#include <io.h>
+#elif defined( _WIN32)
+#include <windows.h>
+#include <io.h>
+#include <share.h>
+#endif
+
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#ifndef _WIN32
+#include <unistd.h>
+#include <sys/time.h>
+#endif
+
+#include <vector>
+
+#if defined(__APPLE__) || defined(__FreeBSD__)
+#include <sys/sysctl.h>
+#elif defined(__SVR4) && defined(__sun)
+#include <unistd.h>
+#include <sys/loadavg.h>
+#elif defined(_AIX) && !defined(__PASE__)
+#include <libperfstat.h>
+#elif defined(linux) || defined(__GLIBC__)
+#include <sys/sysinfo.h>
+#endif
+
+#include "edit_distance.h"
+#include "metrics.h"
+
+using namespace std;
+
+void Fatal(const char* msg, ...) {
+ va_list ap;
+ fprintf(stderr, "ninja: fatal: ");
+ va_start(ap, msg);
+ vfprintf(stderr, msg, ap);
+ va_end(ap);
+ fprintf(stderr, "\n");
+#ifdef _WIN32
+ // On Windows, some tools may inject extra threads.
+ // exit() may block on locks held by those threads, so forcibly exit.
+ fflush(stderr);
+ fflush(stdout);
+ ExitProcess(1);
+#else
+ exit(1);
+#endif
+}
+
+void Warning(const char* msg, ...) {
+ va_list ap;
+ fprintf(stderr, "ninja: warning: ");
+ va_start(ap, msg);
+ vfprintf(stderr, msg, ap);
+ va_end(ap);
+ fprintf(stderr, "\n");
+}
+
+void Error(const char* msg, ...) {
+ va_list ap;
+ fprintf(stderr, "ninja: error: ");
+ va_start(ap, msg);
+ vfprintf(stderr, msg, ap);
+ va_end(ap);
+ fprintf(stderr, "\n");
+}
+
+bool CanonicalizePath(string* path, uint64_t* slash_bits, string* err) {
+ METRIC_RECORD("canonicalize str");
+ size_t len = path->size();
+ char* str = 0;
+ if (len > 0)
+ str = &(*path)[0];
+ if (!CanonicalizePath(str, &len, slash_bits, err))
+ return false;
+ path->resize(len);
+ return true;
+}
+
+static bool IsPathSeparator(char c) {
+#ifdef _WIN32
+ return c == '/' || c == '\\';
+#else
+ return c == '/';
+#endif
+}
+
+bool CanonicalizePath(char* path, size_t* len, uint64_t* slash_bits,
+ string* err) {
+ // WARNING: this function is performance-critical; please benchmark
+ // any changes you make to it.
+ METRIC_RECORD("canonicalize path");
+ if (*len == 0) {
+ *err = "empty path";
+ return false;
+ }
+
+ const int kMaxPathComponents = 60;
+ char* components[kMaxPathComponents];
+ int component_count = 0;
+
+ char* start = path;
+ char* dst = start;
+ const char* src = start;
+ const char* end = start + *len;
+
+ if (IsPathSeparator(*src)) {
+#ifdef _WIN32
+
+ // network path starts with //
+ if (*len > 1 && IsPathSeparator(*(src + 1))) {
+ src += 2;
+ dst += 2;
+ } else {
+ ++src;
+ ++dst;
+ }
+#else
+ ++src;
+ ++dst;
+#endif
+ }
+
+ while (src < end) {
+ if (*src == '.') {
+ if (src + 1 == end || IsPathSeparator(src[1])) {
+ // '.' component; eliminate.
+ src += 2;
+ continue;
+ } else if (src[1] == '.' && (src + 2 == end || IsPathSeparator(src[2]))) {
+ // '..' component. Back up if possible.
+ if (component_count > 0) {
+ dst = components[component_count - 1];
+ src += 3;
+ --component_count;
+ } else {
+ *dst++ = *src++;
+ *dst++ = *src++;
+ *dst++ = *src++;
+ }
+ continue;
+ }
+ }
+
+ if (IsPathSeparator(*src)) {
+ src++;
+ continue;
+ }
+
+ if (component_count == kMaxPathComponents)
+ Fatal("path has too many components : %s", path);
+ components[component_count] = dst;
+ ++component_count;
+
+ while (src != end && !IsPathSeparator(*src))
+ *dst++ = *src++;
+ *dst++ = *src++; // Copy '/' or final \0 character as well.
+ }
+
+ if (dst == start) {
+ *dst++ = '.';
+ *dst++ = '\0';
+ }
+
+ *len = dst - start - 1;
+#ifdef _WIN32
+ uint64_t bits = 0;
+ uint64_t bits_mask = 1;
+
+ for (char* c = start; c < start + *len; ++c) {
+ switch (*c) {
+ case '\\':
+ bits |= bits_mask;
+ *c = '/';
+ NINJA_FALLTHROUGH;
+ case '/':
+ bits_mask <<= 1;
+ }
+ }
+
+ *slash_bits = bits;
+#else
+ *slash_bits = 0;
+#endif
+ return true;
+}
+
+static inline bool IsKnownShellSafeCharacter(char ch) {
+ if ('A' <= ch && ch <= 'Z') return true;
+ if ('a' <= ch && ch <= 'z') return true;
+ if ('0' <= ch && ch <= '9') return true;
+
+ switch (ch) {
+ case '_':
+ case '+':
+ case '-':
+ case '.':
+ case '/':
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool IsKnownWin32SafeCharacter(char ch) {
+ switch (ch) {
+ case ' ':
+ case '"':
+ return false;
+ default:
+ return true;
+ }
+}
+
+static inline bool StringNeedsShellEscaping(const string& input) {
+ for (size_t i = 0; i < input.size(); ++i) {
+ if (!IsKnownShellSafeCharacter(input[i])) return true;
+ }
+ return false;
+}
+
+static inline bool StringNeedsWin32Escaping(const string& input) {
+ for (size_t i = 0; i < input.size(); ++i) {
+ if (!IsKnownWin32SafeCharacter(input[i])) return true;
+ }
+ return false;
+}
+
+void GetShellEscapedString(const string& input, string* result) {
+ assert(result);
+
+ if (!StringNeedsShellEscaping(input)) {
+ result->append(input);
+ return;
+ }
+
+ const char kQuote = '\'';
+ const char kEscapeSequence[] = "'\\'";
+
+ result->push_back(kQuote);
+
+ string::const_iterator span_begin = input.begin();
+ for (string::const_iterator it = input.begin(), end = input.end(); it != end;
+ ++it) {
+ if (*it == kQuote) {
+ result->append(span_begin, it);
+ result->append(kEscapeSequence);
+ span_begin = it;
+ }
+ }
+ result->append(span_begin, input.end());
+ result->push_back(kQuote);
+}
+
+
+void GetWin32EscapedString(const string& input, string* result) {
+ assert(result);
+ if (!StringNeedsWin32Escaping(input)) {
+ result->append(input);
+ return;
+ }
+
+ const char kQuote = '"';
+ const char kBackslash = '\\';
+
+ result->push_back(kQuote);
+ size_t consecutive_backslash_count = 0;
+ string::const_iterator span_begin = input.begin();
+ for (string::const_iterator it = input.begin(), end = input.end(); it != end;
+ ++it) {
+ switch (*it) {
+ case kBackslash:
+ ++consecutive_backslash_count;
+ break;
+ case kQuote:
+ result->append(span_begin, it);
+ result->append(consecutive_backslash_count + 1, kBackslash);
+ span_begin = it;
+ consecutive_backslash_count = 0;
+ break;
+ default:
+ consecutive_backslash_count = 0;
+ break;
+ }
+ }
+ result->append(span_begin, input.end());
+ result->append(consecutive_backslash_count, kBackslash);
+ result->push_back(kQuote);
+}
+
+int ReadFile(const string& path, string* contents, string* err) {
+#ifdef _WIN32
+ // This makes a ninja run on a set of 1500 manifest files about 4% faster
+ // than using the generic fopen code below.
+ err->clear();
+ HANDLE f = ::CreateFileA(path.c_str(), GENERIC_READ, FILE_SHARE_READ, NULL,
+ OPEN_EXISTING, FILE_FLAG_SEQUENTIAL_SCAN, NULL);
+ if (f == INVALID_HANDLE_VALUE) {
+ err->assign(GetLastErrorString());
+ return -ENOENT;
+ }
+
+ for (;;) {
+ DWORD len;
+ char buf[64 << 10];
+ if (!::ReadFile(f, buf, sizeof(buf), &len, NULL)) {
+ err->assign(GetLastErrorString());
+ contents->clear();
+ return -1;
+ }
+ if (len == 0)
+ break;
+ contents->append(buf, len);
+ }
+ ::CloseHandle(f);
+ return 0;
+#else
+ FILE* f = fopen(path.c_str(), "rb");
+ if (!f) {
+ err->assign(strerror(errno));
+ return -errno;
+ }
+
+ struct stat st;
+ if (fstat(fileno(f), &st) < 0) {
+ err->assign(strerror(errno));
+ fclose(f);
+ return -errno;
+ }
+
+ // +1 is for the resize in ManifestParser::Load
+ contents->reserve(st.st_size + 1);
+
+ char buf[64 << 10];
+ size_t len;
+ while (!feof(f) && (len = fread(buf, 1, sizeof(buf), f)) > 0) {
+ contents->append(buf, len);
+ }
+ if (ferror(f)) {
+ err->assign(strerror(errno)); // XXX errno?
+ contents->clear();
+ fclose(f);
+ return -errno;
+ }
+ fclose(f);
+ return 0;
+#endif
+}
+
+void SetCloseOnExec(int fd) {
+#ifndef _WIN32
+ int flags = fcntl(fd, F_GETFD);
+ if (flags < 0) {
+ perror("fcntl(F_GETFD)");
+ } else {
+ if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) < 0)
+ perror("fcntl(F_SETFD)");
+ }
+#else
+ HANDLE hd = (HANDLE) _get_osfhandle(fd);
+ if (! SetHandleInformation(hd, HANDLE_FLAG_INHERIT, 0)) {
+ fprintf(stderr, "SetHandleInformation(): %s", GetLastErrorString().c_str());
+ }
+#endif // ! _WIN32
+}
+
+
+const char* SpellcheckStringV(const string& text,
+ const vector<const char*>& words) {
+ const bool kAllowReplacements = true;
+ const int kMaxValidEditDistance = 3;
+
+ int min_distance = kMaxValidEditDistance + 1;
+ const char* result = NULL;
+ for (vector<const char*>::const_iterator i = words.begin();
+ i != words.end(); ++i) {
+ int distance = EditDistance(*i, text, kAllowReplacements,
+ kMaxValidEditDistance);
+ if (distance < min_distance) {
+ min_distance = distance;
+ result = *i;
+ }
+ }
+ return result;
+}
+
+const char* SpellcheckString(const char* text, ...) {
+ // Note: This takes a const char* instead of a string& because using
+ // va_start() with a reference parameter is undefined behavior.
+ va_list ap;
+ va_start(ap, text);
+ vector<const char*> words;
+ const char* word;
+ while ((word = va_arg(ap, const char*)))
+ words.push_back(word);
+ va_end(ap);
+ return SpellcheckStringV(text, words);
+}
+
+#ifdef _WIN32
+string GetLastErrorString() {
+ DWORD err = GetLastError();
+
+ char* msg_buf;
+ FormatMessageA(
+ FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL,
+ err,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (char*)&msg_buf,
+ 0,
+ NULL);
+ string msg = msg_buf;
+ LocalFree(msg_buf);
+ return msg;
+}
+
+void Win32Fatal(const char* function, const char* hint) {
+ if (hint) {
+ Fatal("%s: %s (%s)", function, GetLastErrorString().c_str(), hint);
+ } else {
+ Fatal("%s: %s", function, GetLastErrorString().c_str());
+ }
+}
+#endif
+
+bool islatinalpha(int c) {
+ // isalpha() is locale-dependent.
+ return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
+}
+
+string StripAnsiEscapeCodes(const string& in) {
+ string stripped;
+ stripped.reserve(in.size());
+
+ for (size_t i = 0; i < in.size(); ++i) {
+ if (in[i] != '\33') {
+ // Not an escape code.
+ stripped.push_back(in[i]);
+ continue;
+ }
+
+ // Only strip CSIs for now.
+ if (i + 1 >= in.size()) break;
+ if (in[i + 1] != '[') continue; // Not a CSI.
+ i += 2;
+
+ // Skip everything up to and including the next [a-zA-Z].
+ while (i < in.size() && !islatinalpha(in[i]))
+ ++i;
+ }
+ return stripped;
+}
+
+int GetProcessorCount() {
+#ifdef _WIN32
+ return GetActiveProcessorCount(ALL_PROCESSOR_GROUPS);
+#else
+#ifdef CPU_COUNT
+ // The number of exposed processors might not represent the actual number of
+ // processors threads can run on. This happens when a CPU set limitation is
+ // active, see https://github.com/ninja-build/ninja/issues/1278
+ cpu_set_t set;
+ if (sched_getaffinity(getpid(), sizeof(set), &set) == 0) {
+ return CPU_COUNT(&set);
+ }
+#endif
+ return sysconf(_SC_NPROCESSORS_ONLN);
+#endif
+}
+
+#if defined(_WIN32) || defined(__CYGWIN__)
+static double CalculateProcessorLoad(uint64_t idle_ticks, uint64_t total_ticks)
+{
+ static uint64_t previous_idle_ticks = 0;
+ static uint64_t previous_total_ticks = 0;
+ static double previous_load = -0.0;
+
+ uint64_t idle_ticks_since_last_time = idle_ticks - previous_idle_ticks;
+ uint64_t total_ticks_since_last_time = total_ticks - previous_total_ticks;
+
+ bool first_call = (previous_total_ticks == 0);
+ bool ticks_not_updated_since_last_call = (total_ticks_since_last_time == 0);
+
+ double load;
+ if (first_call || ticks_not_updated_since_last_call) {
+ load = previous_load;
+ } else {
+ // Calculate load.
+ double idle_to_total_ratio =
+ ((double)idle_ticks_since_last_time) / total_ticks_since_last_time;
+ double load_since_last_call = 1.0 - idle_to_total_ratio;
+
+ // Filter/smooth result when possible.
+ if(previous_load > 0) {
+ load = 0.9 * previous_load + 0.1 * load_since_last_call;
+ } else {
+ load = load_since_last_call;
+ }
+ }
+
+ previous_load = load;
+ previous_total_ticks = total_ticks;
+ previous_idle_ticks = idle_ticks;
+
+ return load;
+}
+
+static uint64_t FileTimeToTickCount(const FILETIME & ft)
+{
+ uint64_t high = (((uint64_t)(ft.dwHighDateTime)) << 32);
+ uint64_t low = ft.dwLowDateTime;
+ return (high | low);
+}
+
+double GetLoadAverage() {
+ FILETIME idle_time, kernel_time, user_time;
+ BOOL get_system_time_succeeded =
+ GetSystemTimes(&idle_time, &kernel_time, &user_time);
+
+ double posix_compatible_load;
+ if (get_system_time_succeeded) {
+ uint64_t idle_ticks = FileTimeToTickCount(idle_time);
+
+ // kernel_time from GetSystemTimes already includes idle_time.
+ uint64_t total_ticks =
+ FileTimeToTickCount(kernel_time) + FileTimeToTickCount(user_time);
+
+ double processor_load = CalculateProcessorLoad(idle_ticks, total_ticks);
+ posix_compatible_load = processor_load * GetProcessorCount();
+
+ } else {
+ posix_compatible_load = -0.0;
+ }
+
+ return posix_compatible_load;
+}
+#elif defined(__PASE__)
+double GetLoadAverage() {
+ return -0.0f;
+}
+#elif defined(_AIX)
+double GetLoadAverage() {
+ perfstat_cpu_total_t cpu_stats;
+ if (perfstat_cpu_total(NULL, &cpu_stats, sizeof(cpu_stats), 1) < 0) {
+ return -0.0f;
+ }
+
+ // Calculation taken from comment in libperfstats.h
+ return double(cpu_stats.loadavg[0]) / double(1 << SBITS);
+}
+#elif defined(__UCLIBC__) || (defined(__BIONIC__) && __ANDROID_API__ < 29)
+double GetLoadAverage() {
+ struct sysinfo si;
+ if (sysinfo(&si) != 0)
+ return -0.0f;
+ return 1.0 / (1 << SI_LOAD_SHIFT) * si.loads[0];
+}
+#else
+double GetLoadAverage() {
+ double loadavg[3] = { 0.0f, 0.0f, 0.0f };
+ if (getloadavg(loadavg, 3) < 0) {
+ // Maybe we should return an error here or the availability of
+ // getloadavg(3) should be checked when ninja is configured.
+ return -0.0f;
+ }
+ return loadavg[0];
+}
+#endif // _WIN32 || __CYGWIN__
+
+string ElideMiddle(const string& str, size_t width) {
+ switch (width) {
+ case 0: return "";
+ case 1: return ".";
+ case 2: return "..";
+ case 3: return "...";
+ }
+ const int kMargin = 3; // Space for "...".
+ string result = str;
+ if (result.size() > width) {
+ size_t elide_size = (width - kMargin) / 2;
+ result = result.substr(0, elide_size)
+ + "..."
+ + result.substr(result.size() - elide_size, elide_size);
+ }
+ return result;
+}
+
+bool Truncate(const string& path, size_t size, string* err) {
+#ifdef _WIN32
+ int fh = _sopen(path.c_str(), _O_RDWR | _O_CREAT, _SH_DENYNO,
+ _S_IREAD | _S_IWRITE);
+ int success = _chsize(fh, size);
+ _close(fh);
+#else
+ int success = truncate(path.c_str(), size);
+#endif
+ // Both truncate() and _chsize() return 0 on success and set errno and return
+ // -1 on failure.
+ if (success < 0) {
+ *err = strerror(errno);
+ return false;
+ }
+ return true;
+}
diff --git a/src/util.h b/src/util.h
new file mode 100644
index 0000000..4e6ebb8
--- /dev/null
+++ b/src/util.h
@@ -0,0 +1,125 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_UTIL_H_
+#define NINJA_UTIL_H_
+
+#ifdef _WIN32
+#include "win32port.h"
+#else
+#include <stdint.h>
+#endif
+
+#include <string>
+#include <vector>
+
+#ifdef _MSC_VER
+#define NORETURN __declspec(noreturn)
+#else
+#define NORETURN __attribute__((noreturn))
+#endif
+
+/// Log a fatal message and exit.
+NORETURN void Fatal(const char* msg, ...);
+
+// Have a generic fall-through for different versions of C/C++.
+#if defined(__cplusplus) && __cplusplus >= 201703L
+#define NINJA_FALLTHROUGH [[fallthrough]]
+#elif defined(__cplusplus) && __cplusplus >= 201103L && defined(__clang__)
+#define NINJA_FALLTHROUGH [[clang::fallthrough]]
+#elif defined(__cplusplus) && __cplusplus >= 201103L && defined(__GNUC__) && \
+ __GNUC__ >= 7
+#define NINJA_FALLTHROUGH [[gnu::fallthrough]]
+#elif defined(__GNUC__) && __GNUC__ >= 7 // gcc 7
+#define NINJA_FALLTHROUGH __attribute__ ((fallthrough))
+#else // C++11 on gcc 6, and all other cases
+#define NINJA_FALLTHROUGH
+#endif
+
+/// Log a warning message.
+void Warning(const char* msg, ...);
+
+/// Log an error message.
+void Error(const char* msg, ...);
+
+/// Canonicalize a path like "foo/../bar.h" into just "bar.h".
+/// |slash_bits| has bits set starting from lowest for a backslash that was
+/// normalized to a forward slash. (only used on Windows)
+bool CanonicalizePath(std::string* path, uint64_t* slash_bits,
+ std::string* err);
+bool CanonicalizePath(char* path, size_t* len, uint64_t* slash_bits,
+ std::string* err);
+
+/// Appends |input| to |*result|, escaping according to the whims of either
+/// Bash, or Win32's CommandLineToArgvW().
+/// Appends the string directly to |result| without modification if we can
+/// determine that it contains no problematic characters.
+void GetShellEscapedString(const std::string& input, std::string* result);
+void GetWin32EscapedString(const std::string& input, std::string* result);
+
+/// Read a file to a string (in text mode: with CRLF conversion
+/// on Windows).
+/// Returns -errno and fills in \a err on error.
+int ReadFile(const std::string& path, std::string* contents, std::string* err);
+
+/// Mark a file descriptor to not be inherited on exec()s.
+void SetCloseOnExec(int fd);
+
+/// Given a misspelled string and a list of correct spellings, returns
+/// the closest match or NULL if there is no close enough match.
+const char* SpellcheckStringV(const std::string& text,
+ const std::vector<const char*>& words);
+
+/// Like SpellcheckStringV, but takes a NULL-terminated list.
+const char* SpellcheckString(const char* text, ...);
+
+bool islatinalpha(int c);
+
+/// Removes all Ansi escape codes (http://www.termsys.demon.co.uk/vtansi.htm).
+std::string StripAnsiEscapeCodes(const std::string& in);
+
+/// @return the number of processors on the machine. Useful for an initial
+/// guess for how many jobs to run in parallel. @return 0 on error.
+int GetProcessorCount();
+
+/// @return the load average of the machine. A negative value is returned
+/// on error.
+double GetLoadAverage();
+
+/// Elide the given string @a str with '...' in the middle if the length
+/// exceeds @a width.
+std::string ElideMiddle(const std::string& str, size_t width);
+
+/// Truncates a file to the given size.
+bool Truncate(const std::string& path, size_t size, std::string* err);
+
+#ifdef _MSC_VER
+#define snprintf _snprintf
+#define fileno _fileno
+#define unlink _unlink
+#define chdir _chdir
+#define strtoull _strtoui64
+#define getcwd _getcwd
+#define PATH_MAX _MAX_PATH
+#endif
+
+#ifdef _WIN32
+/// Convert the value returned by GetLastError() into a string.
+std::string GetLastErrorString();
+
+/// Calls Fatal() with a function name and GetLastErrorString.
+NORETURN void Win32Fatal(const char* function, const char* hint = NULL);
+#endif
+
+#endif // NINJA_UTIL_H_
diff --git a/src/util_test.cc b/src/util_test.cc
new file mode 100644
index 0000000..1621c91
--- /dev/null
+++ b/src/util_test.cc
@@ -0,0 +1,436 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "util.h"
+
+#include "test.h"
+
+using namespace std;
+
+namespace {
+
+bool CanonicalizePath(string* path, string* err) {
+ uint64_t unused;
+ return ::CanonicalizePath(path, &unused, err);
+}
+
+} // namespace
+
+TEST(CanonicalizePath, PathSamples) {
+ string path;
+ string err;
+
+ EXPECT_FALSE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("empty path", err);
+
+ path = "foo.h"; err = "";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("foo.h", path);
+
+ path = "./foo.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("foo.h", path);
+
+ path = "./foo/./bar.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("foo/bar.h", path);
+
+ path = "./x/foo/../bar.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("x/bar.h", path);
+
+ path = "./x/foo/../../bar.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("bar.h", path);
+
+ path = "foo//bar";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("foo/bar", path);
+
+ path = "foo//.//..///bar";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("bar", path);
+
+ path = "./x/../foo/../../bar.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("../bar.h", path);
+
+ path = "foo/./.";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("foo", path);
+
+ path = "foo/bar/..";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("foo", path);
+
+ path = "foo/.hidden_bar";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("foo/.hidden_bar", path);
+
+ path = "/foo";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("/foo", path);
+
+ path = "//foo";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+#ifdef _WIN32
+ EXPECT_EQ("//foo", path);
+#else
+ EXPECT_EQ("/foo", path);
+#endif
+
+ path = "/";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("", path);
+
+ path = "/foo/..";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("", path);
+
+ path = ".";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ(".", path);
+
+ path = "./.";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ(".", path);
+
+ path = "foo/..";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ(".", path);
+}
+
+#ifdef _WIN32
+TEST(CanonicalizePath, PathSamplesWindows) {
+ string path;
+ string err;
+
+ EXPECT_FALSE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("empty path", err);
+
+ path = "foo.h"; err = "";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("foo.h", path);
+
+ path = ".\\foo.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("foo.h", path);
+
+ path = ".\\foo\\.\\bar.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("foo/bar.h", path);
+
+ path = ".\\x\\foo\\..\\bar.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("x/bar.h", path);
+
+ path = ".\\x\\foo\\..\\..\\bar.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("bar.h", path);
+
+ path = "foo\\\\bar";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("foo/bar", path);
+
+ path = "foo\\\\.\\\\..\\\\\\bar";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("bar", path);
+
+ path = ".\\x\\..\\foo\\..\\..\\bar.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("../bar.h", path);
+
+ path = "foo\\.\\.";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("foo", path);
+
+ path = "foo\\bar\\..";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("foo", path);
+
+ path = "foo\\.hidden_bar";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("foo/.hidden_bar", path);
+
+ path = "\\foo";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("/foo", path);
+
+ path = "\\\\foo";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("//foo", path);
+
+ path = "\\";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("", path);
+}
+
+TEST(CanonicalizePath, SlashTracking) {
+ string path;
+ string err;
+ uint64_t slash_bits;
+
+ path = "foo.h"; err = "";
+ EXPECT_TRUE(CanonicalizePath(&path, &slash_bits, &err));
+ EXPECT_EQ("foo.h", path);
+ EXPECT_EQ(0, slash_bits);
+
+ path = "a\\foo.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &slash_bits, &err));
+ EXPECT_EQ("a/foo.h", path);
+ EXPECT_EQ(1, slash_bits);
+
+ path = "a/bcd/efh\\foo.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &slash_bits, &err));
+ EXPECT_EQ("a/bcd/efh/foo.h", path);
+ EXPECT_EQ(4, slash_bits);
+
+ path = "a\\bcd/efh\\foo.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &slash_bits, &err));
+ EXPECT_EQ("a/bcd/efh/foo.h", path);
+ EXPECT_EQ(5, slash_bits);
+
+ path = "a\\bcd\\efh\\foo.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &slash_bits, &err));
+ EXPECT_EQ("a/bcd/efh/foo.h", path);
+ EXPECT_EQ(7, slash_bits);
+
+ path = "a/bcd/efh/foo.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &slash_bits, &err));
+ EXPECT_EQ("a/bcd/efh/foo.h", path);
+ EXPECT_EQ(0, slash_bits);
+
+ path = "a\\./efh\\foo.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &slash_bits, &err));
+ EXPECT_EQ("a/efh/foo.h", path);
+ EXPECT_EQ(3, slash_bits);
+
+ path = "a\\../efh\\foo.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &slash_bits, &err));
+ EXPECT_EQ("efh/foo.h", path);
+ EXPECT_EQ(1, slash_bits);
+
+ path = "a\\b\\c\\d\\e\\f\\g\\foo.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &slash_bits, &err));
+ EXPECT_EQ("a/b/c/d/e/f/g/foo.h", path);
+ EXPECT_EQ(127, slash_bits);
+
+ path = "a\\b\\c\\..\\..\\..\\g\\foo.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &slash_bits, &err));
+ EXPECT_EQ("g/foo.h", path);
+ EXPECT_EQ(1, slash_bits);
+
+ path = "a\\b/c\\../../..\\g\\foo.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &slash_bits, &err));
+ EXPECT_EQ("g/foo.h", path);
+ EXPECT_EQ(1, slash_bits);
+
+ path = "a\\b/c\\./../..\\g\\foo.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &slash_bits, &err));
+ EXPECT_EQ("a/g/foo.h", path);
+ EXPECT_EQ(3, slash_bits);
+
+ path = "a\\b/c\\./../..\\g/foo.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &slash_bits, &err));
+ EXPECT_EQ("a/g/foo.h", path);
+ EXPECT_EQ(1, slash_bits);
+
+ path = "a\\\\\\foo.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &slash_bits, &err));
+ EXPECT_EQ("a/foo.h", path);
+ EXPECT_EQ(1, slash_bits);
+
+ path = "a/\\\\foo.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &slash_bits, &err));
+ EXPECT_EQ("a/foo.h", path);
+ EXPECT_EQ(0, slash_bits);
+
+ path = "a\\//foo.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &slash_bits, &err));
+ EXPECT_EQ("a/foo.h", path);
+ EXPECT_EQ(1, slash_bits);
+}
+
+TEST(CanonicalizePath, CanonicalizeNotExceedingLen) {
+ // Make sure searching \/ doesn't go past supplied len.
+ char buf[] = "foo/bar\\baz.h\\"; // Last \ past end.
+ uint64_t slash_bits;
+ string err;
+ size_t size = 13;
+ EXPECT_TRUE(::CanonicalizePath(buf, &size, &slash_bits, &err));
+ EXPECT_EQ(0, strncmp("foo/bar/baz.h", buf, size));
+ EXPECT_EQ(2, slash_bits); // Not including the trailing one.
+}
+
+TEST(CanonicalizePath, TooManyComponents) {
+ string path;
+ string err;
+ uint64_t slash_bits;
+
+ // 64 is OK.
+ path = "a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./"
+ "a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./x.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &slash_bits, &err));
+ EXPECT_EQ(slash_bits, 0x0);
+
+ // Backslashes version.
+ path =
+ "a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\"
+ "a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\"
+ "a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\"
+ "a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\x.h";
+
+ EXPECT_TRUE(CanonicalizePath(&path, &slash_bits, &err));
+ EXPECT_EQ(slash_bits, 0xffffffff);
+
+ // 65 is OK if #component is less than 60 after path canonicalization.
+ err = "";
+ path = "a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./"
+ "a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./x/y.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &slash_bits, &err));
+ EXPECT_EQ(slash_bits, 0x0);
+
+ // Backslashes version.
+ err = "";
+ path =
+ "a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\"
+ "a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\"
+ "a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\"
+ "a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\x\\y.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &slash_bits, &err));
+ EXPECT_EQ(slash_bits, 0x1ffffffff);
+
+
+ // 59 after canonicalization is OK.
+ err = "";
+ path = "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/"
+ "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/x/y.h";
+ EXPECT_EQ(58, std::count(path.begin(), path.end(), '/'));
+ EXPECT_TRUE(CanonicalizePath(&path, &slash_bits, &err));
+ EXPECT_EQ(slash_bits, 0x0);
+
+ // Backslashes version.
+ err = "";
+ path =
+ "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\"
+ "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\"
+ "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\"
+ "a\\a\\a\\a\\a\\a\\a\\a\\a\\x\\y.h";
+ EXPECT_EQ(58, std::count(path.begin(), path.end(), '\\'));
+ EXPECT_TRUE(CanonicalizePath(&path, &slash_bits, &err));
+ EXPECT_EQ(slash_bits, 0x3ffffffffffffff);
+}
+#endif
+
+TEST(CanonicalizePath, UpDir) {
+ string path, err;
+ path = "../../foo/bar.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("../../foo/bar.h", path);
+
+ path = "test/../../foo/bar.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("../foo/bar.h", path);
+}
+
+TEST(CanonicalizePath, AbsolutePath) {
+ string path = "/usr/include/stdio.h";
+ string err;
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("/usr/include/stdio.h", path);
+}
+
+TEST(CanonicalizePath, NotNullTerminated) {
+ string path;
+ string err;
+ size_t len;
+ uint64_t unused;
+
+ path = "foo/. bar/.";
+ len = strlen("foo/."); // Canonicalize only the part before the space.
+ EXPECT_TRUE(CanonicalizePath(&path[0], &len, &unused, &err));
+ EXPECT_EQ(strlen("foo"), len);
+ EXPECT_EQ("foo/. bar/.", string(path));
+
+ path = "foo/../file bar/.";
+ len = strlen("foo/../file");
+ EXPECT_TRUE(CanonicalizePath(&path[0], &len, &unused, &err));
+ EXPECT_EQ(strlen("file"), len);
+ EXPECT_EQ("file ./file bar/.", string(path));
+}
+
+TEST(PathEscaping, TortureTest) {
+ string result;
+
+ GetWin32EscapedString("foo bar\\\"'$@d!st!c'\\path'\\", &result);
+ EXPECT_EQ("\"foo bar\\\\\\\"'$@d!st!c'\\path'\\\\\"", result);
+ result.clear();
+
+ GetShellEscapedString("foo bar\"/'$@d!st!c'/path'", &result);
+ EXPECT_EQ("'foo bar\"/'\\''$@d!st!c'\\''/path'\\'''", result);
+}
+
+TEST(PathEscaping, SensiblePathsAreNotNeedlesslyEscaped) {
+ const char* path = "some/sensible/path/without/crazy/characters.c++";
+ string result;
+
+ GetWin32EscapedString(path, &result);
+ EXPECT_EQ(path, result);
+ result.clear();
+
+ GetShellEscapedString(path, &result);
+ EXPECT_EQ(path, result);
+}
+
+TEST(PathEscaping, SensibleWin32PathsAreNotNeedlesslyEscaped) {
+ const char* path = "some\\sensible\\path\\without\\crazy\\characters.c++";
+ string result;
+
+ GetWin32EscapedString(path, &result);
+ EXPECT_EQ(path, result);
+}
+
+TEST(StripAnsiEscapeCodes, EscapeAtEnd) {
+ string stripped = StripAnsiEscapeCodes("foo\33");
+ EXPECT_EQ("foo", stripped);
+
+ stripped = StripAnsiEscapeCodes("foo\33[");
+ EXPECT_EQ("foo", stripped);
+}
+
+TEST(StripAnsiEscapeCodes, StripColors) {
+ // An actual clang warning.
+ string input = "\33[1maffixmgr.cxx:286:15: \33[0m\33[0;1;35mwarning: "
+ "\33[0m\33[1musing the result... [-Wparentheses]\33[0m";
+ string stripped = StripAnsiEscapeCodes(input);
+ EXPECT_EQ("affixmgr.cxx:286:15: warning: using the result... [-Wparentheses]",
+ stripped);
+}
+
+TEST(ElideMiddle, NothingToElide) {
+ string input = "Nothing to elide in this short string.";
+ EXPECT_EQ(input, ElideMiddle(input, 80));
+ EXPECT_EQ(input, ElideMiddle(input, 38));
+ EXPECT_EQ("", ElideMiddle(input, 0));
+ EXPECT_EQ(".", ElideMiddle(input, 1));
+ EXPECT_EQ("..", ElideMiddle(input, 2));
+ EXPECT_EQ("...", ElideMiddle(input, 3));
+}
+
+TEST(ElideMiddle, ElideInTheMiddle) {
+ string input = "01234567890123456789";
+ string elided = ElideMiddle(input, 10);
+ EXPECT_EQ("012...789", elided);
+ EXPECT_EQ("01234567...23456789", ElideMiddle(input, 19));
+}
diff --git a/src/version.cc b/src/version.cc
new file mode 100644
index 0000000..7fee744
--- /dev/null
+++ b/src/version.cc
@@ -0,0 +1,55 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "version.h"
+
+#include <stdlib.h>
+
+#include "util.h"
+
+using namespace std;
+
+const char* kNinjaVersion = "1.10.2";
+
+void ParseVersion(const string& version, int* major, int* minor) {
+ size_t end = version.find('.');
+ *major = atoi(version.substr(0, end).c_str());
+ *minor = 0;
+ if (end != string::npos) {
+ size_t start = end + 1;
+ end = version.find('.', start);
+ *minor = atoi(version.substr(start, end).c_str());
+ }
+}
+
+void CheckNinjaVersion(const string& version) {
+ int bin_major, bin_minor;
+ ParseVersion(kNinjaVersion, &bin_major, &bin_minor);
+ int file_major, file_minor;
+ ParseVersion(version, &file_major, &file_minor);
+
+ if (bin_major > file_major) {
+ Warning("ninja executable version (%s) greater than build file "
+ "ninja_required_version (%s); versions may be incompatible.",
+ kNinjaVersion, version.c_str());
+ return;
+ }
+
+ if ((bin_major == file_major && bin_minor < file_minor) ||
+ bin_major < file_major) {
+ Fatal("ninja version (%s) incompatible with build file "
+ "ninja_required_version version (%s).",
+ kNinjaVersion, version.c_str());
+ }
+}
diff --git a/src/version.h b/src/version.h
new file mode 100644
index 0000000..9d84ecb
--- /dev/null
+++ b/src/version.h
@@ -0,0 +1,31 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_VERSION_H_
+#define NINJA_VERSION_H_
+
+#include <string>
+
+/// The version number of the current Ninja release. This will always
+/// be "git" on trunk.
+extern const char* kNinjaVersion;
+
+/// Parse the major/minor components of a version string.
+void ParseVersion(const std::string& version, int* major, int* minor);
+
+/// Check whether \a version is compatible with the current Ninja version,
+/// aborting if not.
+void CheckNinjaVersion(const std::string& required_version);
+
+#endif // NINJA_VERSION_H_
diff --git a/src/win32port.h b/src/win32port.h
new file mode 100644
index 0000000..e542536
--- /dev/null
+++ b/src/win32port.h
@@ -0,0 +1,39 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_WIN32PORT_H_
+#define NINJA_WIN32PORT_H_
+
+#if defined(__MINGW32__) || defined(__MINGW64__)
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+#include <inttypes.h>
+#endif
+
+typedef signed short int16_t;
+typedef unsigned short uint16_t;
+/// A 64-bit integer type
+typedef signed long long int64_t;
+typedef unsigned long long uint64_t;
+
+// printf format specifier for uint64_t, from C99.
+#ifndef PRIu64
+#define PRId64 "I64d"
+#define PRIu64 "I64u"
+#define PRIx64 "I64x"
+#endif
+
+#endif // NINJA_WIN32PORT_H_
+