diff --git a/.gitignore b/.gitignore index c424556b42f..c5dfc2b31cc 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ !/*/ !/.gitattributes !/.gitignore +!/.travis.yml !/makefile !/mame.doxygen !/*.md diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 00000000000..15718c091af --- /dev/null +++ b/.travis.yml @@ -0,0 +1,18 @@ +language: cpp +compiler: + - gcc + - clang +env: + - SUBTARGET=arcade MAME=mamearcade64 + - SUBTARGET=mess MAME=mess64 +script: + - if [ $CC == 'clang' ]; + then make -j2 linux_x64_clang && ./$MAME -validate; + else make -j2 OPTIMIZE=0 && ./$MAME -validate; + fi +sudo: required +before_install: + - sudo add-apt-repository ppa:zoogie/sdl2-snapshots -y + - sudo add-apt-repository ppa:shahar-evron/qt-backports -y + - sudo apt-get update -qq + - sudo apt-get install -y libsdl2-dev libsdl2-ttf-dev libasound2-dev libqt4-dev libqt4-dev-bin diff --git a/3rdparty/dxsdk/Include/d3dcommon.h b/3rdparty/dxsdk/Include/d3dcommon.h index 1baa2c262c4..9305a040fc2 100644 --- a/3rdparty/dxsdk/Include/d3dcommon.h +++ b/3rdparty/dxsdk/Include/d3dcommon.h @@ -81,6 +81,21 @@ enum D3D_FEATURE_LEVEL } D3D_FEATURE_LEVEL; typedef +#define D3D_FL9_1_REQ_TEXTURE1D_U_DIMENSION 2048 +#define D3D_FL9_3_REQ_TEXTURE1D_U_DIMENSION 4096 +#define D3D_FL9_1_REQ_TEXTURE2D_U_OR_V_DIMENSION 2048 +#define D3D_FL9_3_REQ_TEXTURE2D_U_OR_V_DIMENSION 4096 +#define D3D_FL9_1_REQ_TEXTURECUBE_DIMENSION 512 +#define D3D_FL9_3_REQ_TEXTURECUBE_DIMENSION 4096 +#define D3D_FL9_1_REQ_TEXTURE3D_U_V_OR_W_DIMENSION 256 +#define D3D_FL9_1_DEFAULT_MAX_ANISOTROPY 2 +#define D3D_FL9_1_IA_PRIMITIVE_MAX_COUNT 65535 +#define D3D_FL9_2_IA_PRIMITIVE_MAX_COUNT 1048575 +#define D3D_FL9_1_SIMULTANEOUS_RENDER_TARGET_COUNT 1 +#define D3D_FL9_3_SIMULTANEOUS_RENDER_TARGET_COUNT 4 +#define D3D_FL9_1_MAX_TEXTURE_REPEAT 128 +#define D3D_FL9_2_MAX_TEXTURE_REPEAT 2048 +#define D3D_FL9_3_MAX_TEXTURE_REPEAT 8192 enum D3D_PRIMITIVE_TOPOLOGY { D3D_PRIMITIVE_TOPOLOGY_UNDEFINED = 0, 
D3D_PRIMITIVE_TOPOLOGY_POINTLIST = 1, diff --git a/3rdparty/jsoncpp/.gitignore b/3rdparty/jsoncpp/.gitignore index 60c4a0b9aee..ef226a8875a 100644 --- a/3rdparty/jsoncpp/.gitignore +++ b/3rdparty/jsoncpp/.gitignore @@ -10,4 +10,27 @@ /libs/ /doc/doxyfile /dist/ -/include/json/version.h +#/version +#/include/json/version.h + +# MSVC project files: +*.sln +*.vcxproj +*.filters +*.user +*.sdf +*.opensdf +*.suo + +# MSVC build files: +*.lib +*.obj +*.tlog/ +*.pdb + +# CMake-generated files: +CMakeFiles/ +CTestTestFile.cmake +cmake_install.cmake +pkg-config/jsoncpp.pc +jsoncpp_lib_static.dir/ diff --git a/3rdparty/jsoncpp/.travis.yml b/3rdparty/jsoncpp/.travis.yml index a913b095849..17e52dcf89b 100644 --- a/3rdparty/jsoncpp/.travis.yml +++ b/3rdparty/jsoncpp/.travis.yml @@ -2,17 +2,24 @@ # http://about.travis-ci.org/docs/user/build-configuration/ # This file can be validated on: # http://lint.travis-ci.org/ -before_install: sudo apt-get install cmake + +#before_install: sudo apt-get install -y cmake +# cmake is pre-installed in Travis for both linux and osx + +before_install: + - sudo apt-get update -qq + - sudo apt-get install -qq valgrind +os: + - linux language: cpp compiler: - gcc - clang -script: cmake -DJSONCPP_LIB_BUILD_SHARED=$SHARED_LIBRARY -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DCMAKE_VERBOSE_MAKEFILE=$VERBOSE_MAKE . 
&& make +script: ./travis.sh env: matrix: - - SHARED_LIBRARY=ON BUILD_TYPE=release VERBOSE_MAKE=false - - SHARED_LIBRARY=OFF BUILD_TYPE=release VERBOSE_MAKE=false - - SHARED_LIBRARY=OFF BUILD_TYPE=debug VERBOSE VERBOSE_MAKE=true + - SHARED_LIB=ON STATIC_LIB=ON CMAKE_PKG=ON BUILD_TYPE=release VERBOSE_MAKE=false + - SHARED_LIB=OFF STATIC_LIB=ON CMAKE_PKG=OFF BUILD_TYPE=debug VERBOSE_MAKE=true VERBOSE notifications: email: - aaronjjacobs@gmail.com diff --git a/3rdparty/jsoncpp/CMakeLists.txt b/3rdparty/jsoncpp/CMakeLists.txt index 3e8f96ecb90..90eb14e30d0 100644 --- a/3rdparty/jsoncpp/CMakeLists.txt +++ b/3rdparty/jsoncpp/CMakeLists.txt @@ -1,12 +1,16 @@ +# vim: et ts=4 sts=4 sw=4 tw=0 + CMAKE_MINIMUM_REQUIRED(VERSION 2.8.5) PROJECT(jsoncpp) ENABLE_TESTING() -OPTION(JSONCPP_WITH_TESTS "Compile and run JsonCpp test executables" ON) +OPTION(JSONCPP_WITH_TESTS "Compile and (for jsoncpp_check) run JsonCpp test executables" ON) OPTION(JSONCPP_WITH_POST_BUILD_UNITTEST "Automatically run unit-tests as a post build step" ON) OPTION(JSONCPP_WITH_WARNING_AS_ERROR "Force compilation to fail if a warning occurs" OFF) OPTION(JSONCPP_WITH_PKGCONFIG_SUPPORT "Generate and install .pc files" ON) OPTION(JSONCPP_WITH_CMAKE_PACKAGE "Generate and install cmake package files" OFF) +OPTION(BUILD_SHARED_LIBS "Build jsoncpp_lib as a shared library." OFF) +OPTION(BUILD_STATIC_LIBS "Build jsoncpp_lib static library." 
ON) # Ensures that CMAKE_BUILD_TYPE is visible in cmake-gui on Unix IF(NOT WIN32) @@ -17,30 +21,21 @@ IF(NOT WIN32) ENDIF(NOT CMAKE_BUILD_TYPE) ENDIF(NOT WIN32) +SET(DEBUG_LIBNAME_SUFFIX "" CACHE STRING "Optional suffix to append to the library name for a debug build") SET(LIB_SUFFIX "" CACHE STRING "Optional arch-dependent suffix for the library installation directory") SET(RUNTIME_INSTALL_DIR bin CACHE PATH "Install dir for executables and dlls") -SET(ARCHIVE_INSTALL_DIR lib${LIB_SUFFIX} +SET(ARCHIVE_INSTALL_DIR ${CMAKE_INSTALL_PREFIX}/lib${LIB_SUFFIX} CACHE PATH "Install dir for static libraries") -SET(LIBRARY_INSTALL_DIR lib${LIB_SUFFIX} +SET(LIBRARY_INSTALL_DIR ${CMAKE_INSTALL_PREFIX}/lib${LIB_SUFFIX} CACHE PATH "Install dir for shared libraries") -SET(INCLUDE_INSTALL_DIR include +SET(INCLUDE_INSTALL_DIR ${CMAKE_INSTALL_PREFIX}/include CACHE PATH "Install dir for headers") SET(PACKAGE_INSTALL_DIR lib${LIB_SUFFIX}/cmake CACHE PATH "Install dir for cmake package config files") MARK_AS_ADVANCED( RUNTIME_INSTALL_DIR ARCHIVE_INSTALL_DIR INCLUDE_INSTALL_DIR PACKAGE_INSTALL_DIR ) -# This ensures shared DLL are in the same dir as executable on Windows. -# Put all executables / libraries are in a project global directory. 
-SET(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/lib - CACHE PATH "Single directory for all static libraries.") -SET(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/lib - CACHE PATH "Single directory for all dynamic libraries on Unix.") -SET(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/bin - CACHE PATH "Single directory for all executable and dynamic libraries on Windows.") -MARK_AS_ADVANCED( CMAKE_RUNTIME_OUTPUT_DIRECTORY CMAKE_LIBRARY_OUTPUT_DIRECTORY CMAKE_ARCHIVE_OUTPUT_DIRECTORY ) - # Set variable named ${VAR_NAME} to value ${VALUE} FUNCTION(set_using_dynamic_name VAR_NAME VALUE) SET( "${VAR_NAME}" "${VALUE}" PARENT_SCOPE) @@ -64,17 +59,24 @@ MACRO(jsoncpp_parse_version VERSION_TEXT OUPUT_PREFIX) ENDMACRO(jsoncpp_parse_version) # Read out version from "version" file -FILE(STRINGS "version" JSONCPP_VERSION) - +#FILE(STRINGS "version" JSONCPP_VERSION) +#SET( JSONCPP_VERSION_MAJOR X ) +#SET( JSONCPP_VERSION_MINOR Y ) +#SET( JSONCPP_VERSION_PATCH Z ) +SET( JSONCPP_VERSION 1.6.2 ) jsoncpp_parse_version( ${JSONCPP_VERSION} JSONCPP_VERSION ) -IF(NOT JSONCPP_VERSION_FOUND) - MESSAGE(FATAL_ERROR "Failed to parse version string properly. Expect X.Y.Z") -ENDIF(NOT JSONCPP_VERSION_FOUND) +#IF(NOT JSONCPP_VERSION_FOUND) +# MESSAGE(FATAL_ERROR "Failed to parse version string properly. 
Expect X.Y.Z") +#ENDIF(NOT JSONCPP_VERSION_FOUND) MESSAGE(STATUS "JsonCpp Version: ${JSONCPP_VERSION_MAJOR}.${JSONCPP_VERSION_MINOR}.${JSONCPP_VERSION_PATCH}") # File version.h is only regenerated on CMake configure step CONFIGURE_FILE( "${PROJECT_SOURCE_DIR}/src/lib_json/version.h.in" - "${PROJECT_SOURCE_DIR}/include/json/version.h" ) + "${PROJECT_SOURCE_DIR}/include/json/version.h" + NEWLINE_STYLE UNIX ) +CONFIGURE_FILE( "${PROJECT_SOURCE_DIR}/version.in" + "${PROJECT_SOURCE_DIR}/version" + NEWLINE_STYLE UNIX ) macro(UseCompilationWarningAsError) if ( MSVC ) @@ -93,6 +95,14 @@ if ( MSVC ) set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /W4 ") endif( MSVC ) +if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") + # using regular Clang or AppleClang + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Wshorten-64-to-32") +elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") + # using GCC + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x -Wall -Wextra -pedantic") +endif() + IF(JSONCPP_WITH_WARNING_AS_ERROR) UseCompilationWarningAsError() ENDIF(JSONCPP_WITH_WARNING_AS_ERROR) diff --git a/3rdparty/jsoncpp/NEWS.txt b/3rdparty/jsoncpp/NEWS.txt index 1be7b8ef894..5733fcd5ef4 100644 --- a/3rdparty/jsoncpp/NEWS.txt +++ b/3rdparty/jsoncpp/NEWS.txt @@ -80,7 +80,7 @@ New in SVN (e.g. MSVC 2008 command prompt in start menu) before running scons. - Added support for amalgamated source and header generation (a la sqlite). - Refer to README.txt section "Generating amalgamated source and header" + Refer to README.md section "Generating amalgamated source and header" for detail. * Value diff --git a/3rdparty/jsoncpp/README.md b/3rdparty/jsoncpp/README.md index 099f17fa2fd..93c8d1f593a 100644 --- a/3rdparty/jsoncpp/README.md +++ b/3rdparty/jsoncpp/README.md @@ -7,34 +7,62 @@ pairs. 
[json-org]: http://json.org/ -JsonCpp is a C++ library that allows manipulating JSON values, including +[JsonCpp][] is a C++ library that allows manipulating JSON values, including serialization and deserialization to and from strings. It can also preserve existing comment in unserialization/serialization steps, making it a convenient format to store user input files. +[JsonCpp]: http://open-source-parsers.github.io/jsoncpp-docs/doxygen/index.html + ## A note on backward-compatibility -Very soon, we are switching to C++11 only. For older compilers, try the `pre-C++11` branch. +* `1.y.z` is built with C++11. +* `0.y.z` can be used with older compilers. +* Major versions maintain binary-compatibility. -Using JsonCpp in your project +# Using JsonCpp in your project ----------------------------- - -The recommended approach to integrating JsonCpp in your project is to build -the amalgamated source (a single `.cpp` file) with your own build system. This -ensures consistency of compilation flags and ABI compatibility. See the section -"Generating amalgamated source and header" for instructions. +The recommended approach to integrating JsonCpp in your project is to include +the [amalgamated source](#generating-amalgamated-source-and-header) (a single +`.cpp` file and two `.h` files) in your project, and compile and build as you +would any other source file. This ensures consistency of compilation flags and +ABI compatibility, issues which arise when building shared or static +libraries. See the next section for instructions. The `include/` should be added to your compiler include path. Jsoncpp headers should be included as follow: #include -If JsonCpp was build as a dynamic library on Windows, then your project needs to +If JsonCpp was built as a dynamic library on Windows, then your project needs to define the macro `JSON_DLL`. 
+Generating amalgamated source and header +---------------------------------------- +JsonCpp is provided with a script to generate a single header and a single +source file to ease inclusion into an existing project. The amalgamated source +can be generated at any time by running the following command from the +top-directory (this requires Python 2.6): -Building and testing with new CMake ------------------------------------ + python amalgamate.py +It is possible to specify header name. See the `-h` option for detail. + +By default, the following files are generated: +* `dist/jsoncpp.cpp`: source file that needs to be added to your project. +* `dist/json/json.h`: corresponding header file for use in your project. It is + equivalent to including `json/json.h` in non-amalgamated source. This header + only depends on standard headers. +* `dist/json/json-forwards.h`: header that provides forward declaration of all + JsonCpp types. + +The amalgamated sources are generated by concatenating JsonCpp source in the +correct order and defining the macro `JSON_IS_AMALGAMATION` to prevent inclusion +of other headers. + +# Contributing to JsonCpp + +Building and testing with CMake +------------------------------- [CMake][] is a C++ Makefiles/Solution generator. It is usually available on most Linux system as package. On Ubuntu: @@ -57,7 +85,7 @@ Steps for generating solution/makefiles using `cmake-gui`: * Make "source code" point to the source directory. * Make "where to build the binary" point to the directory to use for the build. * Click on the "Grouped" check box. -* Review JsonCpp build options (tick `JSONCPP_LIB_BUILD_SHARED` to build as a +* Review JsonCpp build options (tick `BUILD_SHARED_LIBS` to build as a dynamic library). * Click the configure button at the bottom, then the generate button. * The generated solution/makefiles can be found in the binary directory. 
@@ -66,19 +94,17 @@ Alternatively, from the command-line on Unix in the source directory: mkdir -p build/debug cd build/debug - cmake -DCMAKE_BUILD_TYPE=debug -DJSONCPP_LIB_BUILD_SHARED=OFF -G "Unix Makefiles" ../.. + cmake -DCMAKE_BUILD_TYPE=debug -DBUILD_STATIC_LIBS=ON -DBUILD_SHARED_LIBS=OFF -DARCHIVE_INSTALL_DIR=. -G "Unix Makefiles" ../.. make -Running `cmake -`" will display the list of available generators (passed using +Running `cmake -h` will display the list of available generators (passed using the `-G` option). By default CMake hides compilation commands. This can be modified by specifying `-DCMAKE_VERBOSE_MAKEFILE=true` when generating makefiles. - Building and testing with SCons ------------------------------- - **Note:** The SCons-based build system is deprecated. Please use CMake; see the section above. @@ -107,14 +133,7 @@ If you are building with Microsoft Visual Studio 2008, you need to set up the environment by running `vcvars32.bat` (e.g. MSVC 2008 command prompt) before running SCons. - -Running the tests manually --------------------------- - -Note that test can be run using SCons using the `check` target: - - scons platform=$PLATFORM check - +## Running the tests manually You need to run tests manually only if you are troubleshooting an issue. In the instructions below, replace `path/to/jsontest` with the path of the @@ -137,45 +156,21 @@ In the instructions below, replace `path/to/jsontest` with the path of the # You can run the tests using valgrind: python rununittests.py --valgrind path/to/test_lib_json +## Running the tests using scons +Note that tests can be run using SCons using the `check` target: + + scons platform=$PLATFORM check Building the documentation -------------------------- - Run the Python script `doxybuild.py` from the top directory: python doxybuild.py --doxygen=$(which doxygen) --open --with-dot See `doxybuild.py --help` for options. 
- -Generating amalgamated source and header ----------------------------------------- - -JsonCpp is provided with a script to generate a single header and a single -source file to ease inclusion into an existing project. The amalgamated source -can be generated at any time by running the following command from the -top-directory (this requires Python 2.6): - - python amalgamate.py - -It is possible to specify header name. See the `-h` option for detail. - -By default, the following files are generated: -* `dist/jsoncpp.cpp`: source file that needs to be added to your project. -* `dist/json/json.h`: corresponding header file for use in your project. It is - equivalent to including `json/json.h` in non-amalgamated source. This header - only depends on standard headers. -* `dist/json/json-forwards.h`: header that provides forward declaration of all - JsonCpp types. - -The amalgamated sources are generated by concatenating JsonCpp source in the -correct order and defining the macro `JSON_IS_AMALGAMATION` to prevent inclusion -of other headers. - - Adding a reader/writer test --------------------------- - To add a test, you need to create two files in test/data: * a `TESTNAME.json` file, that contains the input document in JSON format. @@ -195,10 +190,8 @@ The `TESTNAME.expected` file format is as follows: See the examples `test_complex_01.json` and `test_complex_01.expected` to better understand element paths. - Understanding reader/writer test output --------------------------------------- - When a test is run, output files are generated beside the input test files. Below is a short description of the content of each file: @@ -215,10 +208,7 @@ Below is a short description of the content of each file: * `test_complex_01.process-output`: `jsontest` output, typically useful for understanding parsing errors. - License ------- - See the `LICENSE` file for details. 
In summary, JsonCpp is licensed under the MIT license, or public domain if desired and recognized in your jurisdiction. - diff --git a/3rdparty/jsoncpp/SConstruct b/3rdparty/jsoncpp/SConstruct index 1c55bcd0986..f3a73f773bf 100644 --- a/3rdparty/jsoncpp/SConstruct +++ b/3rdparty/jsoncpp/SConstruct @@ -237,7 +237,7 @@ RunUnitTests = ActionFactory(runUnitTests_action, runUnitTests_string ) env.Alias( 'check' ) srcdist_cmd = env['SRCDIST_ADD']( source = """ - AUTHORS README.txt SConstruct + AUTHORS README.md SConstruct """.split() ) env.Alias( 'src-dist', srcdist_cmd ) diff --git a/3rdparty/jsoncpp/amalgamate.py b/3rdparty/jsoncpp/amalgamate.py index 550f6a67624..1916bb0d5e7 100644 --- a/3rdparty/jsoncpp/amalgamate.py +++ b/3rdparty/jsoncpp/amalgamate.py @@ -1,6 +1,6 @@ """Amalgate json-cpp library sources into a single source and header file. -Requires Python 2.6 +Works with python2.6+ and python3.4+. Example of invocation (must be invoked from json-cpp top directory): python amalgate.py @@ -10,46 +10,46 @@ import os.path import sys class AmalgamationFile: - def __init__( self, top_dir ): + def __init__(self, top_dir): self.top_dir = top_dir self.blocks = [] - def add_text( self, text ): - if not text.endswith( "\n" ): + def add_text(self, text): + if not text.endswith("\n"): text += "\n" - self.blocks.append( text ) + self.blocks.append(text) - def add_file( self, relative_input_path, wrap_in_comment=False ): - def add_marker( prefix ): - self.add_text( "" ) - self.add_text( "// " + "/"*70 ) - self.add_text( "// %s of content of file: %s" % (prefix, relative_input_path.replace("\\","/")) ) - self.add_text( "// " + "/"*70 ) - self.add_text( "" ) - add_marker( "Beginning" ) - f = open( os.path.join( self.top_dir, relative_input_path ), "rt" ) + def add_file(self, relative_input_path, wrap_in_comment=False): + def add_marker(prefix): + self.add_text("") + self.add_text("// " + "/"*70) + self.add_text("// %s of content of file: %s" % (prefix, 
relative_input_path.replace("\\","/"))) + self.add_text("// " + "/"*70) + self.add_text("") + add_marker("Beginning") + f = open(os.path.join(self.top_dir, relative_input_path), "rt") content = f.read() if wrap_in_comment: content = "/*\n" + content + "\n*/" - self.add_text( content ) + self.add_text(content) f.close() - add_marker( "End" ) - self.add_text( "\n\n\n\n" ) + add_marker("End") + self.add_text("\n\n\n\n") - def get_value( self ): - return "".join( self.blocks ).replace("\r\n","\n") + def get_value(self): + return "".join(self.blocks).replace("\r\n","\n") - def write_to( self, output_path ): - output_dir = os.path.dirname( output_path ) - if output_dir and not os.path.isdir( output_dir ): - os.makedirs( output_dir ) - f = open( output_path, "wb" ) - f.write( str.encode(self.get_value(), 'UTF-8') ) + def write_to(self, output_path): + output_dir = os.path.dirname(output_path) + if output_dir and not os.path.isdir(output_dir): + os.makedirs(output_dir) + f = open(output_path, "wb") + f.write(str.encode(self.get_value(), 'UTF-8')) f.close() -def amalgamate_source( source_top_dir=None, +def amalgamate_source(source_top_dir=None, target_source_path=None, - header_include_path=None ): + header_include_path=None): """Produces amalgated source. Parameters: source_top_dir: top-directory @@ -57,69 +57,73 @@ def amalgamate_source( source_top_dir=None, header_include_path: generated header path relative to target_source_path. """ print("Amalgating header...") - header = AmalgamationFile( source_top_dir ) - header.add_text( "/// Json-cpp amalgated header (http://jsoncpp.sourceforge.net/)." 
) - header.add_text( "/// It is intented to be used with #include <%s>" % header_include_path ) - header.add_file( "LICENSE", wrap_in_comment=True ) - header.add_text( "#ifndef JSON_AMALGATED_H_INCLUDED" ) - header.add_text( "# define JSON_AMALGATED_H_INCLUDED" ) - header.add_text( "/// If defined, indicates that the source file is amalgated" ) - header.add_text( "/// to prevent private header inclusion." ) - header.add_text( "#define JSON_IS_AMALGAMATION" ) - header.add_file( "include/json/version.h" ) - header.add_file( "include/json/config.h" ) - header.add_file( "include/json/forwards.h" ) - header.add_file( "include/json/features.h" ) - header.add_file( "include/json/value.h" ) - header.add_file( "include/json/reader.h" ) - header.add_file( "include/json/writer.h" ) - header.add_file( "include/json/assertions.h" ) - header.add_text( "#endif //ifndef JSON_AMALGATED_H_INCLUDED" ) + header = AmalgamationFile(source_top_dir) + header.add_text("/// Json-cpp amalgated header (http://jsoncpp.sourceforge.net/).") + header.add_text('/// It is intended to be used with #include "%s"' % header_include_path) + header.add_file("LICENSE", wrap_in_comment=True) + header.add_text("#ifndef JSON_AMALGATED_H_INCLUDED") + header.add_text("# define JSON_AMALGATED_H_INCLUDED") + header.add_text("/// If defined, indicates that the source file is amalgated") + header.add_text("/// to prevent private header inclusion.") + header.add_text("#define JSON_IS_AMALGAMATION") + header.add_file("include/json/version.h") + header.add_file("include/json/config.h") + header.add_file("include/json/forwards.h") + header.add_file("include/json/features.h") + header.add_file("include/json/value.h") + header.add_file("include/json/reader.h") + header.add_file("include/json/writer.h") + header.add_file("include/json/assertions.h") + header.add_text("#endif //ifndef JSON_AMALGATED_H_INCLUDED") - target_header_path = os.path.join( os.path.dirname(target_source_path), header_include_path ) + 
target_header_path = os.path.join(os.path.dirname(target_source_path), header_include_path) print("Writing amalgated header to %r" % target_header_path) - header.write_to( target_header_path ) + header.write_to(target_header_path) - base, ext = os.path.splitext( header_include_path ) + base, ext = os.path.splitext(header_include_path) forward_header_include_path = base + "-forwards" + ext print("Amalgating forward header...") - header = AmalgamationFile( source_top_dir ) - header.add_text( "/// Json-cpp amalgated forward header (http://jsoncpp.sourceforge.net/)." ) - header.add_text( "/// It is intented to be used with #include <%s>" % forward_header_include_path ) - header.add_text( "/// This header provides forward declaration for all JsonCpp types." ) - header.add_file( "LICENSE", wrap_in_comment=True ) - header.add_text( "#ifndef JSON_FORWARD_AMALGATED_H_INCLUDED" ) - header.add_text( "# define JSON_FORWARD_AMALGATED_H_INCLUDED" ) - header.add_text( "/// If defined, indicates that the source file is amalgated" ) - header.add_text( "/// to prevent private header inclusion." 
) - header.add_text( "#define JSON_IS_AMALGAMATION" ) - header.add_file( "include/json/config.h" ) - header.add_file( "include/json/forwards.h" ) - header.add_text( "#endif //ifndef JSON_FORWARD_AMALGATED_H_INCLUDED" ) + header = AmalgamationFile(source_top_dir) + header.add_text("/// Json-cpp amalgated forward header (http://jsoncpp.sourceforge.net/).") + header.add_text('/// It is intended to be used with #include "%s"' % forward_header_include_path) + header.add_text("/// This header provides forward declaration for all JsonCpp types.") + header.add_file("LICENSE", wrap_in_comment=True) + header.add_text("#ifndef JSON_FORWARD_AMALGATED_H_INCLUDED") + header.add_text("# define JSON_FORWARD_AMALGATED_H_INCLUDED") + header.add_text("/// If defined, indicates that the source file is amalgated") + header.add_text("/// to prevent private header inclusion.") + header.add_text("#define JSON_IS_AMALGAMATION") + header.add_file("include/json/config.h") + header.add_file("include/json/forwards.h") + header.add_text("#endif //ifndef JSON_FORWARD_AMALGATED_H_INCLUDED") - target_forward_header_path = os.path.join( os.path.dirname(target_source_path), - forward_header_include_path ) + target_forward_header_path = os.path.join(os.path.dirname(target_source_path), + forward_header_include_path) print("Writing amalgated forward header to %r" % target_forward_header_path) - header.write_to( target_forward_header_path ) + header.write_to(target_forward_header_path) print("Amalgating source...") - source = AmalgamationFile( source_top_dir ) - source.add_text( "/// Json-cpp amalgated source (http://jsoncpp.sourceforge.net/)." 
) - source.add_text( "/// It is intented to be used with #include <%s>" % header_include_path ) - source.add_file( "LICENSE", wrap_in_comment=True ) - source.add_text( "" ) - source.add_text( "#include <%s>" % header_include_path ) - source.add_text( "" ) + source = AmalgamationFile(source_top_dir) + source.add_text("/// Json-cpp amalgated source (http://jsoncpp.sourceforge.net/).") + source.add_text('/// It is intended to be used with #include "%s"' % header_include_path) + source.add_file("LICENSE", wrap_in_comment=True) + source.add_text("") + source.add_text('#include "%s"' % header_include_path) + source.add_text(""" +#ifndef JSON_IS_AMALGAMATION +#error "Compile with -I PATH_TO_JSON_DIRECTORY" +#endif +""") + source.add_text("") lib_json = "src/lib_json" - source.add_file( os.path.join(lib_json, "json_tool.h") ) - source.add_file( os.path.join(lib_json, "json_reader.cpp") ) - source.add_file( os.path.join(lib_json, "json_batchallocator.h") ) - source.add_file( os.path.join(lib_json, "json_valueiterator.inl") ) - source.add_file( os.path.join(lib_json, "json_value.cpp") ) - source.add_file( os.path.join(lib_json, "json_writer.cpp") ) + source.add_file(os.path.join(lib_json, "json_tool.h")) + source.add_file(os.path.join(lib_json, "json_reader.cpp")) + source.add_file(os.path.join(lib_json, "json_valueiterator.inl")) + source.add_file(os.path.join(lib_json, "json_value.cpp")) + source.add_file(os.path.join(lib_json, "json_writer.cpp")) print("Writing amalgated source to %r" % target_source_path) - source.write_to( target_source_path ) + source.write_to(target_source_path) def main(): usage = """%prog [options] @@ -137,12 +141,12 @@ Generate a single amalgated source and header file from the sources. 
parser.enable_interspersed_args() options, args = parser.parse_args() - msg = amalgamate_source( source_top_dir=options.top_dir, + msg = amalgamate_source(source_top_dir=options.top_dir, target_source_path=options.target_source_path, - header_include_path=options.header_include_path ) + header_include_path=options.header_include_path) if msg: - sys.stderr.write( msg + "\n" ) - sys.exit( 1 ) + sys.stderr.write(msg + "\n") + sys.exit(1) else: print("Source succesfully amalagated") diff --git a/3rdparty/jsoncpp/appveyor.yml b/3rdparty/jsoncpp/appveyor.yml new file mode 100644 index 00000000000..546cb7ed13e --- /dev/null +++ b/3rdparty/jsoncpp/appveyor.yml @@ -0,0 +1,34 @@ +# This is a comment. + +version: build.{build} + +os: Windows Server 2012 R2 + +clone_folder: c:\projects\jsoncpp + +platform: + - Win32 + - x64 + +configuration: + - Debug + - Release + +# scripts to run before build +before_build: + - echo "Running cmake..." + - cd c:\projects\jsoncpp + - cmake --version + - if %PLATFORM% == Win32 cmake . + - if %PLATFORM% == x64 cmake -G "Visual Studio 12 2013 Win64" . + +build: + project: jsoncpp.sln # path to Visual Studio solution or project + +deploy: + provider: GitHub + auth_token: + secure: K2Tp1q8pIZ7rs0Ot24ZMWuwr12Ev6Tc6QkhMjGQxoQG3ng1pXtgPasiJ45IDXGdg + on: + branch: master + appveyor_repo_tag: true diff --git a/3rdparty/jsoncpp/dev.makefile b/3rdparty/jsoncpp/dev.makefile index dd16bdd6ef9..d288b166586 100644 --- a/3rdparty/jsoncpp/dev.makefile +++ b/3rdparty/jsoncpp/dev.makefile @@ -1,14 +1,35 @@ -all: build test-amalgamate +# This is only for jsoncpp developers/contributors. +# We use this to sign releases, generate documentation, etc. +VER?=$(shell cat version) +default: + @echo "VER=${VER}" +sign: jsoncpp-${VER}.tar.gz + gpg --armor --detach-sign $< + gpg --verify $<.asc + # Then upload .asc to the release. 
+jsoncpp-%.tar.gz: + curl https://github.com/open-source-parsers/jsoncpp/archive/$*.tar.gz -o $@ +dox: + python doxybuild.py --doxygen=$$(which doxygen) --in doc/web_doxyfile.in + rsync -va --delete dist/doxygen/jsoncpp-api-html-${VER}/ ../jsoncpp-docs/doxygen/ + # Then 'git add -A' and 'git push' in jsoncpp-docs. build: mkdir -p build/debug - cd build/debug; cmake -DCMAKE_BUILD_TYPE=debug -DJSONCPP_LIB_BUILD_SHARED=ON -G "Unix Makefiles" ../.. + cd build/debug; cmake -DCMAKE_BUILD_TYPE=debug -DBUILD_SHARED_LIBS=ON -G "Unix Makefiles" ../.. make -C build/debug # Currently, this depends on include/json/version.h generated # by cmake. -test-amalgamate: build +test-amalgamate: python2.7 amalgamate.py python3.4 amalgamate.py + cd dist; gcc -I. -c jsoncpp.cpp + +valgrind: + valgrind --error-exitcode=42 --leak-check=full ./build/debug/src/test_lib_json/jsoncpp_test + +clean: + \rm -rf *.gz *.asc dist/ .PHONY: build diff --git a/3rdparty/jsoncpp/devtools/__init__.py b/3rdparty/jsoncpp/devtools/__init__.py index c944e7cb0c0..d18a5216853 100644 --- a/3rdparty/jsoncpp/devtools/__init__.py +++ b/3rdparty/jsoncpp/devtools/__init__.py @@ -1 +1,6 @@ -# module \ No newline at end of file +# Copyright 2010 Baptiste Lepilleur +# Distributed under MIT license, or public domain if desired and +# recognized in your jurisdiction. 
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE + +# module diff --git a/3rdparty/jsoncpp/devtools/agent_vmw7.json b/3rdparty/jsoncpp/devtools/agent_vmw7.json index a1db7db6f18..0810a99544e 100644 --- a/3rdparty/jsoncpp/devtools/agent_vmw7.json +++ b/3rdparty/jsoncpp/devtools/agent_vmw7.json @@ -19,8 +19,8 @@ }, {"name": "shared_dll", "variables": [ - ["JSONCPP_LIB_BUILD_SHARED=true"], - ["JSONCPP_LIB_BUILD_SHARED=false"] + ["BUILD_SHARED_LIBS=true"], + ["BUILD_SHARED_LIBS=false"] ] }, {"name": "build_type", diff --git a/3rdparty/jsoncpp/devtools/agent_vmxp.json b/3rdparty/jsoncpp/devtools/agent_vmxp.json index d34cf86addf..b627a7221a7 100644 --- a/3rdparty/jsoncpp/devtools/agent_vmxp.json +++ b/3rdparty/jsoncpp/devtools/agent_vmxp.json @@ -12,8 +12,8 @@ }, {"name": "shared_dll", "variables": [ - ["JSONCPP_LIB_BUILD_SHARED=true"], - ["JSONCPP_LIB_BUILD_SHARED=false"] + ["BUILD_SHARED_LIBS=true"], + ["BUILD_SHARED_LIBS=false"] ] }, {"name": "build_type", diff --git a/3rdparty/jsoncpp/devtools/antglob.py b/3rdparty/jsoncpp/devtools/antglob.py index 8b7b4ca297e..c272f66343f 100644 --- a/3rdparty/jsoncpp/devtools/antglob.py +++ b/3rdparty/jsoncpp/devtools/antglob.py @@ -1,6 +1,9 @@ #!/usr/bin/env python # encoding: utf-8 -# Baptiste Lepilleur, 2009 +# Copyright 2009 Baptiste Lepilleur +# Distributed under MIT license, or public domain if desired and +# recognized in your jurisdiction. +# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE from __future__ import print_function from dircache import listdir @@ -54,9 +57,9 @@ LINKS = DIR_LINK | FILE_LINK ALL_NO_LINK = DIR | FILE ALL = DIR | FILE | LINKS -_ANT_RE = re.compile( r'(/\*\*/)|(\*\*/)|(/\*\*)|(\*)|(/)|([^\*/]*)' ) +_ANT_RE = re.compile(r'(/\*\*/)|(\*\*/)|(/\*\*)|(\*)|(/)|([^\*/]*)') -def ant_pattern_to_re( ant_pattern ): +def ant_pattern_to_re(ant_pattern): """Generates a regular expression from the ant pattern. 
Matching convention: **/a: match 'a', 'dir/a', 'dir1/dir2/a' @@ -65,30 +68,30 @@ def ant_pattern_to_re( ant_pattern ): """ rex = ['^'] next_pos = 0 - sep_rex = r'(?:/|%s)' % re.escape( os.path.sep ) + sep_rex = r'(?:/|%s)' % re.escape(os.path.sep) ## print 'Converting', ant_pattern - for match in _ANT_RE.finditer( ant_pattern ): + for match in _ANT_RE.finditer(ant_pattern): ## print 'Matched', match.group() ## print match.start(0), next_pos if match.start(0) != next_pos: - raise ValueError( "Invalid ant pattern" ) + raise ValueError("Invalid ant pattern") if match.group(1): # /**/ - rex.append( sep_rex + '(?:.*%s)?' % sep_rex ) + rex.append(sep_rex + '(?:.*%s)?' % sep_rex) elif match.group(2): # **/ - rex.append( '(?:.*%s)?' % sep_rex ) + rex.append('(?:.*%s)?' % sep_rex) elif match.group(3): # /** - rex.append( sep_rex + '.*' ) + rex.append(sep_rex + '.*') elif match.group(4): # * - rex.append( '[^/%s]*' % re.escape(os.path.sep) ) + rex.append('[^/%s]*' % re.escape(os.path.sep)) elif match.group(5): # / - rex.append( sep_rex ) + rex.append(sep_rex) else: # somepath - rex.append( re.escape(match.group(6)) ) + rex.append(re.escape(match.group(6))) next_pos = match.end() rex.append('$') - return re.compile( ''.join( rex ) ) + return re.compile(''.join(rex)) -def _as_list( l ): +def _as_list(l): if isinstance(l, basestring): return l.split() return l @@ -105,37 +108,37 @@ def glob(dir_path, dir_path = dir_path.replace('/',os.path.sep) entry_type_filter = entry_type - def is_pruned_dir( dir_name ): + def is_pruned_dir(dir_name): for pattern in prune_dirs: - if fnmatch.fnmatch( dir_name, pattern ): + if fnmatch.fnmatch(dir_name, pattern): return True return False - def apply_filter( full_path, filter_rexs ): + def apply_filter(full_path, filter_rexs): """Return True if at least one of the filter regular expression match full_path.""" for rex in filter_rexs: - if rex.match( full_path ): + if rex.match(full_path): return True return False - def glob_impl( root_dir_path ): 
+ def glob_impl(root_dir_path): child_dirs = [root_dir_path] while child_dirs: dir_path = child_dirs.pop() - for entry in listdir( dir_path ): - full_path = os.path.join( dir_path, entry ) + for entry in listdir(dir_path): + full_path = os.path.join(dir_path, entry) ## print 'Testing:', full_path, - is_dir = os.path.isdir( full_path ) - if is_dir and not is_pruned_dir( entry ): # explore child directory ? + is_dir = os.path.isdir(full_path) + if is_dir and not is_pruned_dir(entry): # explore child directory ? ## print '===> marked for recursion', - child_dirs.append( full_path ) - included = apply_filter( full_path, include_filter ) - rejected = apply_filter( full_path, exclude_filter ) + child_dirs.append(full_path) + included = apply_filter(full_path, include_filter) + rejected = apply_filter(full_path, exclude_filter) if not included or rejected: # do not include entry ? ## print '=> not included or rejected' continue - link = os.path.islink( full_path ) - is_file = os.path.isfile( full_path ) + link = os.path.islink(full_path) + is_file = os.path.isfile(full_path) if not is_file and not is_dir: ## print '=> unknown entry type' continue @@ -146,57 +149,57 @@ def glob(dir_path, ## print '=> type: %d' % entry_type, if (entry_type & entry_type_filter) != 0: ## print ' => KEEP' - yield os.path.join( dir_path, entry ) + yield os.path.join(dir_path, entry) ## else: ## print ' => TYPE REJECTED' - return list( glob_impl( dir_path ) ) + return list(glob_impl(dir_path)) if __name__ == "__main__": import unittest class AntPatternToRETest(unittest.TestCase): -## def test_conversion( self ): -## self.assertEqual( '^somepath$', ant_pattern_to_re( 'somepath' ).pattern ) +## def test_conversion(self): +## self.assertEqual('^somepath$', ant_pattern_to_re('somepath').pattern) - def test_matching( self ): - test_cases = [ ( 'path', + def test_matching(self): + test_cases = [ ('path', ['path'], - ['somepath', 'pathsuffix', '/path', '/path'] ), - ( '*.py', + ['somepath', 
'pathsuffix', '/path', '/path']), + ('*.py', ['source.py', 'source.ext.py', '.py'], - ['path/source.py', '/.py', 'dir.py/z', 'z.pyc', 'z.c'] ), - ( '**/path', + ['path/source.py', '/.py', 'dir.py/z', 'z.pyc', 'z.c']), + ('**/path', ['path', '/path', '/a/path', 'c:/a/path', '/a/b/path', '//a/path', '/a/path/b/path'], - ['path/', 'a/path/b', 'dir.py/z', 'somepath', 'pathsuffix', 'a/somepath'] ), - ( 'path/**', + ['path/', 'a/path/b', 'dir.py/z', 'somepath', 'pathsuffix', 'a/somepath']), + ('path/**', ['path/a', 'path/path/a', 'path//'], - ['path', 'somepath/a', 'a/path', 'a/path/a', 'pathsuffix/a'] ), - ( '/**/path', + ['path', 'somepath/a', 'a/path', 'a/path/a', 'pathsuffix/a']), + ('/**/path', ['/path', '/a/path', '/a/b/path/path', '/path/path'], - ['path', 'path/', 'a/path', '/pathsuffix', '/somepath'] ), - ( 'a/b', + ['path', 'path/', 'a/path', '/pathsuffix', '/somepath']), + ('a/b', ['a/b'], - ['somea/b', 'a/bsuffix', 'a/b/c'] ), - ( '**/*.py', + ['somea/b', 'a/bsuffix', 'a/b/c']), + ('**/*.py', ['script.py', 'src/script.py', 'a/b/script.py', '/a/b/script.py'], - ['script.pyc', 'script.pyo', 'a.py/b'] ), - ( 'src/**/*.py', + ['script.pyc', 'script.pyo', 'a.py/b']), + ('src/**/*.py', ['src/a.py', 'src/dir/a.py'], - ['a/src/a.py', '/src/a.py'] ), + ['a/src/a.py', '/src/a.py']), ] for ant_pattern, accepted_matches, rejected_matches in list(test_cases): - def local_path( paths ): + def local_path(paths): return [ p.replace('/',os.path.sep) for p in paths ] - test_cases.append( (ant_pattern, local_path(accepted_matches), local_path( rejected_matches )) ) + test_cases.append((ant_pattern, local_path(accepted_matches), local_path(rejected_matches))) for ant_pattern, accepted_matches, rejected_matches in test_cases: - rex = ant_pattern_to_re( ant_pattern ) + rex = ant_pattern_to_re(ant_pattern) print('ant_pattern:', ant_pattern, ' => ', rex.pattern) for accepted_match in accepted_matches: print('Accepted?:', accepted_match) - self.assertTrue( rex.match( accepted_match ) 
is not None ) + self.assertTrue(rex.match(accepted_match) is not None) for rejected_match in rejected_matches: print('Rejected?:', rejected_match) - self.assertTrue( rex.match( rejected_match ) is None ) + self.assertTrue(rex.match(rejected_match) is None) unittest.main() diff --git a/3rdparty/jsoncpp/devtools/batchbuild.py b/3rdparty/jsoncpp/devtools/batchbuild.py index 6f57945a7c3..0eb0690e8c6 100644 --- a/3rdparty/jsoncpp/devtools/batchbuild.py +++ b/3rdparty/jsoncpp/devtools/batchbuild.py @@ -18,62 +18,62 @@ class BuildDesc: self.build_type = build_type self.generator = generator - def merged_with( self, build_desc ): + def merged_with(self, build_desc): """Returns a new BuildDesc by merging field content. Prefer build_desc fields to self fields for single valued field. """ - return BuildDesc( self.prepend_envs + build_desc.prepend_envs, + return BuildDesc(self.prepend_envs + build_desc.prepend_envs, self.variables + build_desc.variables, build_desc.build_type or self.build_type, - build_desc.generator or self.generator ) + build_desc.generator or self.generator) - def env( self ): + def env(self): environ = os.environ.copy() for values_by_name in self.prepend_envs: for var, value in list(values_by_name.items()): var = var.upper() if type(value) is unicode: - value = value.encode( sys.getdefaultencoding() ) + value = value.encode(sys.getdefaultencoding()) if var in environ: environ[var] = value + os.pathsep + environ[var] else: environ[var] = value return environ - def cmake_args( self ): + def cmake_args(self): args = ["-D%s" % var for var in self.variables] # skip build type for Visual Studio solution as it cause warning if self.build_type and 'Visual' not in self.generator: - args.append( "-DCMAKE_BUILD_TYPE=%s" % self.build_type ) + args.append("-DCMAKE_BUILD_TYPE=%s" % self.build_type) if self.generator: - args.extend( ['-G', self.generator] ) + args.extend(['-G', self.generator]) return args - def __repr__( self ): - return "BuildDesc( %s, build_type=%s 
)" % (" ".join( self.cmake_args()), self.build_type) + def __repr__(self): + return "BuildDesc(%s, build_type=%s)" % (" ".join(self.cmake_args()), self.build_type) class BuildData: - def __init__( self, desc, work_dir, source_dir ): + def __init__(self, desc, work_dir, source_dir): self.desc = desc self.work_dir = work_dir self.source_dir = source_dir - self.cmake_log_path = os.path.join( work_dir, 'batchbuild_cmake.log' ) - self.build_log_path = os.path.join( work_dir, 'batchbuild_build.log' ) + self.cmake_log_path = os.path.join(work_dir, 'batchbuild_cmake.log') + self.build_log_path = os.path.join(work_dir, 'batchbuild_build.log') self.cmake_succeeded = False self.build_succeeded = False def execute_build(self): print('Build %s' % self.desc) - self._make_new_work_dir( ) - self.cmake_succeeded = self._generate_makefiles( ) + self._make_new_work_dir() + self.cmake_succeeded = self._generate_makefiles() if self.cmake_succeeded: - self.build_succeeded = self._build_using_makefiles( ) + self.build_succeeded = self._build_using_makefiles() return self.build_succeeded def _generate_makefiles(self): print(' Generating makefiles: ', end=' ') - cmd = ['cmake'] + self.desc.cmake_args( ) + [os.path.abspath( self.source_dir )] - succeeded = self._execute_build_subprocess( cmd, self.desc.env(), self.cmake_log_path ) + cmd = ['cmake'] + self.desc.cmake_args() + [os.path.abspath(self.source_dir)] + succeeded = self._execute_build_subprocess(cmd, self.desc.env(), self.cmake_log_path) print('done' if succeeded else 'FAILED') return succeeded @@ -82,58 +82,58 @@ class BuildData: cmd = ['cmake', '--build', self.work_dir] if self.desc.build_type: cmd += ['--config', self.desc.build_type] - succeeded = self._execute_build_subprocess( cmd, self.desc.env(), self.build_log_path ) + succeeded = self._execute_build_subprocess(cmd, self.desc.env(), self.build_log_path) print('done' if succeeded else 'FAILED') return succeeded def _execute_build_subprocess(self, cmd, env, log_path): - 
process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=self.work_dir, - env=env ) - stdout, _ = process.communicate( ) + process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=self.work_dir, + env=env) + stdout, _ = process.communicate() succeeded = (process.returncode == 0) - with open( log_path, 'wb' ) as flog: - log = ' '.join( cmd ) + '\n' + stdout + '\nExit code: %r\n' % process.returncode - flog.write( fix_eol( log ) ) + with open(log_path, 'wb') as flog: + log = ' '.join(cmd) + '\n' + stdout + '\nExit code: %r\n' % process.returncode + flog.write(fix_eol(log)) return succeeded def _make_new_work_dir(self): - if os.path.isdir( self.work_dir ): + if os.path.isdir(self.work_dir): print(' Removing work directory', self.work_dir) - shutil.rmtree( self.work_dir, ignore_errors=True ) - if not os.path.isdir( self.work_dir ): - os.makedirs( self.work_dir ) + shutil.rmtree(self.work_dir, ignore_errors=True) + if not os.path.isdir(self.work_dir): + os.makedirs(self.work_dir) -def fix_eol( stdout ): +def fix_eol(stdout): """Fixes wrong EOL produced by cmake --build on Windows (\r\r\n instead of \r\n). 
""" - return re.sub( '\r*\n', os.linesep, stdout ) + return re.sub('\r*\n', os.linesep, stdout) -def load_build_variants_from_config( config_path ): - with open( config_path, 'rb' ) as fconfig: - data = json.load( fconfig ) +def load_build_variants_from_config(config_path): + with open(config_path, 'rb') as fconfig: + data = json.load(fconfig) variants = data[ 'cmake_variants' ] - build_descs_by_axis = collections.defaultdict( list ) + build_descs_by_axis = collections.defaultdict(list) for axis in variants: axis_name = axis["name"] build_descs = [] if "generators" in axis: for generator_data in axis["generators"]: for generator in generator_data["generator"]: - build_desc = BuildDesc( generator=generator, - prepend_envs=generator_data.get("env_prepend") ) - build_descs.append( build_desc ) + build_desc = BuildDesc(generator=generator, + prepend_envs=generator_data.get("env_prepend")) + build_descs.append(build_desc) elif "variables" in axis: for variables in axis["variables"]: - build_desc = BuildDesc( variables=variables ) - build_descs.append( build_desc ) + build_desc = BuildDesc(variables=variables) + build_descs.append(build_desc) elif "build_types" in axis: for build_type in axis["build_types"]: - build_desc = BuildDesc( build_type=build_type ) - build_descs.append( build_desc ) - build_descs_by_axis[axis_name].extend( build_descs ) + build_desc = BuildDesc(build_type=build_type) + build_descs.append(build_desc) + build_descs_by_axis[axis_name].extend(build_descs) return build_descs_by_axis -def generate_build_variants( build_descs_by_axis ): +def generate_build_variants(build_descs_by_axis): """Returns a list of BuildDesc generated for the partial BuildDesc for each axis.""" axis_names = list(build_descs_by_axis.keys()) build_descs = [] @@ -141,8 +141,8 @@ def generate_build_variants( build_descs_by_axis ): if len(build_descs): # for each existing build_desc and each axis build desc, create a new build_desc new_build_descs = [] - for prototype_build_desc, 
axis_build_desc in itertools.product( build_descs, axis_build_descs): - new_build_descs.append( prototype_build_desc.merged_with( axis_build_desc ) ) + for prototype_build_desc, axis_build_desc in itertools.product(build_descs, axis_build_descs): + new_build_descs.append(prototype_build_desc.merged_with(axis_build_desc)) build_descs = new_build_descs else: build_descs = axis_build_descs @@ -174,60 +174,57 @@ $tr_builds ''') -def generate_html_report( html_report_path, builds ): - report_dir = os.path.dirname( html_report_path ) +def generate_html_report(html_report_path, builds): + report_dir = os.path.dirname(html_report_path) # Vertical axis: generator # Horizontal: variables, then build_type - builds_by_generator = collections.defaultdict( list ) + builds_by_generator = collections.defaultdict(list) variables = set() - build_types_by_variable = collections.defaultdict( set ) + build_types_by_variable = collections.defaultdict(set) build_by_pos_key = {} # { (generator, var_key, build_type): build } for build in builds: - builds_by_generator[build.desc.generator].append( build ) + builds_by_generator[build.desc.generator].append(build) var_key = tuple(sorted(build.desc.variables)) - variables.add( var_key ) - build_types_by_variable[var_key].add( build.desc.build_type ) + variables.add(var_key) + build_types_by_variable[var_key].add(build.desc.build_type) pos_key = (build.desc.generator, var_key, build.desc.build_type) build_by_pos_key[pos_key] = build - variables = sorted( variables ) + variables = sorted(variables) th_vars = [] th_build_types = [] for variable in variables: - build_types = sorted( build_types_by_variable[variable] ) + build_types = sorted(build_types_by_variable[variable]) nb_build_type = len(build_types_by_variable[variable]) - th_vars.append( '%s' % (nb_build_type, cgi.escape( ' '.join( variable ) ) ) ) + th_vars.append('%s' % (nb_build_type, cgi.escape(' '.join(variable)))) for build_type in build_types: - th_build_types.append( '%s' % 
cgi.escape(build_type) ) + th_build_types.append('%s' % cgi.escape(build_type)) tr_builds = [] - for generator in sorted( builds_by_generator ): - tds = [ '%s\n' % cgi.escape( generator ) ] + for generator in sorted(builds_by_generator): + tds = [ '%s\n' % cgi.escape(generator) ] for variable in variables: - build_types = sorted( build_types_by_variable[variable] ) + build_types = sorted(build_types_by_variable[variable]) for build_type in build_types: pos_key = (generator, variable, build_type) build = build_by_pos_key.get(pos_key) if build: cmake_status = 'ok' if build.cmake_succeeded else 'FAILED' build_status = 'ok' if build.build_succeeded else 'FAILED' - cmake_log_url = os.path.relpath( build.cmake_log_path, report_dir ) - build_log_url = os.path.relpath( build.build_log_path, report_dir ) - td = 'CMake: %s' % ( - build_status.lower(), cmake_log_url, cmake_status.lower(), cmake_status) + cmake_log_url = os.path.relpath(build.cmake_log_path, report_dir) + build_log_url = os.path.relpath(build.build_log_path, report_dir) + td = 'CMake: %s' % ( build_status.lower(), cmake_log_url, cmake_status.lower(), cmake_status) if build.cmake_succeeded: - td += '
Build: %s' % ( - build_log_url, build_status.lower(), build_status) + td += '
Build: %s' % ( build_log_url, build_status.lower(), build_status) td += '' else: td = '' - tds.append( td ) - tr_builds.append( '%s' % '\n'.join( tds ) ) - html = HTML_TEMPLATE.substitute( - title='Batch build report', + tds.append(td) + tr_builds.append('%s' % '\n'.join(tds)) + html = HTML_TEMPLATE.substitute( title='Batch build report', th_vars=' '.join(th_vars), - th_build_types=' '.join( th_build_types), - tr_builds='\n'.join( tr_builds ) ) - with open( html_report_path, 'wt' ) as fhtml: - fhtml.write( html ) + th_build_types=' '.join(th_build_types), + tr_builds='\n'.join(tr_builds)) + with open(html_report_path, 'wt') as fhtml: + fhtml.write(html) print('HTML report generated in:', html_report_path) def main(): @@ -246,33 +243,33 @@ python devtools\batchbuild.py e:\buildbots\jsoncpp\build . devtools\agent_vmw7.j parser.enable_interspersed_args() options, args = parser.parse_args() if len(args) < 3: - parser.error( "Missing one of WORK_DIR SOURCE_DIR CONFIG_JSON_PATH." ) + parser.error("Missing one of WORK_DIR SOURCE_DIR CONFIG_JSON_PATH.") work_dir = args[0] source_dir = args[1].rstrip('/\\') config_paths = args[2:] for config_path in config_paths: - if not os.path.isfile( config_path ): - parser.error( "Can not read: %r" % config_path ) + if not os.path.isfile(config_path): + parser.error("Can not read: %r" % config_path) # generate build variants build_descs = [] for config_path in config_paths: - build_descs_by_axis = load_build_variants_from_config( config_path ) - build_descs.extend( generate_build_variants( build_descs_by_axis ) ) + build_descs_by_axis = load_build_variants_from_config(config_path) + build_descs.extend(generate_build_variants(build_descs_by_axis)) print('Build variants (%d):' % len(build_descs)) # assign build directory for each variant - if not os.path.isdir( work_dir ): - os.makedirs( work_dir ) + if not os.path.isdir(work_dir): + os.makedirs(work_dir) builds = [] - with open( os.path.join( work_dir, 'matrix-dir-map.txt' ), 'wt' ) as 
fmatrixmap: - for index, build_desc in enumerate( build_descs ): - build_desc_work_dir = os.path.join( work_dir, '%03d' % (index+1) ) - builds.append( BuildData( build_desc, build_desc_work_dir, source_dir ) ) - fmatrixmap.write( '%s: %s\n' % (build_desc_work_dir, build_desc) ) + with open(os.path.join(work_dir, 'matrix-dir-map.txt'), 'wt') as fmatrixmap: + for index, build_desc in enumerate(build_descs): + build_desc_work_dir = os.path.join(work_dir, '%03d' % (index+1)) + builds.append(BuildData(build_desc, build_desc_work_dir, source_dir)) + fmatrixmap.write('%s: %s\n' % (build_desc_work_dir, build_desc)) for build in builds: build.execute_build() - html_report_path = os.path.join( work_dir, 'batchbuild-report.html' ) - generate_html_report( html_report_path, builds ) + html_report_path = os.path.join(work_dir, 'batchbuild-report.html') + generate_html_report(html_report_path, builds) print('Done') diff --git a/3rdparty/jsoncpp/devtools/fixeol.py b/3rdparty/jsoncpp/devtools/fixeol.py index 53af7612bc7..054eb9b227b 100644 --- a/3rdparty/jsoncpp/devtools/fixeol.py +++ b/3rdparty/jsoncpp/devtools/fixeol.py @@ -1,10 +1,15 @@ +# Copyright 2010 Baptiste Lepilleur +# Distributed under MIT license, or public domain if desired and +# recognized in your jurisdiction. 
+# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE + from __future__ import print_function import os.path -def fix_source_eol( path, is_dry_run = True, verbose = True, eol = '\n' ): +def fix_source_eol(path, is_dry_run = True, verbose = True, eol = '\n'): """Makes sure that all sources have the specified eol sequence (default: unix).""" - if not os.path.isfile( path ): - raise ValueError( 'Path "%s" is not a file' % path ) + if not os.path.isfile(path): + raise ValueError('Path "%s" is not a file' % path) try: f = open(path, 'rb') except IOError as msg: @@ -29,27 +34,27 @@ def fix_source_eol( path, is_dry_run = True, verbose = True, eol = '\n' ): ## ## ## -##def _do_fix( is_dry_run = True ): +##def _do_fix(is_dry_run = True): ## from waftools import antglob -## python_sources = antglob.glob( '.', +## python_sources = antglob.glob('.', ## includes = '**/*.py **/wscript **/wscript_build', ## excludes = antglob.default_excludes + './waf.py', -## prune_dirs = antglob.prune_dirs + 'waf-* ./build' ) +## prune_dirs = antglob.prune_dirs + 'waf-* ./build') ## for path in python_sources: -## _fix_python_source( path, is_dry_run ) +## _fix_python_source(path, is_dry_run) ## -## cpp_sources = antglob.glob( '.', +## cpp_sources = antglob.glob('.', ## includes = '**/*.cpp **/*.h **/*.inl', -## prune_dirs = antglob.prune_dirs + 'waf-* ./build' ) +## prune_dirs = antglob.prune_dirs + 'waf-* ./build') ## for path in cpp_sources: -## _fix_source_eol( path, is_dry_run ) +## _fix_source_eol(path, is_dry_run) ## ## ##def dry_fix(context): -## _do_fix( is_dry_run = True ) +## _do_fix(is_dry_run = True) ## ##def fix(context): -## _do_fix( is_dry_run = False ) +## _do_fix(is_dry_run = False) ## ##def shutdown(): ## pass diff --git a/3rdparty/jsoncpp/devtools/licenseupdater.py b/3rdparty/jsoncpp/devtools/licenseupdater.py index 8cb71d737b3..6f823618fb9 100644 --- a/3rdparty/jsoncpp/devtools/licenseupdater.py +++ b/3rdparty/jsoncpp/devtools/licenseupdater.py @@ 
-13,7 +13,7 @@ BRIEF_LICENSE = LICENSE_BEGIN + """2007-2010 Baptiste Lepilleur """.replace('\r\n','\n') -def update_license( path, dry_run, show_diff ): +def update_license(path, dry_run, show_diff): """Update the license statement in the specified file. Parameters: path: path of the C++ source file to update. @@ -22,28 +22,28 @@ def update_license( path, dry_run, show_diff ): show_diff: if True, print the path of the file that would be modified, as well as the change made to the file. """ - with open( path, 'rt' ) as fin: + with open(path, 'rt') as fin: original_text = fin.read().replace('\r\n','\n') newline = fin.newlines and fin.newlines[0] or '\n' - if not original_text.startswith( LICENSE_BEGIN ): + if not original_text.startswith(LICENSE_BEGIN): # No existing license found => prepend it new_text = BRIEF_LICENSE + original_text else: - license_end_index = original_text.index( '\n\n' ) # search first blank line + license_end_index = original_text.index('\n\n') # search first blank line new_text = BRIEF_LICENSE + original_text[license_end_index+2:] if original_text != new_text: if not dry_run: - with open( path, 'wb' ) as fout: - fout.write( new_text.replace('\n', newline ) ) + with open(path, 'wb') as fout: + fout.write(new_text.replace('\n', newline)) print('Updated', path) if show_diff: import difflib - print('\n'.join( difflib.unified_diff( original_text.split('\n'), - new_text.split('\n') ) )) + print('\n'.join(difflib.unified_diff(original_text.split('\n'), + new_text.split('\n')))) return True return False -def update_license_in_source_directories( source_dirs, dry_run, show_diff ): +def update_license_in_source_directories(source_dirs, dry_run, show_diff): """Updates license text in C++ source files found in directory source_dirs. Parameters: source_dirs: list of directory to scan for C++ sources. 
Directories are @@ -56,11 +56,11 @@ def update_license_in_source_directories( source_dirs, dry_run, show_diff ): from devtools import antglob prune_dirs = antglob.prune_dirs + 'scons-local* ./build* ./libs ./dist' for source_dir in source_dirs: - cpp_sources = antglob.glob( source_dir, + cpp_sources = antglob.glob(source_dir, includes = '''**/*.h **/*.cpp **/*.inl''', - prune_dirs = prune_dirs ) + prune_dirs = prune_dirs) for source in cpp_sources: - update_license( source, dry_run, show_diff ) + update_license(source, dry_run, show_diff) def main(): usage = """%prog DIR [DIR2...] @@ -83,7 +83,7 @@ python devtools\licenseupdater.py include src help="""On update, show change made to the file.""") parser.enable_interspersed_args() options, args = parser.parse_args() - update_license_in_source_directories( args, options.dry_run, options.show_diff ) + update_license_in_source_directories(args, options.dry_run, options.show_diff) print('Done') if __name__ == '__main__': diff --git a/3rdparty/jsoncpp/devtools/tarball.py b/3rdparty/jsoncpp/devtools/tarball.py index ccbda394255..2e72717aba4 100644 --- a/3rdparty/jsoncpp/devtools/tarball.py +++ b/3rdparty/jsoncpp/devtools/tarball.py @@ -1,5 +1,10 @@ -import os.path -import gzip +# Copyright 2010 Baptiste Lepilleur +# Distributed under MIT license, or public domain if desired and +# recognized in your jurisdiction. +# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE + +from contextlib import closing +import os import tarfile TARGZ_DEFAULT_COMPRESSION_LEVEL = 9 @@ -13,41 +18,35 @@ def make_tarball(tarball_path, sources, base_dir, prefix_dir=''): prefix_dir: all files stored in the tarball be sub-directory of prefix_dir. Set to '' to make them child of root. 
""" - base_dir = os.path.normpath( os.path.abspath( base_dir ) ) - def archive_name( path ): + base_dir = os.path.normpath(os.path.abspath(base_dir)) + def archive_name(path): """Makes path relative to base_dir.""" - path = os.path.normpath( os.path.abspath( path ) ) - common_path = os.path.commonprefix( (base_dir, path) ) + path = os.path.normpath(os.path.abspath(path)) + common_path = os.path.commonprefix((base_dir, path)) archive_name = path[len(common_path):] - if os.path.isabs( archive_name ): + if os.path.isabs(archive_name): archive_name = archive_name[1:] - return os.path.join( prefix_dir, archive_name ) + return os.path.join(prefix_dir, archive_name) def visit(tar, dirname, names): for name in names: path = os.path.join(dirname, name) if os.path.isfile(path): path_in_tar = archive_name(path) - tar.add(path, path_in_tar ) + tar.add(path, path_in_tar) compression = TARGZ_DEFAULT_COMPRESSION_LEVEL - tar = tarfile.TarFile.gzopen( tarball_path, 'w', compresslevel=compression ) - try: + with closing(tarfile.TarFile.open(tarball_path, 'w:gz', + compresslevel=compression)) as tar: for source in sources: source_path = source - if os.path.isdir( source ): - os.path.walk(source_path, visit, tar) + if os.path.isdir(source): + for dirpath, dirnames, filenames in os.walk(source_path): + visit(tar, dirpath, filenames) else: path_in_tar = archive_name(source_path) - tar.add(source_path, path_in_tar ) # filename, arcname - finally: - tar.close() + tar.add(source_path, path_in_tar) # filename, arcname -def decompress( tarball_path, base_dir ): +def decompress(tarball_path, base_dir): """Decompress the gzipped tarball into directory base_dir. """ - # !!! This class method is not documented in the online doc - # nor is bz2open! 
- tar = tarfile.TarFile.gzopen(tarball_path, mode='r') - try: - tar.extractall( base_dir ) - finally: - tar.close() + with closing(tarfile.TarFile.open(tarball_path)) as tar: + tar.extractall(base_dir) diff --git a/3rdparty/jsoncpp/doc/doxyfile.in b/3rdparty/jsoncpp/doc/doxyfile.in index a4161865cc7..57c61c27e40 100644 --- a/3rdparty/jsoncpp/doc/doxyfile.in +++ b/3rdparty/jsoncpp/doc/doxyfile.in @@ -819,7 +819,7 @@ EXCLUDE_SYMBOLS = # that contain example code fragments that are included (see the \include # command). -EXAMPLE_PATH = +EXAMPLE_PATH = .. # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and @@ -1946,8 +1946,7 @@ INCLUDE_FILE_PATTERNS = *.h PREDEFINED = "_MSC_VER=1400" \ _CPPRTTI \ _WIN32 \ - JSONCPP_DOC_EXCLUDE_IMPLEMENTATION \ - JSON_VALUE_USE_INTERNAL_MAP + JSONCPP_DOC_EXCLUDE_IMPLEMENTATION # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this # tag can be used to specify a list of macro names that should be expanded. The diff --git a/3rdparty/jsoncpp/doc/header.html b/3rdparty/jsoncpp/doc/header.html index 6bd2ad9e32a..4b2a5e92198 100644 --- a/3rdparty/jsoncpp/doc/header.html +++ b/3rdparty/jsoncpp/doc/header.html @@ -16,7 +16,7 @@ JsonCpp - JSON data format manipulation library - JsonCpp home page + JsonCpp home page diff --git a/3rdparty/jsoncpp/doc/jsoncpp.dox b/3rdparty/jsoncpp/doc/jsoncpp.dox index a9ed47ec4e9..47efc8a3576 100644 --- a/3rdparty/jsoncpp/doc/jsoncpp.dox +++ b/3rdparty/jsoncpp/doc/jsoncpp.dox @@ -4,11 +4,21 @@ JSON (JavaScript Object Notation) is a lightweight data-interchange format. -It can represent integer, real number, string, an ordered sequence of value, and -a collection of name/value pairs. 
Here is an example of JSON data: \verbatim +{ + "encoding" : "UTF-8", + "plug-ins" : [ + "python", + "c++", + "ruby" + ], + "indent" : { "length" : 3, "use_space": true } +} +\endverbatim +JsonCpp supports comments as meta-data: +\code // Configuration options { // Default encoding for text @@ -17,22 +27,22 @@ Here is an example of JSON data: // Plug-ins loaded at start-up "plug-ins" : [ "python", - "c++", + "c++", // trailing comment "ruby" ], // Tab indent size - "indent" : { "length" : 3, "use_space": true } + // (multi-line comment) + "indent" : { /*embedded comment*/ "length" : 3, "use_space": true } } -\endverbatim -jsoncpp supports comments as meta-data. +\endcode \section _features Features - read and write JSON document - attach C++ style comments to element during parsing - rewrite JSON document preserving original comments -Notes: Comments used to be supported in JSON but where removed for +Notes: Comments used to be supported in JSON but were removed for portability (C like comments are not supported in Python). Since comments are useful in configuration/input file, this feature was preserved. @@ -40,47 +50,77 @@ preserved. \section _example Code example \code -Json::Value root; // will contains the root value after parsing. -Json::Reader reader; -bool parsingSuccessful = reader.parse( config_doc, root ); -if ( !parsingSuccessful ) -{ - // report to the user the failure and their locations in the document. - std::cout << "Failed to parse configuration\n" - << reader.getFormattedErrorMessages(); - return; -} +Json::Value root; // 'root' will contain the root value after parsing. +std::cin >> root; -// Get the value of the member of root named 'encoding', return 'UTF-8' if there is no -// such member. -std::string encoding = root.get("encoding", "UTF-8" ).asString(); -// Get the value of the member of root named 'encoding', return a 'null' value if -// there is no such member. 
-const Json::Value plugins = root["plug-ins"]; -for ( int index = 0; index < plugins.size(); ++index ) // Iterates over the sequence elements. - loadPlugIn( plugins[index].asString() ); - -setIndentLength( root["indent"].get("length", 3).asInt() ); -setIndentUseSpace( root["indent"].get("use_space", true).asBool() ); - -// ... -// At application shutdown to make the new configuration document: -// Since Json::Value has implicit constructor for all value types, it is not -// necessary to explicitly construct the Json::Value object: -root["encoding"] = getCurrentEncoding(); -root["indent"]["length"] = getCurrentIndentLength(); -root["indent"]["use_space"] = getCurrentIndentUseSpace(); - -Json::StyledWriter writer; -// Make a new JSON document for the configuration. Preserve original comments. -std::string outputConfig = writer.write( root ); - -// You can also use streams. This will put the contents of any JSON -// stream at a particular sub-value, if you'd like. +// You can also read into a particular sub-value. std::cin >> root["subtree"]; -// And you can write to a stream, using the StyledWriter automatically. +// Get the value of the member of root named 'encoding', +// and return 'UTF-8' if there is no such member. +std::string encoding = root.get("encoding", "UTF-8" ).asString(); + +// Get the value of the member of root named 'plug-ins'; return a 'null' value if +// there is no such member. +const Json::Value plugins = root["plug-ins"]; + +// Iterate over the sequence elements. +for ( int index = 0; index < plugins.size(); ++index ) + loadPlugIn( plugins[index].asString() ); + +// Try other datatypes. Some are auto-convertible to others. +foo::setIndentLength( root["indent"].get("length", 3).asInt() ); +foo::setIndentUseSpace( root["indent"].get("use_space", true).asBool() ); + +// Since Json::Value has an implicit constructor for all value types, it is not +// necessary to explicitly construct the Json::Value object. 
+root["encoding"] = foo::getCurrentEncoding(); +root["indent"]["length"] = foo::getCurrentIndentLength(); +root["indent"]["use_space"] = foo::getCurrentIndentUseSpace(); + +// If you like the defaults, you can insert directly into a stream. std::cout << root; +// Of course, you can write to `std::ostringstream` if you prefer. + +// If desired, remember to add a linefeed and flush. +std::cout << std::endl; +\endcode + +\section _advanced Advanced usage + +Configure *builders* to create *readers* and *writers*. For +configuration, we use our own `Json::Value` (rather than +standard setters/getters) so that we can add +features without losing binary-compatibility. + +\code +// For convenience, use `writeString()` with a specialized builder. +Json::StreamWriterBuilder wbuilder; +wbuilder["indentation"] = "\t"; +std::string document = Json::writeString(wbuilder, root); + +// Here, using a specialized Builder, we discard comments and +// record errors as we parse. +Json::CharReaderBuilder rbuilder; +rbuilder["collectComments"] = false; +std::string errs; +bool ok = Json::parseFromStream(rbuilder, std::cin, &root, &errs); +\endcode + +Yes, compile-time configuration-checking would be helpful, +but `Json::Value` lets you +write and read the builder configuration, which is better! In other words, +you can configure your JSON parser using JSON. + +CharReaders and StreamWriters are not thread-safe, but they are re-usable. +\code +Json::CharReaderBuilder rbuilder; +cfg >> rbuilder.settings_; +std::unique_ptr const reader(rbuilder.newCharReader()); +reader->parse(start, stop, &value1, &errs); +// ... +reader->parse(start, stop, &value2, &errs); +// etc. \endcode \section _pbuild Build instructions @@ -116,4 +156,9 @@ Basically JsonCpp is licensed under MIT license, or public domain if desired and recognized in your jurisdiction. 
\author Baptiste Lepilleur (originator) +\author Christopher Dunn (primary maintainer) +\version \include version +We make strong guarantees about binary-compatibility, consistent with +the Apache versioning scheme. +\sa version.h */ diff --git a/3rdparty/jsoncpp/doc/web_doxyfile.in b/3rdparty/jsoncpp/doc/web_doxyfile.in new file mode 100644 index 00000000000..07d6819a743 --- /dev/null +++ b/3rdparty/jsoncpp/doc/web_doxyfile.in @@ -0,0 +1,2301 @@ +# Doxyfile 1.8.5 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all text +# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv +# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv +# for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. 
+
+PROJECT_NAME = "JsonCpp"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
+
+PROJECT_NUMBER = %JSONCPP_VERSION%
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give the viewer
+# a quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF =
+
+# With the PROJECT_LOGO tag one can specify a logo or icon that is included in
+# the documentation. The maximum height of the logo should not exceed 55 pixels
+# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
+# to the output directory.
+
+PROJECT_LOGO =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = %DOC_TOPDIR%
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise cause
+# performance problems for the file system.
+# The default value is: NO.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# Possible values are: Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese- +# Traditional, Croatian, Czech, Danish, Dutch, English, Esperanto, Farsi, +# Finnish, French, German, Greek, Hungarian, Italian, Japanese, Japanese-en, +# Korean, Korean-en, Latvian, Norwegian, Macedonian, Persian, Polish, +# Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, Swedish, +# Turkish, Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. 
+ +ABBREVIATE_BRIEF = "The $name class" \ + "The $name widget" \ + "The $name file" \ + is \ + provides \ + specifies \ + contains \ + represents \ + a \ + an \ + the + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = YES + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = %TOPDIR% + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. 
Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
+
+STRIP_FROM_INC_PATH = %TOPDIR%/include
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful if your file system doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
+
+JAVADOC_AUTOBRIEF = YES
+
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that rational rose comments are
+# not recognized any more.
+# The default value is: NO.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
+ +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a +# new page for each member. If set to NO, the documentation of a member will be +# part of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 3 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". You can put \n's in the value part of an alias to insert +# newlines. + +ALIASES = "testCaseSetup=\link CppUT::TestCase::setUp() setUp()\endlink" \ + "testCaseRun=\link CppUT::TestCase::run() run()\endlink" \ + "testCaseTearDown=\link CppUT::TestCase::tearDown() tearDown()\endlink" \ + "json_ref=JSON (JavaScript Object Notation)" + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding "class=itcl::class" +# will allow you to use the command class in the itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. 
For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, Javascript, +# C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. For instance to make +# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C +# (default is Fortran), use: inc=Fortran f=C. +# +# Note For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. + +EXTENSION_MAPPING = + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. 
+
+MARKDOWN_SUPPORT = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word
+# or globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also make the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT = YES
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen to replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+ +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. 
When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will +# be included in the documentation. +# The default value is: NO. 
+ +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file will be +# included in the documentation. +# The default value is: NO. + +EXTRACT_STATIC = YES + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = NO + +# This flag is only useful for Objective-C code. When set to YES local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO these classes will be included in the various overviews. 
This option has +# no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# (class|struct|union) declarations. If set to NO these declarations will be +# included in the documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. If set to NO these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = YES + +# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file +# names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. +# The default value is: system dependent. + +CASE_SENSE_NAMES = NO + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = YES + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. 
+# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO the members will appear in declaration order. +# The default value is: NO. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. 
+# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. + +SORT_BY_SCOPE_NAME = YES + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the +# todo list. This list is created by putting \todo commands in the +# documentation. +# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the +# test list. This list is created by putting \test commands in the +# documentation. +# The default value is: YES. + +GENERATE_TESTLIST = NO + +# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. + +GENERATE_BUGLIST = NO + +# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if ... \endif and \cond +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. 
If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. The +# appearance of the value of individual variables and macros / defines can be +# controlled using \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES the list +# will mention the files that were used to generate the documentation. +# The default value is: YES. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). +# The default value is: YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). +# The default value is: YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command command input-file, where command is the value of the +# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided +# by doxygen. Whatever the program writes to standard output is used as the file +# version. For an example see the documentation. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. 
To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. Do not use file names with spaces, bibtex cannot handle them. See +# also \cite for info how to create references. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate +# warnings for undocumented members. 
If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. + +WARN_IF_UNDOCUMENTED = YES + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO doxygen will only warn about wrong or incomplete parameter +# documentation, but not about the absence of documentation. +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). + +WARN_LOGFILE = %WARNING_LOG_PATH% + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. 
Separate the files or directories with +# spaces. +# Note: If this tag is empty the current directory is searched. + +INPUT = ../include \ + ../src/lib_json \ + . + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: http://www.gnu.org/software/libiconv) for the list of +# possible encodings. +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank the +# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, +# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, +# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, +# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, +# *.qsf, *.as and *.js. + +FILE_PATTERNS = *.h \ + *.cpp \ + *.inl \ + *.dox + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. 
+ +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories use the pattern */test/* + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). + +EXAMPLE_PATH = .. + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank all +# files are included. + +EXAMPLE_PATTERNS = * + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude commands +# irrespective of the value of the RECURSIVE tag. +# The default value is: NO. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or directories +# that contain images that are to be included in the documentation (see the +# \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. 
Doxygen will invoke the filter program +# by executing (via popen()) the command: +# +# <filter> <input-file> +# +# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the +# name of an input file. Doxygen will then use the output that the filter +# program writes to standard output. If FILTER_PATTERNS is specified, this tag +# will be ignored. +# +# Note that the filter must not add or remove lines; it is applied before the +# code is scanned, but not when the output code is generated. If lines are added +# or removed, the anchors will not be placed correctly. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: pattern=filter +# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how +# filters are used. If the FILTER_PATTERNS tag is empty or if none of the +# patterns match the file name, INPUT_FILTER is applied. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER ) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). 
This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# function all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES, then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. 
+ +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see http://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. +# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the config file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. +# See also: Section \class. +# The default value is: YES. 
+ +VERBATIM_HEADERS = NO + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. + +ALPHABETICAL_INDEX = NO + +# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in +# which the alphabetical index list will be split. +# Minimum value: 1, maximum value: 20, default value: 5. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all classes will +# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag +# can be used to specify a prefix (or a list of prefixes) that should be ignored +# while generating the index headers. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output +# The default value is: YES. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = %HTML_OUTPUT% + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. +# +# To get valid HTML the header file that includes any scripts and style sheets +# that doxygen needs, which is dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW). It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_HEADER = header.html + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = footer.html + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. 
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user- +# defined cascading style sheet that is included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet file to the output directory. For an example +# see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the stylesheet and background images according to +# this color. Hue is specified as an angle on a colorwheel, see +# http://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. +# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to NO can help when comparing the output of multiple runs. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = YES + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). 
So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: http://developer.apple.com/tools/xcode/), introduced with +# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a +# Makefile in the HTML output directory. Running make will produce the docset in +# that directory and running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. 
+ +DOCSET_BUNDLE_ID = org.doxygen.Project + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on +# Windows. +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_HTMLHELP = %HTML_HELP% + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. 
+ +CHM_FILE = jsoncpp-%JSONCPP_VERSION%.chm + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler ( hhc.exe). If non-empty +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = "c:\Program Files\HTML Help Workshop\hhc.exe" + +# The GENERATE_CHI flag controls if a separate .chi index file is generated ( +# YES) or that it should be included in the master .chm file ( NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = YES + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated ( +# YES) or a normal table of contents ( NO) in the .chm file. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = YES + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +TOC_EXPAND = YES + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. 
The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- +# folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location of Qt's +# qhelpgenerator. 
If non-empty doxygen will try to run qhelpgenerator on the +# generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. 
If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can +# further fine-tune the look of the index. As an example, the default style +# sheet generated by doxygen has an example that shows how to put an image at +# the root of the tree instead of the PROJECT_NAME. Since the tree basically has +# the same information as the tab index, you could consider setting +# DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_TREEVIEW = NO + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. +# This tag requires that the tag GENERATE_HTML is set to YES. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 250 + +# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. 
When you change the font size after a successful +# doxygen run you need to manually remove any form_*.png images from the HTML +# output directory to force them to be regenerated. +# Minimum value: 8, maximum value: 50, default value: 10. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANSPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are not +# supported properly for IE 6.0, but are supported on all modern browsers. +# +# Note that when changing this option you need to delete any form_*.png files in +# the HTML output directory before the changes have effect. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_TRANSPARENT = YES + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see +# http://www.mathjax.org) which uses client side Javascript for the rendering +# instead of using prerendered bitmaps. Use this if you do not have LaTeX +# installed or if you want the formulas to look prettier in the HTML output. When +# enabled you may also need to install MathJax separately and configure the path +# to it using the MATHJAX_RELPATH option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +USE_MATHJAX = NO + +# When MathJax is enabled you can set the default output format to be used for +# the MathJax output. See the MathJax site (see: +# http://docs.mathjax.org/en/latest/output.html) for more details. +# Possible values are: HTML-CSS (which is slower, but has the best +# compatibility), NativeMML (i.e. MathML) and SVG. +# The default value is: HTML-CSS. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the HTML +# output directory using the MATHJAX_RELPATH option. 
The destination directory +# should contain the MathJax.js script. For instance, if the mathjax directory +# is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax +# Content Delivery Network so you can quickly see the result without installing +# MathJax. However, it is strongly recommended to install a local copy of +# MathJax from http://www.mathjax.org before deployment. +# The default value is: http://cdn.mathjax.org/mathjax/latest. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest + +# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax +# extension names that should be enabled during MathJax rendering. For example +# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_EXTENSIONS = + +# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces +# of code that will be used on startup of the MathJax code. See the MathJax site +# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an +# example see the documentation. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_CODEFILE = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box for +# the HTML output. The underlying search engine uses javascript and DHTML and +# should work on any modern browser. Note that when using HTML help +# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) +# there is already a search function so this one should typically be disabled. +# For large projects the javascript based search engine can be slow, then +# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to +# search using the keyboard; to jump to the search box use + S +# (what the is depends on the OS and browser, but it is typically +# , /