Remove obsolete regression testing files.

This commit is contained in:
Rene Rivera 2015-01-31 10:08:33 -06:00
parent f5304c0af6
commit 50ca965a87
126 changed files with 0 additions and 18258 deletions

View file

@ -1,101 +0,0 @@
# Regression test status reporting tools build Jamfile
# Copyright Rene Rivera
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
# Locate the Boost root: prefer a checkout three levels up (detected via
# its boost-build.jam); otherwise require a --boost=<path> command-line
# option and register that as the /boost project.
if [ glob ../../../boost-build.jam ]
{
use-project /boost : ../../.. ;
}
else
{
import modules ;
use-project /boost : [ MATCH --boost=(.*) : [ modules.peek : ARGV ] ] ;
}
# Pick the source layout: sources live in ../src when present
# (modularized layout), otherwise directly in the parent directory.
if ! [ glob ../src/process_jam_log.cpp ]
{
project boost/regression
:
source-location ..
;
}
else
{
project boost/regression
:
source-location ../src
;
}
# Small XML parser shared by the status tools; built as a bare object
# so each exe can link it in directly.
obj tiny_xml
:
detail/tiny_xml.cpp
:
<define>BOOST_ALL_NO_LIB=1
<define>_CRT_SECURE_NO_WARNINGS
<implicit-dependency>/boost//headers
:
release
;
explicit tiny_xml ;
# Converts raw bjam output into per-test test_log.xml files.
exe process_jam_log
:
process_jam_log.cpp
tiny_xml
/boost/filesystem//boost_filesystem/<link>static
:
<define>BOOST_ALL_NO_LIB=1
<define>_CRT_SECURE_NO_WARNINGS
<implicit-dependency>/boost//headers
:
release
;
#~ explicit process_jam_log ;
# Generates the HTML compiler status tables from test_log.xml files.
exe compiler_status
:
compiler_status.cpp
tiny_xml
/boost/filesystem//boost_filesystem/<link>static
:
<define>BOOST_ALL_NO_LIB=1
<implicit-dependency>/boost//headers
:
release
;
explicit compiler_status ;
# Generates per-library status tables for a local installation.
exe library_status
:
library_status.cpp
tiny_xml
/boost/filesystem//boost_filesystem/<link>static
:
<define>BOOST_ALL_NO_LIB=1
<implicit-dependency>/boost//headers
:
release
;
explicit library_status ;
# Full report generator built from all sources under report/.
# NOTE(review): Boost.Filesystem is requested twice below, once as
# /boost/filesystem//boost_filesystem and once as /boost//filesystem --
# likely redundant; confirm before pruning either reference.
exe boost_report
:
[ glob report/*.cpp ]
/boost/filesystem//boost_filesystem/<link>static
/boost//filesystem/<link>static
/boost//date_time/<link>static
/boost//regex/<link>static
/boost//program_options/<link>static
/boost//iostreams/<link>static
:
<define>BOOST_ALL_NO_LIB=1
<implicit-dependency>/boost//headers
:
release
;
explicit boost_report ;

View file

@ -1,59 +0,0 @@
<?xml version="1.0" encoding="Windows-1252"?>
<VisualStudioProject
ProjectType="Visual C++"
Version="7.10"
Name="compiler_status"
ProjectGUID="{81F22EF9-A1B8-46CB-9C2D-56FD4327B942}"
Keyword="MakeFileProj">
<Platforms>
<Platform
Name="Win32"/>
</Platforms>
<Configurations>
<Configuration
Name="Debug|Win32"
OutputDirectory="Debug"
IntermediateDirectory="Debug"
ConfigurationType="0">
<Tool
Name="VCNMakeTool"
BuildCommandLine="cd ..\..\..\..\tools\regression\build
..\..\jam\src\bin.ntx86\bjam.exe compiler_status.exe variant=debug"
ReBuildCommandLine="cd ..\..\..\..\tools\regression\build
..\..\jam\src\bin.ntx86\bjam.exe -a compiler_status.exe variant=debug"
CleanCommandLine="cd ..\..\..\..\tools\regression\build
call bjam --v2 msvc-7.1 debug clean
"
Output="compiler_status.exe"/>
</Configuration>
<Configuration
Name="Release|Win32"
OutputDirectory="Release"
IntermediateDirectory="Release"
ConfigurationType="0">
<Tool
Name="VCNMakeTool"
BuildCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam compiler_status variant=release link=static"
ReBuildCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam -a compiler_status variant=release link=static"
CleanCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam clean"
Output="..\..\..\..\bin.v2\tools\regression\build\msvc-7.1\release\link-static\compiler_status.exe"/>
</Configuration>
</Configurations>
<References>
</References>
<Files>
<Filter
Name="Source Files"
Filter="cpp;c;cxx;def;odl;idl;hpj;bat;asm;asmx"
UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}">
<File
RelativePath="..\..\compiler_status.cpp">
</File>
</Filter>
</Files>
<Globals>
</Globals>
</VisualStudioProject>

View file

@ -1,64 +0,0 @@
<?xml version="1.0" encoding="Windows-1252"?>
<VisualStudioProject
ProjectType="Visual C++"
Version="7.10"
Name="library_status"
ProjectGUID="{465BDB84-92B5-4C60-AF26-8BD1A61A089E}"
Keyword="MakeFileProj">
<Platforms>
<Platform
Name="Win32"/>
</Platforms>
<Configurations>
<Configuration
Name="Debug|Win32"
OutputDirectory="Debug"
IntermediateDirectory="Debug"
ConfigurationType="0">
<Tool
Name="VCNMakeTool"
BuildCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam library_status variant=debug link=static"
ReBuildCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam -a library_status variant=debug link=static"
CleanCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam clean"
Output="..\..\..\..\bin.v2\tools\regression\build\msvc-7.1\debug\link-static\library_status.exe"/>
</Configuration>
<Configuration
Name="Release|Win32"
OutputDirectory="Release"
IntermediateDirectory="Release"
ConfigurationType="0">
<Tool
Name="VCNMakeTool"
BuildCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam library_status variant=release link=static"
ReBuildCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam -a library_status variant=release link=static"
CleanCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam clean"
Output="..\..\..\..\bin.v2\tools\regression\build\msvc-7.1\release\link-static\library_status.exe"/>
</Configuration>
</Configurations>
<References>
</References>
<Files>
<Filter
Name="Source Files"
Filter="cpp;c;cxx;def;odl;idl;hpj;bat;asm;asmx"
UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}">
<File
RelativePath="..\..\library_status.cpp">
</File>
<File
RelativePath="..\..\detail\tiny_xml.cpp">
</File>
<File
RelativePath="..\..\detail\tiny_xml.hpp">
</File>
</Filter>
</Files>
<Globals>
</Globals>
</VisualStudioProject>

View file

@ -1,21 +0,0 @@
Microsoft Visual Studio Solution File, Format Version 8.00
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "process_jam_log", "process_jam_log.vcproj", "{9A751791-929F-496A-8DE7-B61020619BFA}"
ProjectSection(ProjectDependencies) = postProject
EndProjectSection
EndProject
Global
GlobalSection(SolutionConfiguration) = preSolution
Debug = Debug
Release = Release
EndGlobalSection
GlobalSection(ProjectConfiguration) = postSolution
{9A751791-929F-496A-8DE7-B61020619BFA}.Debug.ActiveCfg = Debug|Win32
{9A751791-929F-496A-8DE7-B61020619BFA}.Debug.Build.0 = Debug|Win32
{9A751791-929F-496A-8DE7-B61020619BFA}.Release.ActiveCfg = Release|Win32
{9A751791-929F-496A-8DE7-B61020619BFA}.Release.Build.0 = Release|Win32
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
EndGlobalSection
GlobalSection(ExtensibilityAddIns) = postSolution
EndGlobalSection
EndGlobal

View file

@ -1,72 +0,0 @@
<?xml version="1.0" encoding="windows-1251"?>
<VisualStudioProject
ProjectType="Visual C++"
Version="7.10"
Name="process_jam_log"
ProjectGUID="{9A751791-929F-496A-8DE7-B61020619BFA}"
RootNamespace="process_jam_log"
Keyword="MakeFileProj">
<Platforms>
<Platform
Name="Win32"/>
</Platforms>
<Configurations>
<Configuration
Name="Debug|Win32"
OutputDirectory="Debug"
IntermediateDirectory="Debug"
ConfigurationType="0">
<Tool
Name="VCNMakeTool"
BuildCommandLine="cd ..\..\..\..\tools\regression\build
..\..\jam\src\bin.ntx86\bjam.exe process_jam_log variant=debug
"
ReBuildCommandLine="cd ..\..\..\..\tools\regression\build
call bjam --v2 msvc-7.1 debug clean
call bjam --v2 msvc-7.1 debug"
CleanCommandLine="cd ..\..\..\..\tools\regression\build
call bjam --v2 msvc-7.1 debug clean
"
Output="../../../../bin.v2/tools/regression/build/msvc-7.1/debug/link-static/process_jam_log.exe"/>
</Configuration>
<Configuration
Name="Release|Win32"
OutputDirectory="Release"
IntermediateDirectory="Release"
ConfigurationType="0">
<Tool
Name="VCNMakeTool"
BuildCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam process_jam_log variant=release link=static"
ReBuildCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam -a process_jam_log variant=release link=static"
CleanCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam clean"
Output="..\..\..\..\bin.v2\tools\regression\build\msvc-7.1\release\link-static\process_jam_log.exe"/>
</Configuration>
</Configurations>
<References>
</References>
<Files>
<Filter
Name="Source Files"
Filter="cpp;c;cxx;def;odl;idl;hpj;bat;asm;asmx"
UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}">
</Filter>
<Filter
Name="Header Files"
Filter="h;hpp;hxx;hm;inl;inc;xsd"
UniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}">
</Filter>
<Filter
Name="Resource Files"
Filter="rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx"
UniqueIdentifier="{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}">
</Filter>
<File
RelativePath=".\readme.txt">
</File>
</Files>
<Globals>
</Globals>
</VisualStudioProject>

View file

@ -1,37 +0,0 @@
Microsoft Visual Studio Solution File, Format Version 8.00
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "compiler_status", "compiler_status.vcproj", "{81F22EF9-A1B8-46CB-9C2D-56FD4327B942}"
ProjectSection(ProjectDependencies) = postProject
EndProjectSection
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "process_jam_log", "process_jam_log.vcproj", "{9A751791-929F-496A-8DE7-B61020619BFA}"
ProjectSection(ProjectDependencies) = postProject
EndProjectSection
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "library_status", "library_status.vcproj", "{465BDB84-92B5-4C60-AF26-8BD1A61A089E}"
ProjectSection(ProjectDependencies) = postProject
EndProjectSection
EndProject
Global
GlobalSection(SolutionConfiguration) = preSolution
Debug = Debug
Release = Release
EndGlobalSection
GlobalSection(ProjectConfiguration) = postSolution
{81F22EF9-A1B8-46CB-9C2D-56FD4327B942}.Debug.ActiveCfg = Debug|Win32
{81F22EF9-A1B8-46CB-9C2D-56FD4327B942}.Debug.Build.0 = Debug|Win32
{81F22EF9-A1B8-46CB-9C2D-56FD4327B942}.Release.ActiveCfg = Release|Win32
{81F22EF9-A1B8-46CB-9C2D-56FD4327B942}.Release.Build.0 = Release|Win32
{9A751791-929F-496A-8DE7-B61020619BFA}.Debug.ActiveCfg = Debug|Win32
{9A751791-929F-496A-8DE7-B61020619BFA}.Debug.Build.0 = Debug|Win32
{9A751791-929F-496A-8DE7-B61020619BFA}.Release.ActiveCfg = Release|Win32
{9A751791-929F-496A-8DE7-B61020619BFA}.Release.Build.0 = Release|Win32
{465BDB84-92B5-4C60-AF26-8BD1A61A089E}.Debug.ActiveCfg = Debug|Win32
{465BDB84-92B5-4C60-AF26-8BD1A61A089E}.Debug.Build.0 = Debug|Win32
{465BDB84-92B5-4C60-AF26-8BD1A61A089E}.Release.ActiveCfg = Release|Win32
{465BDB84-92B5-4C60-AF26-8BD1A61A089E}.Release.Build.0 = Release|Win32
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
EndGlobalSection
GlobalSection(ExtensibilityAddIns) = postSolution
EndGlobalSection
EndGlobal

View file

@ -1,57 +0,0 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Language" content="en-us" />
<meta http-equiv="Content-Type" content="text/html; charset=us-ascii" />
<link rel="stylesheet" type="text/css" href=
"../../../doc/src/boostbook.css" />
<title>Regression Test Reporting Tools</title>
</head>
<body>
<h1><img src="../../../boost.png" alt="boost.png (6897 bytes)" align=
"center" width="277" height="86" /> Regression Test Reporting Tools</h1>
<p>Boost regression testing uses <a href=
"../../build/index.html">Boost.Build</a> to run the actual builds and
tests. A separate set of tools is used to generate the actual status
reports.</p>
<ul>
<li><a href=
"http://beta.boost.org/development/running_regression_tests.html">Instructions</a>
for running the regression tests as part of the published regression
tests are available at the Boost web site.</li>
<li><a href="../src/process_jam_log.cpp">process_jam_log.cpp</a> -
Processes the bjam outputs, creating a file named test_log.xml for each
test encountered.</li>
<li><a href="../src/compiler_status.cpp">compiler_status.cpp</a> -
Generates HTML status tables from test_log.xml and other files.</li>
<li><a href="../build/Jamroot.jam">Jamroot.jam</a> - Builds process_jam_log
and compiler_status executables.</li>
<li><a href="library_status.html">Library Status</a> - Runs test programs
for one or all boost libraries on your local installation and generates
complete tables to show which combinations of libraries, compilers,
compiler settings pass and fail at your local installation.</li>
</ul>
<hr />
<p>Revised $Date$</p>
<p>Copyright Beman Dawes 2003.</p>
<p>Copyright Rene Rivera 2007.</p>
<p>Distributed under the Boost Software License, Version 1.0. (See
accompanying file <a href="../../../LICENSE_1_0.txt">LICENSE_1_0.txt</a> or
copy at <a href=
"http://www.boost.org/LICENSE_1_0.txt">www.boost.org/LICENSE_1_0.txt</a>)</p>
</body>
</html>

View file

@ -1,464 +0,0 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Language" content="en-us" />
<meta http-equiv="Content-Type" content="text/html; charset=us-ascii" />
<link rel="stylesheet" type="text/css" href=
"../../../doc/src/boostbook.css" />
<title>Library Status</title>
<style type="text/css">
/*<![CDATA[*/
span.c3 {color: #FF0000; font-style: italic}
a.c2 {font-style: italic}
td.c1 {font-style: italic}
/*]]>*/
</style>
</head>
<body>
<table border="0">
<tr>
<td><img border="0" src="../../../boost.png" width="277" height="86"
alt="boost.png (6897 bytes)" /></td>
<td>
<h1>Generating Library Status Tables</h1>
</td>
</tr>
</table>
<h3>Purpose</h3>Any time one considers using a library as large and complex
as the Boost libraries, he must have a way of validating that the library
functions in his environment. This should be done when the library is
installed and anytime questions are raised regarding its applicability
and/or its usage.
<p>The procedures described here permit a user to run any combination of
tests on any or all libraries and generate a set of convenient tables which
show which libraries pass which tests under what conditions.</p>
<h3>Preliminaries</h3>Generating these tables requires a couple of utility
programs: <code>process_jam_log</code> and <code>library_status</code>.
These can be built by moving to the directory
<code>tools/regression/build</code> and invoking bjam. If all goes well
these utility programs will be found in the directory
<code>dist/bin</code>. From there they should be moved to a place in the
current path.
<h3>Running Tests for One Library</h3>
<ol>
<li>Start from your command line environment.</li>
<li>set the current directory to:../libs/&lt;library name&gt;/test</li>
<li>Invoke one of the following:
<ul>
<li><code>../../../tools/regression/src/library_test (*nix)</code>.</li>
<li><code>..\..\..\tools\regression\src\library_test
(windows)</code>.</li>
</ul>
</li>
<li>This will display a short help message describing how to set the
command line arguments for the compilers and variants you want to appear
in the final table.</li>
<li>Setting these arguments requires rudimentary knowledge of bjam usage.
Hopefully, if you've arrived at this page you've gained the required
knowledge during the installation and library build process.</li>
<li>Rerun the above command with the argument set accordingly.</li>
<li>When the command terminates, there should be a file named
"library_status.html" in the current directory.</li>
<li>Display this file with any web browser.</li>
</ol>There should appear a table similar to the following for the regex
library.
<table border="1" cellspacing="0" cellpadding="5">
<tr>
<td rowspan="4">Test Name</td>
<td align="center" colspan="4">msvc-7.1</td>
</tr>
<tr>
<td align="center" colspan="2">debug</td>
<td align="center" colspan="2">release</td>
</tr>
<tr>
<td align="center">link-static</td>
<td align="center" rowspan="2">threading-multi</td>
<td align="center">link-static</td>
<td align="center" rowspan="2">threading-multi</td>
</tr>
<tr>
<td align="center">threading-multi</td>
<td align="center">threading-multi</td>
</tr>
<tr>
<td>bad_expression_test</td>
<td class="c1" align="right">Missing</td>
<td align="right"><a class="c2" href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-bad_expression_test.test-msvc-7.1-debug-threading-multi">
Warn</a></td>
<td class="c1" align="right">Missing</td>
<td align="right"><a class="c2" href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-bad_expression_test.test-msvc-7.1-release-threading-multi">
Warn</a></td>
</tr>
<tr>
<td>captures</td>
<td class="c1" align="right">Missing</td>
<td align="right"><a href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-captures-msvc-7.1-debug-threading-multi">
<span class="c3">Fail</span></a></td>
<td class="c1" align="right">Missing</td>
<td align="right"><a href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-captures-msvc-7.1-release-threading-multi">
<span class="c3">Fail</span></a></td>
</tr>
<tr>
<td>captures_test</td>
<td class="c1" align="right">Missing</td>
<td align="right"><a class="c2" href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-captures_test.test-msvc-7.1-debug-threading-multi">
Warn</a></td>
<td class="c1" align="right">Missing</td>
<td align="right"><a class="c2" href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-captures_test.test-msvc-7.1-release-threading-multi">
Warn</a></td>
</tr>
<tr>
<td>concept_check</td>
<td class="c1" align="right">Missing</td>
<td align="right">Pass</td>
<td class="c1" align="right">Missing</td>
<td align="right">Pass</td>
</tr>
<tr>
<td>icu_concept_check</td>
<td class="c1" align="right">Missing</td>
<td align="right">Pass</td>
<td class="c1" align="right">Missing</td>
<td align="right">Pass</td>
</tr>
<tr>
<td>object_cache_test</td>
<td class="c1" align="right">Missing</td>
<td align="right"><a class="c2" href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-object_cache_test.test-msvc-7.1-debug-threading-multi">
Warn</a></td>
<td class="c1" align="right">Missing</td>
<td align="right"><a class="c2" href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-object_cache_test.test-msvc-7.1-release-threading-multi">
Warn</a></td>
</tr>
<tr>
<td>posix_api_check</td>
<td class="c1" align="right">Missing</td>
<td align="right"><a class="c2" href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-posix_api_check.test-msvc-7.1-debug-threading-multi">
Warn</a></td>
<td class="c1" align="right">Missing</td>
<td align="right"><a class="c2" href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-posix_api_check.test-msvc-7.1-release-threading-multi">
Warn</a></td>
</tr>
<tr>
<td>posix_api_check_cpp</td>
<td class="c1" align="right">Missing</td>
<td align="right">Pass</td>
<td class="c1" align="right">Missing</td>
<td align="right">Pass</td>
</tr>
<tr>
<td>recursion_test</td>
<td class="c1" align="right">Missing</td>
<td align="right"><a class="c2" href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-recursion_test.test-msvc-7.1-debug-threading-multi">
Warn</a></td>
<td class="c1" align="right">Missing</td>
<td align="right"><a class="c2" href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-recursion_test.test-msvc-7.1-release-threading-multi">
Warn</a></td>
</tr>
<tr>
<td>regex_config_info</td>
<td class="c1" align="right">Missing</td>
<td align="right"><a href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-regex_config_info.test-msvc-7.1-debug-threading-multi">
Pass</a></td>
<td class="c1" align="right">Missing</td>
<td align="right"><a href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-regex_config_info.test-msvc-7.1-release-threading-multi">
Pass</a></td>
</tr>
<tr>
<td>regex_dll_config_info</td>
<td class="c1" align="right">Missing</td>
<td align="right"><a href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-regex_dll_config_info.test-msvc-7.1-debug-threading-multi">
Pass</a></td>
<td class="c1" align="right">Missing</td>
<td align="right"><a href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-regex_dll_config_info.test-msvc-7.1-release-threading-multi">
Pass</a></td>
</tr>
<tr>
<td>regex_regress</td>
<td align="right"><a href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-regex_regress.test-msvc-7.1-debug-link-static-threading-multi">
Pass</a><sup>*</sup></td>
<td class="c1" align="right">Missing</td>
<td align="right"><a href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-regex_regress.test-msvc-7.1-release-link-static-threading-multi">
Pass</a><sup>*</sup></td>
<td class="c1" align="right">Missing</td>
</tr>
<tr>
<td>regex_regress_dll</td>
<td class="c1" align="right">Missing</td>
<td align="right"><a href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-regex_regress_dll.test-msvc-7.1-debug-threading-multi">
Pass</a><sup>*</sup></td>
<td class="c1" align="right">Missing</td>
<td align="right"><a href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-regex_regress_dll.test-msvc-7.1-release-threading-multi">
Pass</a><sup>*</sup></td>
</tr>
<tr>
<td>regex_regress_threaded</td>
<td class="c1" align="right">Missing</td>
<td align="right">Pass</td>
<td class="c1" align="right">Missing</td>
<td align="right">Pass</td>
</tr>
<tr>
<td>static_mutex_test</td>
<td class="c1" align="right">Missing</td>
<td align="right">Pass</td>
<td class="c1" align="right">Missing</td>
<td align="right">Pass</td>
</tr>
<tr>
<td>test_collate_info</td>
<td class="c1" align="right">Missing</td>
<td align="right"><a class="c2" href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-test_collate_info.test-msvc-7.1-debug-threading-multi">
Warn</a></td>
<td class="c1" align="right">Missing</td>
<td align="right"><a class="c2" href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-test_collate_info.test-msvc-7.1-release-threading-multi">
Warn</a></td>
</tr>
<tr>
<td>unicode_iterator_test</td>
<td class="c1" align="right">Missing</td>
<td align="right"><a class="c2" href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-unicode_iterator_test.test-msvc-7.1-debug-threading-multi">
Warn</a></td>
<td class="c1" align="right">Missing</td>
<td align="right"><a class="c2" href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-unicode_iterator_test.test-msvc-7.1-release-threading-multi">
Warn</a></td>
</tr>
<tr>
<td>wide_posix_api_check_c</td>
<td class="c1" align="right">Missing</td>
<td align="right"><a class="c2" href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-wide_posix_api_check_c.test-msvc-7.1-debug-threading-multi">
Warn</a></td>
<td class="c1" align="right">Missing</td>
<td align="right"><a class="c2" href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-wide_posix_api_check_c.test-msvc-7.1-release-threading-multi">
Warn</a></td>
</tr>
<tr>
<td>wide_posix_api_check_cpp</td>
<td class="c1" align="right">Missing</td>
<td align="right"><a class="c2" href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-wide_posix_api_check_cpp.test-msvc-7.1-debug-threading-multi">
Warn</a></td>
<td class="c1" align="right">Missing</td>
<td align="right"><a class="c2" href=
"links.html#C:-Boost134-bin.v2-libs-regex-test-wide_posix_api_check_cpp.test-msvc-7.1-release-threading-multi">
Warn</a></td>
</tr>
</table>
<p>This table was generated by invoking the following command line:</p>
<p><code>../../../tools/regression/src/library_test --toolset=msvc-7.1
variant=debug,release</code></p>
<p>from within the .../libs/regex/test directory.</p>
<p>This table shows the regex test results for both debug and release
versions of the library. Also it displays the fact that one of the tests is
run specifically with the static linking/multi-threading versions of the
runtime libraries. The cells marked "Missing" correspond to tests that were
not run for some reason or another. This is usually because the
corresponding <code>Jamfile.v2</code> excludes this test for the given
combination of compiler and build attributes. In this example, all tests
were run with the same compiler. If additional compilers were used, they
would appear as more columns in the table.</p>
<p>The table above is just an illustration so the links don't actually
point to anything. In the table you generated, the links will display a
page describing any errors, warnings or other available information about
the tests. If the test passes, usually, there is no additional information
and hence no link.</p>
<p>The tables are cumulative. That is, if you run one set of tests now and
tests with different attributes later, the table will contain all the
results to date. The test results are stored in
<code>../bin.v2/libs/test/&lt;library&gt;/...</code>. To reinitialize the
test results to empty, delete the corresponding files in this
directory.</p>
<p>The procedure above assumes that the table are generated within the
directory <code>../libs/&lt;library&gt;/test</code>. This is the most
common case since this directory contains the <code>Jamfile.v2</code> as
well as the source code that is used by official boost testers. However,
this is just a convention. The table can be generated for other directories
within the library. One possibility would be to generate the table for all
the examples in <code>../libs/&lt;library&gt;/example</code>. Or one might
have a special directory of performance tests which take a long time to run
and hence are not suitable for running by official boost testers. Just
remember that library status table is generated in the directory from which
the <code>library_test</code> command is invoked.</p>
<h3>Running Tests for All Libraries</h3>For those with *nix or cygwin
command line shells, there is a shell script that can be run from the boost
root directory:
<p><code>tools/regression/src/library_test_all</code></p>
<p>The command line arguments are the same as for running the test for one
library. This script creates all the html files in all the test directories
as well as an html page in the <code>status</code> directory named
<code>library_status_summary.html</code>. This can be used to browse
through all test results for all test in all libraries.</p>
<hr />
<p>Copyright 2011 Bryce Lelbach.</p>
<p>Copyright 2007-2011 Robert Ramey.</p>
<p>Distributed under the Boost Software
License, Version 1.0. (See accompanying file LICENSE_1_0.txt or
http://www.boost.org/LICENSE_1_0.txt)</p>
<p>Revised $Date$</p>
</body>
</html>

View file

@ -1,23 +0,0 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="refresh" content="0; URL=doc/index.html" />
<title></title>
</head>
<body>
Automatic redirection failed, please go to <a href=
"doc/index.html">doc/index.html</a>
<hr />
<p>Copyright Rene Rivera, 2007</p>
<p>Distributed under the Boost Software License, Version 1.0. (See
accompanying file <a href="../../LICENSE_1_0.txt">LICENSE_1_0.txt</a> or
copy at <a href=
"http://www.boost.org/LICENSE_1_0.txt">www.boost.org/LICENSE_1_0.txt</a>)</p>
</body>
</html>

View file

@ -1,70 +0,0 @@
#!/bin/sh
#~ Copyright Redshift Software, Inc. 2007
#~ Distributed under the Boost Software License, Version 1.0.
#~ (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Stream a bzip2-compressed pax archive of a Boost Subversion branch to
# stdout, deleting each exported file as soon as it has been archived so
# the temporary export tree never holds a full copy.
# Usage: <script> <branch-path-under-/home/subversion/boost>
export PATH=/bin:/usr/bin:${PATH}
timestamp=`date +%F-%H-%M-%S-%Z`
branch=$1
revision=`svn info file:///home/subversion/boost/${branch} | grep '^Revision:' | cut --byte=11-`
# NOTE(review): ${1/\/*} is a bash-only pattern substitution (drops the
# first "/" and everything after it); a strictly POSIX /bin/sh rejects
# this -- presumably /bin/sh is bash on the original host. TODO confirm.
tag=boost-${1/\/*}-${timestamp}
export_dir=boost-$$
# Remove files as listed in stdin, the assumption is that processing
# of the file is complete and can be removed.
rm_c()
{
    while read f; do
        rm -f ${f}
    done
}
# Generate the export file tree, and incrementally output the files
# created.
svn_export()
{
    svn export -r ${revision} file:///home/subversion/boost/${branch} ${tag}
    echo "Revision: ${revision}" > ${tag}/svn_info.txt
    echo "---- ${tag}/svn_info.txt"
}
# Create the archive incrementally, deleting files as we are done
# adding them to the archive.
make_archive()
{
    # Pipeline: cut presumably strips svn export's status prefix from each
    # output line (verify column width), star archives the listed paths to
    # stdout as pax, bzip2 compresses, tee copies the archive to $1 while
    # tar -jtf lists the entries just written so rm_c can delete them.
    svn_export \
        | cut --bytes=6- \
        | star -c -D -to-stdout -d artype=pax list=- 2>/dev/null \
        | bzip2 -6 -c \
        | tee $1 \
        | tar -jtf - \
        | rm_c
}
# Normal mode: build the archive through a FIFO in a scratch dir under
# /tmp and stream it to stdout, then clean up.
run()
{
    cd /tmp
    rm -rf ${export_dir}
    mkdir ${export_dir}
    cd ${export_dir}
    mkfifo out.tbz2
    make_archive out.tbz2 &
    cat out.tbz2
    cd /tmp
    rm -rf ${export_dir}
}
# Debug mode: same as run() but works in the current directory and
# writes the archive to ../${tag}.tar.bz2 instead of stdout.
run_debug()
{
    rm -rf ${export_dir}
    mkdir ${export_dir}
    cd ${export_dir}
    mkfifo out.tbz2
    make_archive out.tbz2 &
    cat out.tbz2 > ../${tag}.tar.bz2
    cd ..
    rm -rf ${export_dir}
}
run
#run_debug

View file

@ -1,546 +0,0 @@
# Copyright (c) MetaCommunications, Inc. 2003-2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import xml.sax.saxutils
import zipfile
import ftplib
import time
import stat
import xml.dom.minidom
import xmlrpclib
import httplib
import os.path
import string
import sys
import re
import urlparse
def process_xml_file( input_file, output_file ):
    # Append one test-log XML file to the aggregate output, scrubbing each
    # line through the shared character-translation table first.
    utils.log( 'Processing test log "%s"' % input_file )
    log_file = open( input_file, 'r' )
    try:
        raw_lines = log_file.readlines()
    finally:
        log_file.close()
    cleaned = [ string.translate( line, utils.char_translation_table )
                for line in raw_lines ]
    output_file.writelines( cleaned )
def process_test_log_files( output_file, dir, names ):
    # os.path.walk visitor: feed every test_log.xml found under 'dir' into
    # the aggregate output file; all other names are skipped.
    for name in names:
        if os.path.basename( name ) != 'test_log.xml':
            continue
        process_xml_file( os.path.join( dir, name ), output_file )
def collect_test_logs( input_dirs, test_results_writer ):
    # Walk each input directory tree and append every test_log.xml found
    # into the supplied results writer.
    __log__ = 1
    utils.log( 'Collecting test logs ...' )
    for directory in input_dirs:
        utils.log( 'Walking directory "%s" ...' % directory )
        os.path.walk( directory, process_test_log_files, test_results_writer )
# Map a test_log.xml result attribute value to the status vocabulary the
# Dart dashboard expects; the empty-string key is the fallback for a
# missing result attribute.
dart_status_from_result = {
    'succeed': 'passed',
    'fail': 'failed',
    'note': 'passed',
    '': 'notrun'
    }
# Dart project name per Boost tag; only 'trunk' is mapped explicitly and
# the empty-string key serves as the default.
dart_project = {
    'trunk': 'Boost_HEAD',
    '': 'Boost_HEAD'
    }
# Dart submission track per regression run type ('' is the default).
dart_track = {
    'full': 'Nightly',
    'incremental': 'Continuous',
    '': 'Experimental'
    }
# 256-entry translation table for str.translate(): newline and carriage
# return pass through, every other control byte and every byte >= 0x80 is
# replaced with '?', and printable ASCII is kept unchanged.
ascii_only_table = ''.join(
    chr(code) if chr(code) in '\n\r' or 32 <= code < 0x80 else '?'
    for code in range(256) )
class xmlrpcProxyTransport(xmlrpclib.Transport):
    # XML-RPC transport that routes requests through an HTTP proxy by
    # connecting to the proxy and issuing absolute-URI POST requests.
    # NOTE(review): Python 2 only -- relies on httplib.HTTP and the
    # pre-2.7 xmlrpclib.Transport hook methods.
    def __init__(self, proxy):
        # proxy: address of the HTTP proxy to tunnel through
        # (presumably "host:port" -- confirm against callers).
        self.proxy = proxy
    def make_connection(self, host):
        # Remember the real target host but open the socket to the proxy.
        self.realhost = host
        return httplib.HTTP(self.proxy)
    def send_request(self, connection, handler, request_body):
        # Proxies require the full URL in the request line.
        connection.putrequest('POST','http://%s%s' % (self.realhost,handler))
    def send_host(self, connection, host):
        # The Host header must name the real target, not the proxy.
        connection.putheader('Host',self.realhost)
def publish_test_logs(
    input_dirs,
    runner_id, tag, platform, comment_file, timestamp, user, source, run_type,
    dart_server = None,
    http_proxy = None,
    **unused
    ):
    # Walk the input directories and push every test_log.xml found to a
    # Dart dashboard server (when dart_server is given), batching one
    # XML-RPC submission document per toolset.
    # NOTE(review): Python 2 only ("except Exception, e", dict.has_key,
    # xmlrpclib/httplib module names).
    __log__ = 1
    utils.log( 'Publishing test logs ...' )
    dart_rpc = None
    # One in-progress DartSubmission DOM per toolset name.
    dart_dom = {}
    # os.path.walk visitor: convert each test_log.xml into <Test> elements
    # appended to the per-toolset submission document.
    def _publish_test_log_files_ ( unused, dir, names ):
        for file in names:
            if os.path.basename( file ) == 'test_log.xml':
                utils.log( 'Publishing test log "%s"' % os.path.join(dir,file) )
                if dart_server:
                    # Scrub non-ASCII bytes before XML parsing.
                    log_xml = open(os.path.join(dir,file)).read().translate(ascii_only_table)
                    #~ utils.log( '--- XML:\n%s' % log_xml)
                    #~ It seems possible to get an empty XML result file :-(
                    if log_xml == "": continue
                    log_dom = xml.dom.minidom.parseString(log_xml)
                    test = {
                        'library': log_dom.documentElement.getAttribute('library'),
                        'test-name': log_dom.documentElement.getAttribute('test-name'),
                        'toolset': log_dom.documentElement.getAttribute('toolset')
                        }
                    if not test['test-name'] or test['test-name'] == '':
                        test['test-name'] = 'unknown'
                    if not test['toolset'] or test['toolset'] == '':
                        test['toolset'] = 'unknown'
                    # Lazily start a submission document for this toolset.
                    if not dart_dom.has_key(test['toolset']):
                        dart_dom[test['toolset']] = xml.dom.minidom.parseString(
'''<?xml version="1.0" encoding="UTF-8"?>
<DartSubmission version="2.0" createdby="collect_and_upload_logs.py">
<Site>%(site)s</Site>
<BuildName>%(buildname)s</BuildName>
<Track>%(track)s</Track>
<DateTimeStamp>%(datetimestamp)s</DateTimeStamp>
</DartSubmission>
''' % {
                            'site': runner_id,
                            'buildname': "%s -- %s (%s)" % (platform,test['toolset'],run_type),
                            'track': dart_track[run_type],
                            'datetimestamp' : timestamp
                            } )
                    submission_dom = dart_dom[test['toolset']]
                    # Each child element of the log (compile/link/run, per
                    # the result attributes used below) becomes one Dart
                    # <Test> entry.
                    for node in log_dom.documentElement.childNodes:
                        if node.nodeType == xml.dom.Node.ELEMENT_NODE:
                            if node.firstChild:
                                log_data = xml.sax.saxutils.escape(node.firstChild.data)
                            else:
                                log_data = ''
                            test_dom = xml.dom.minidom.parseString('''<?xml version="1.0" encoding="UTF-8"?>
<Test>
<Name>.Test.Boost.%(tag)s.%(library)s.%(test-name)s.%(type)s</Name>
<Status>%(result)s</Status>
<Measurement name="Toolset" type="text/string">%(toolset)s</Measurement>
<Measurement name="Timestamp" type="text/string">%(timestamp)s</Measurement>
<Measurement name="Log" type="text/text">%(log)s</Measurement>
</Test>
''' % {
                                'tag': tag,
                                'library': test['library'],
                                'test-name': test['test-name'],
                                'toolset': test['toolset'],
                                'type': node.nodeName,
                                'result': dart_status_from_result[node.getAttribute('result')],
                                'timestamp': node.getAttribute('timestamp'),
                                'log': log_data
                                })
                            submission_dom.documentElement.appendChild(
                                test_dom.documentElement.cloneNode(1) )
    for input_dir in input_dirs:
        utils.log( 'Walking directory "%s" ...' % input_dir )
        os.path.walk( input_dir, _publish_test_log_files_, None )
    if dart_server:
        try:
            rpc_transport = None
            if http_proxy:
                rpc_transport = xmlrpcProxyTransport(http_proxy)
            dart_rpc = xmlrpclib.ServerProxy(
                'http://%s/%s/Command/' % (dart_server,dart_project[tag]),
                rpc_transport )
            # Submit one accumulated document per toolset.
            for dom in dart_dom.values():
                #~ utils.log('Dart XML: %s' % dom.toxml('utf-8'))
                dart_rpc.Submit.put(xmlrpclib.Binary(dom.toxml('utf-8')))
        except Exception, e:
            utils.log('Dart server error: %s' % e)
def upload_to_ftp( tag, results_file, ftp_proxy, debug_level, ftp_url ):
    # Upload the results archive to the FTP drop site, creating the
    # per-tag directory hierarchy on the server if it is missing.
    if not ftp_url:
        # default anonymous drop location
        ftp_url = 'ftp://anonymous@boost.cowic.de/boost/do-not-publish-this-url/results/'

    utils.log( 'Uploading log archive "%s" to %s/%s' % ( results_file, ftp_url, tag ) )

    # Pull user/password/host/path out of the URL.  A missing password
    # defaults to 'anonymous'.
    parts = urlparse.urlparse( ftp_url )
    netloc_pieces = parts[1].split( '@' )
    credentials = netloc_pieces[0]
    ftp_user = credentials.split( ':' )[0]
    ftp_password = ( credentials + ':anonymous' ).split( ':' )[1]
    ftp_site = netloc_pieces[1].split( ':' )[0]
    ftp_path = parts[2]

    if ftp_proxy:
        utils.log( '    Connecting through FTP proxy server "%s"' % ftp_proxy )
        ftp = ftplib.FTP( ftp_proxy )
        ftp.set_debuglevel( debug_level )
        ftp.set_pasv( 0 ) # turn off PASV mode
        ftp.login( '%s@%s' % ( ftp_user, ftp_site ), ftp_password )
    else:
        ftp = ftplib.FTP( ftp_site )
        ftp.set_debuglevel( debug_level )
        ftp.login( ftp_user, ftp_password )

    ftp.cwd( ftp_path )
    try:
        ftp.cwd( tag )
    except ftplib.error_perm:
        # tag directory does not exist yet: create and enter each component
        for component in tag.split( '/' ):
            ftp.mkd( component )
            ftp.cwd( component )

    f = open( results_file, 'rb' )
    ftp.storbinary( 'STOR %s' % os.path.basename( results_file ), f )
    ftp.quit()
def copy_comments( results_xml, comment_file ):
    # Emit the <comment> element of the results document: the contents
    # of the user-supplied HTML comment file (if present) followed by a
    # quoted echo of this script's command line.
    results_xml.startElement( 'comment', {} )

    if not os.path.exists( comment_file ):
        utils.log( 'Warning: comment file "%s" is not found.' % comment_file )
    else:
        utils.log( 'Reading comments file "%s"...' % comment_file )
        f = open( comment_file, 'r' )
        try:
            results_xml.characters( f.read() )
        finally:
            f.close()

    # Reconstruct the command line, wrapped to ~80 columns with aligned
    # backslash continuations.
    wrapped = ['']
    for raw_arg in sys.argv:
        # Make sure that the ftp details are hidden
        safe_arg = re.sub( 'ftp://.*$', 'ftp://XXXXX', raw_arg )
        # Escape quotes
        safe_arg = re.sub( r'(\\|")', r'\\\1', safe_arg )
        # Quote arguments if needed
        if safe_arg.find( ' ' ) != -1:
            safe_arg = '"%s"' % safe_arg
        if len( wrapped[-1] ) + len( safe_arg ) + 2 >= 80:
            # align backslashes
            wrapped[-1] += ' ' * ( 79 - len( wrapped[-1] ) )
            # indent lines after the first
            wrapped.append( '    ' )
        wrapped[-1] += ( safe_arg + ' ' )

    for fragment in ( '<hr>', '<dl>', '<dt>Command Line</dt>', '<dd>', '<pre>' ):
        results_xml.characters( fragment )
    results_xml.characters( '\\\n'.join( wrapped ) )
    for fragment in ( '</pre>', '</dd>', '</dl>\n' ):
        results_xml.characters( fragment )

    results_xml.endElement( 'comment' )
def compress_file( file_path, archive_path ):
utils.log( 'Compressing "%s"...' % file_path )
try:
z = zipfile.ZipFile( archive_path, 'w', zipfile.ZIP_DEFLATED )
z.write( file_path, os.path.basename( file_path ) )
z.close()
utils.log( 'Done writing "%s".'% archive_path )
except Exception, msg:
utils.log( 'Warning: Compressing falied (%s)' % msg )
utils.log( ' Trying to compress using a platform-specific tool...' )
try: import zip_cmd
except ImportError:
script_dir = os.path.dirname( os.path.abspath( sys.argv[0] ) )
utils.log( 'Could not find \'zip_cmd\' module in the script directory (%s).' % script_dir )
raise Exception( 'Compressing failed!' )
else:
if os.path.exists( archive_path ):
os.unlink( archive_path )
utils.log( 'Removing stale "%s".' % archive_path )
zip_cmd.main( file_path, archive_path )
utils.log( 'Done compressing "%s".' % archive_path )
def read_timestamp( file ):
    # Return the modification time of 'file' as a UTC struct_time.
    # When the file does not exist, warn and substitute "now" (UTC).
    if os.path.exists( file ):
        return time.gmtime( os.stat( file ).st_mtime )
    fallback = time.gmtime()
    utils.log( 'Warning: timestamp file "%s" does not exist'% file )
    utils.log( 'Using current UTC time (%s)' % fallback )
    return fallback
def collect_logs(
        results_dir
        , runner_id
        , tag
        , platform
        , comment_file
        , timestamp_file
        , user
        , source
        , run_type
        , dart_server = None
        , http_proxy = None
        , revision = ''
        , **unused
        ):
    # Merge every per-test log under results_dir into one
    # '<runner_id>.xml' document and compress it to '<runner_id>.zip'.
    # One timestamp string (from timestamp_file's mtime) labels the run.
    run_timestamp = time.strftime( '%Y-%m-%dT%H:%M:%SZ', read_timestamp( timestamp_file ) )

    # Mirror individual logs to the Dart server first, when configured.
    if dart_server:
        publish_test_logs(
            [ results_dir ],
            runner_id, tag, platform, comment_file, run_timestamp, user,
            source, run_type,
            dart_server = dart_server,
            http_proxy = http_proxy )

    merged_path = os.path.join( results_dir, '%s.xml' % runner_id )
    utils.log( 'Collecting test logs into "%s"...' % merged_path )

    out = open( merged_path, 'w' )
    doc = xml.sax.saxutils.XMLGenerator( out )
    doc.startDocument()
    doc.startElement(
        'test-run'
        , {
            'tag': tag
            , 'platform': platform
            , 'runner': runner_id
            , 'timestamp': run_timestamp
            , 'source': source
            , 'run-type': run_type
            , 'revision': revision
            }
        )
    copy_comments( doc, comment_file )
    collect_test_logs( [ results_dir ], out )
    doc.endElement( "test-run" )
    doc.endDocument()
    out.close()
    utils.log( 'Done writing "%s".' % merged_path )

    # Ship the merged document as a zip archive next to it.
    compress_file(
        merged_path
        , os.path.join( results_dir, '%s.zip' % runner_id )
        )
def upload_logs(
        results_dir
        , runner_id
        , tag
        , user
        , ftp_proxy
        , debug_level
        , send_bjam_log = False
        , timestamp_file = None
        , dart_server = None
        , ftp_url = None
        , **unused
        ):
    # Upload the merged results archive for this runner and, when
    # requested, a timestamped zip of the raw bjam log as well.
    results_archive = os.path.join( results_dir, '%s.zip' % runner_id )
    upload_to_ftp( tag, results_archive, ftp_proxy, debug_level, ftp_url )

    if not send_bjam_log:
        return

    bjam_log_path = os.path.join( results_dir, 'bjam.log' )
    # Fall back to the bjam log itself as the timestamp source.
    stamp_source = timestamp_file or bjam_log_path
    stamp = time.strftime( '%Y-%m-%d-%H-%M-%S', read_timestamp( stamp_source ) )
    bjam_archive = os.path.join( results_dir, '%s.%s.log.zip' % ( runner_id, stamp ) )
    compress_file( bjam_log_path, bjam_archive )
    upload_to_ftp( '%s/logs' % tag, bjam_archive, ftp_proxy, debug_level, ftp_url )
def collect_and_upload_logs(
        results_dir
        , runner_id
        , tag
        , platform
        , comment_file
        , timestamp_file
        , user
        , source
        , run_type
        , revision = None
        , ftp_proxy = None
        , debug_level = 0
        , send_bjam_log = False
        , dart_server = None
        , http_proxy = None
        , ftp_url = None
        , **unused
        ):
    # Convenience driver: collect_logs() followed by upload_logs().
    collect_logs(
        results_dir, runner_id, tag, platform, comment_file,
        timestamp_file, user, source, run_type,
        revision = revision,
        dart_server = dart_server,
        http_proxy = http_proxy )

    upload_logs(
        results_dir, runner_id, tag, user, ftp_proxy, debug_level,
        send_bjam_log, timestamp_file,
        dart_server = dart_server,
        ftp_url = ftp_url )
def accept_args( args ):
    # Parse command-line options and map them onto the keyword
    # arguments expected by the command functions (collect_logs,
    # upload_logs, collect_and_upload_logs), which are invoked via
    # commands[name]( **accept_args( args ) ).
    args_spec = [
        'locate-root='
        , 'runner='
        , 'tag='
        , 'platform='
        , 'comment='
        , 'timestamp='
        , 'source='
        , 'run-type='
        , 'user='
        , 'ftp-proxy='
        , 'proxy='
        , 'debug-level='
        , 'send-bjam-log'
        , 'help'
        , 'dart-server='
        , 'revision='
        , 'ftp='
        ]

    options = {
        '--tag'         : 'trunk'
        , '--platform'    : sys.platform
        , '--comment'     : 'comment.html'
        , '--timestamp'   : 'timestamp'
        , '--user'        : None
        , '--source'      : 'SVN'
        , '--run-type'    : 'full'
        , '--ftp-proxy'   : None
        , '--proxy'       : None
        , '--debug-level' : 0
        , '--dart-server' : 'beta.boost.org:8081'
        , '--revision'    : None
        , '--ftp'         : None
        }

    utils.accept_args( args_spec, args, options, usage )

    return {
        'results_dir'      : options[ '--locate-root' ]
        , 'runner_id'      : options[ '--runner' ]
        , 'tag'            : options[ '--tag' ]
        , 'platform'       : options[ '--platform']
        , 'comment_file'   : options[ '--comment' ]
        , 'timestamp_file' : options[ '--timestamp' ]
        , 'user'           : options[ '--user' ]
        , 'source'         : options[ '--source' ]
        , 'run_type'       : options[ '--run-type' ]
        , 'ftp_proxy'      : options[ '--ftp-proxy' ]
        , 'http_proxy'     : options[ '--proxy' ]
        , 'debug_level'    : int(options[ '--debug-level' ])
        # 'has_key' is deprecated; the 'in' operator works identically.
        , 'send_bjam_log'  : '--send-bjam-log' in options
        , 'dart_server'    : options[ '--dart-server' ]
        , 'revision'       : options[ '--revision' ]
        # Bug fix: this key used to be 'ftp', which no command function
        # accepts (they all take 'ftp_url'), so --ftp was silently
        # swallowed by **unused and the default URL was always used.
        , 'ftp_url'        : options[ '--ftp' ]
        }
# Dispatch table mapping the sub-command name given on the command line
# to its implementation; main() falls back to 'collect-and-upload'.
commands = {
      'collect-and-upload' : collect_and_upload_logs
    , 'collect-logs' : collect_logs
    , 'upload-logs' : upload_logs
    }
def usage():
    # Print command-line help (command list and option summary) to
    # stdout; passed to utils.accept_args as the --help handler.
    print 'Usage: %s [command] [options]' % os.path.basename( sys.argv[0] )
    print '''
Commands:
\t%s
Options:
\t--locate-root   directory to to scan for "test_log.xml" files
\t--runner        runner ID (e.g. "Metacomm")
\t--timestamp     path to a file which modification time will be used
\t                as a timestamp of the run ("timestamp" by default)
\t--comment       an HTML comment file to be inserted in the reports
\t                ("comment.html" by default)
\t--tag           the tag for the results ("trunk" by default)
\t--user          SourceForge user name for a shell account (optional)
\t--source        where Boost sources came from ("SVN" or "tarball";
\t                "SVN" by default)
\t--run-type      "incremental" or "full" ("full" by default)
\t--send-bjam-log in addition to regular XML results, send in full bjam
\t                log of the regression run
\t--proxy         HTTP proxy server address and port (e.g.
\t                'http://www.someproxy.com:3128', optional)
\t--ftp-proxy     FTP proxy server (e.g. 'ftpproxy', optional)
\t--debug-level   debugging level; controls the amount of debugging
\t                output printed; 0 by default (no debug output)
\t--dart-server   The dart server to send results to.
\t--ftp           The ftp URL to upload results to.
''' % '\n\t'.join( commands.keys() )
def main():
    # Select the sub-command (first argv token if it names one,
    # otherwise the default 'collect-and-upload') and invoke it with
    # the parsed remaining arguments.
    if len( sys.argv ) > 1 and sys.argv[1] in commands:
        command_name = sys.argv[1]
        rest = sys.argv[ 2: ]
    else:
        command_name = 'collect-and-upload'
        rest = sys.argv[ 1: ]
    commands[ command_name ]( **accept_args( rest ) )
# Module bootstrap: when imported, just bring in the sibling 'utils'
# module; when run as a script, first locate the 'xsl_reports' package
# root by walking up from this script's directory and put it on
# sys.path so 'utils' can be imported, then dispatch to main().
if __name__ != '__main__':  import utils
else:
    # in absence of relative imports, find the xsl_reports root by hand
    xsl_path = os.path.abspath( os.path.dirname( sys.argv[ 0 ] ) )
    while os.path.basename( xsl_path ) != 'xsl_reports': xsl_path = os.path.dirname( xsl_path )
    sys.path.append( xsl_path )
    import utils
    main()

File diff suppressed because it is too large Load diff

View file

@ -1,167 +0,0 @@
// tiny XML sub-set tools implementation -----------------------------------//
// (C) Copyright Beman Dawes 2002. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include "tiny_xml.hpp"
#include <cassert>
#include <cstring>
namespace
{
  // Advance past spaces, CR, LF and tabs; on return c holds the first
  // non-whitespace character.  Throws "xml: unexpected eof" at end of
  // input.  (Previously a failed get() left c unchanged, so hitting
  // EOF while skipping whitespace looped forever.)
  void eat_whitespace( char & c, std::istream & in )
  {
    while ( c == ' ' || c == '\r' || c == '\n' || c == '\t' )
    {
      if ( !in.get( c ) )
        throw std::string("xml: unexpected eof");
    }
  }

  // Read an XML name (letters, digits, '_', '-', '.') starting at c.
  // On return c holds the first character after the name.
  std::string get_name( char & c, std::istream & in )
  {
    std::string result;
    eat_whitespace( c, in );
    while ( std::strchr(
      "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.", c )
      != 0 )
    {
      result += c;
      if(!in.get( c ))
        throw std::string("xml: unexpected eof");
    }
    return result;
  }

  // Skip whitespace, require the next character to be 'delim', then
  // load the following character into c.  'msg' is appended to the
  // error text for diagnostics.
  void eat_delim( char & c, std::istream & in,
    char delim, const std::string & msg )
  {
    eat_whitespace( c, in );
    if ( c != delim )
      throw std::string("xml syntax error, expected ") + delim
        + " (" + msg + ")";
    in.get( c );
  }

  // Accumulate characters up to (not including) the closing double
  // quote, consume the quote, and load the following character into c.
  // Throws on EOF in an unterminated value (previously looped forever).
  std::string get_value( char & c, std::istream & in )
  {
    std::string result;
    while ( c != '\"' )
    {
      result += c;
      if ( !in.get( c ) )
        throw std::string("xml: unexpected eof");
    }
    in.get( c );
    return result;
  }

}
namespace boost
{
  namespace tiny_xml
  {

    //  parse  -----------------------------------------------------------------//
    //  Recursive-descent parser for the tiny XML subset.  Reads one
    //  element (with attributes, nested sub-elements and text content)
    //  from 'in'; 'msg' is appended to error strings for diagnostics.
    //  Precondition: stream positioned at the initial '<' or just after
    //  it (see tiny_xml.hpp).  Throws std::string on syntax error/EOF.
    element_ptr parse( std::istream & in, const std::string & msg )
    {
      char c = 0;  // current character
      element_ptr e( new element );

      if(!in.get( c ))
        throw std::string("xml: unexpected eof");
      // Accept being positioned either at '<' or just after it.
      if ( c == '<' )
        if(!in.get( c ))
          throw std::string("xml: unexpected eof");

      e->name = get_name( c, in );
      eat_whitespace( c, in );

      // attributes: name="value" pairs until the closing '>'
      while ( c != '>' )
      {
        attribute a;
        a.name = get_name( c, in );
        eat_delim( c, in, '=', msg );
        eat_delim( c, in, '\"', msg );
        a.value = get_value( c, in );
        e->attributes.push_back( a );
        eat_whitespace( c, in );
      }
      if(!in.get( c )) // next after '>'
        throw std::string("xml: unexpected eof");
      eat_whitespace( c, in );

      // sub-elements: each '<' not followed by '/' starts a child
      while ( c == '<' )
      {
        if ( in.peek() == '/' ) break;   // closing tag of this element
        e->elements.push_back( parse( in, msg ) );
        in.get( c ); // next after '>'
        eat_whitespace( c, in );
      }

      // content: raw text up to the next '<' (a leading '\n' is added)
      if ( c != '<' )
      {
        e->content += '\n';
        while ( c != '<' )
        {
          e->content += c;
          if(!in.get( c ))
            throw std::string("xml: unexpected eof");
        }
      }

      assert( c == '<' );
      if(!in.get( c )) // next after '<'
        throw std::string("xml: unexpected eof");
      // closing tag: "/name>" must match the opening name
      eat_delim( c, in, '/', msg );
      std::string end_name( get_name( c, in ) );
      if ( e->name != end_name )
        throw std::string("xml syntax error: beginning name ")
          + e->name + " did not match end name " + end_name
          + " (" + msg + ")";
      eat_delim( c, in, '>', msg );
      return e;
    }

    //  write  ---------------------------------------------------------------//
    //  Serialize element 'e' (attributes, children recursively, then
    //  content) to 'out' in the same subset syntax parse() accepts.
    void write( const element & e, std::ostream & out )
    {
      out << "<" << e.name;
      if ( !e.attributes.empty() )
      {
        for( attribute_list::const_iterator itr = e.attributes.begin();
          itr != e.attributes.end(); ++itr )
        {
          out << " " << itr->name << "=\"" << itr->value << "\"";
        }
      }
      out << ">";
      if ( !e.elements.empty() )
      {
        out << "\n";
        for( element_list::const_iterator itr = e.elements.begin();
          itr != e.elements.end(); ++itr )
        {
          write( **itr, out );
        }
      }
      if ( !e.content.empty() )
      {
        out << e.content;
      }
      out << "</" << e.name << ">\n";
    }

  } // namespace tiny_xml
} // namespace boost

View file

@ -1,70 +0,0 @@
// tiny XML sub-set tools --------------------------------------------------//
// (C) Copyright Beman Dawes 2002. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Provides self-contained tools for this XML sub-set:
//
// element ::= { "<" name { name "=" "\"" value "\"" } ">"
// {element} [contents] "</" name ">" }
//
// The point of "self-contained" is to minimize tool-chain dependencies.
#ifndef BOOST_TINY_XML_H
#define BOOST_TINY_XML_H

#include "boost/smart_ptr.hpp"  // for shared_ptr
#include "boost/utility.hpp"    // for noncopyable
#include <list>
#include <iostream>
#include <string>

namespace boost
{
  namespace tiny_xml
  {
    class element;

    // A single name="value" attribute of an element.
    struct attribute
    {
      std::string name;
      std::string value;

      attribute(){}
      attribute( const std::string & name, const std::string & value )
        : name(name), value(value) {}
    };

    typedef boost::shared_ptr< element >  element_ptr;
    typedef std::list< element_ptr  >     element_list;
    typedef std::list< attribute >        attribute_list;

    // One parsed XML element: its tag name, attributes, child
    // elements (shared_ptr-owned) and raw text content.
    class element
      : private boost::noncopyable  // because deep copy semantics would be required
    {
     public:
      std::string     name;
      attribute_list  attributes;
      element_list    elements;
      std::string     content;

      element() {}
      explicit element( const std::string & name ) : name(name) {}
    };

    element_ptr  parse( std::istream & in, const std::string & msg );
    //  Precondition: stream positioned at either the initial "<"
    //  or the first character after the initial "<".
    //  Postcondition: stream positioned at the first character after final
    //  ">" (or eof).
    //  Returns: an element_ptr to an element representing the parsed stream.
    //  Throws: std::string on syntax error. msg appended to what() string.

    void write( const element & e, std::ostream & out );

  }
}

#endif  // BOOST_TINY_XML_H

View file

@ -1,17 +0,0 @@
// tiny XML test program ---------------------------------------------------//
// Copyright Beman Dawes 2002. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include "tiny_xml.hpp"
#include <iostream>
int main()
{
  // Round-trip check: parse the tiny-XML subset from stdin and write
  // it back to stdout.
  // Fix: parse() takes a mandatory diagnostic-context string as its
  // second argument (see tiny_xml.hpp); the original single-argument
  // call did not match the declaration and could not compile.
  boost::tiny_xml::element_ptr tree( boost::tiny_xml::parse( std::cin, "std::cin" ) );
  boost::tiny_xml::write( *tree, std::cout );
  return 0;
}

View file

@ -1,17 +0,0 @@
<root>
<frontmatter>
// (C) Copyright Beman Dawes 2002. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
</frontmatter>
<element-1 at-1="abcd" at-2 = "defg" >
<element-1a>
It's Howdy Doody time!
</element-1a>
<element-1b>It's not Howdy Doody time!</element-1b>
</element-1>
<element-2>
It's
Eastern Standard time!
</element-2>
</root>

View file

@ -1,889 +0,0 @@
// Generate Library Status HTML from jam regression test output -----------//
// Copyright Bryce Lelbach 2011
// Copyright Beman Dawes 2002-2011.
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// See http://www.boost.org/tools/regression/ for documentation.
//Note: This version of the original program builds a large table
//which includes all build variations such as build/release, static/dynamic, etc.
/*******************************************************************************
This program was designed to work unchanged on all platforms and
configurations. All output which is platform or configuration dependent
is obtained from external sources such as the .xml file from
process_jam_log execution, the tools/build/xxx-tools.jam files, or the
output of the config_info tests.
Please avoid adding platform or configuration dependencies during
program maintenance.
*******************************************************************************/
#include <boost/filesystem/operations.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/foreach.hpp>
namespace fs = boost::filesystem;
#include "detail/tiny_xml.hpp"
namespace xml = boost::tiny_xml;
#include <boost/iterator/transform_iterator.hpp>
#include <cstdlib> // for abort, exit
#include <string>
#include <vector>
#include <set>
#include <utility> // for make_pair on STLPort
#include <map>
#include <algorithm> // max_element, find_if
#include <iostream>
#include <fstream>
#include <ctime>
#include <stdexcept>
#include <cassert>
#include <utility> // for pair
using std::string;

// Canonical HTML fragments for the cell states in the status table.
const string pass_msg( "Pass" );
const string warn_msg( "<i>Warn</i>" );
const string fail_msg( "<font color=\"#FF0000\"><i>Fail</i></font>" );
const string missing_residue_msg( "<i>Missing</i>" );

// Compiler output longer than this is truncated in the links file
// (see generate_report).
const std::size_t max_compile_msg_size = 10000;
namespace
{
  // Command-line state shared by the report generators below.
  fs::path locate_root; // locate-root (AKA ALL_LOCATE_TARGET) complete path
  bool ignore_pass = false;  // --ignore-pass: drop rows where everything passed
  bool no_warn = false;      // --no-warn: suppress warnings when a test passes
  bool no_links = false;     // skip writing the links file entirely
                             // NOTE(review): no visible option sets this — confirm
// transform pathname to something html can accept
struct char_xlate {
typedef char result_type;
result_type operator()(char c) const{
if(c == '/' || c == '\\')
return '-';
return c;
}
};
  // Iterator adaptor that yields a path string with separators already
  // translated to '-' (via char_xlate) — the HTML-safe anchor form.
  typedef boost::transform_iterator<char_xlate, std::string::const_iterator> html_from_path;

  // Stream an iterator range (expressed as a std::pair of iterators),
  // used to emit html_from_path ranges into the report streams.
  template<class I1, class I2>
  std::ostream & operator<<(
    std::ostream &os,
    std::pair<I1, I2> p
  ){
    while(p.first != p.second)
      os << *p.first++;
    return os;
  }
  // One node of the column tree mirroring the on-disk bin directory
  // layout; leaves correspond to directories that contain result files.
  struct col_node {
    int rows, cols;        // header spans, filled in by get_spans()
    bool is_leaf;          // true when the matching directory held files
    typedef std::pair<const std::string, col_node> subcolumn;
    typedef std::map<std::string, col_node> subcolumns_t;
    subcolumns_t m_subcolumns;   // children keyed by directory name
    bool operator<(const col_node &cn) const;  // NOTE(review): declared, no definition visible
    col_node() :
      is_leaf(false)
    {}
    std::pair<int, int> get_spans();
  };

  // Compute, bottom-up, the (rowspan, colspan) each node needs in the
  // HTML header: rows = deepest child depth + 1, cols = leaf count.
  // Returns the pair and caches it in this node's rows/cols members.
  std::pair<int, int> col_node::get_spans(){
    rows = 1;
    cols = 0;
    if(is_leaf){
      cols = 1;
    }
    if(! m_subcolumns.empty()){
      BOOST_FOREACH(
        subcolumn & s,
        m_subcolumns
      ){
        std::pair<int, int> spans;
        spans = s.second.get_spans();
        rows = (std::max)(rows, spans.first);
        cols += spans.second;
      }
      ++rows;
    }
    return std::make_pair(rows, cols);
  }
void build_node_tree(const fs::path & dir_root, col_node & node){
bool has_directories = false;
bool has_files = false;
BOOST_FOREACH(
fs::directory_entry & d,
std::make_pair(
fs::directory_iterator(dir_root),
fs::directory_iterator()
)
){
if(fs::is_directory(d)){
has_directories = true;
std::pair<col_node::subcolumns_t::iterator, bool> result
= node.m_subcolumns.insert(
std::make_pair(d.path().filename().string(), col_node())
);
build_node_tree(d, result.first->second);
}
else{
has_files = true;
}
}
if(has_directories && has_files)
throw std::string("invalid bin directory structure");
node.is_leaf = has_files;
}
  // Output streams and run-wide settings for the generated report.
  fs::ofstream report;              // main status-table HTML
  fs::ofstream links_file;          // per-test detail (compiler/linker/run output)
  string links_name;                // leaf name of links_file, used in hrefs
  string specific_compiler;         // if running on one toolset only
  const string empty_string;        // shared "no content" return value
// extract object library name from target directory string ----------------//
string extract_object_library_name( const string & s )
{
string t( s );
string::size_type pos = t.find( "/build/" );
if ( pos != string::npos ) pos += 7;
else if ( (pos = t.find( "/test/" )) != string::npos ) pos += 6;
else return "";
return t.substr( pos, t.find( "/", pos ) - pos );
}
  //  find_element  ------------------------------------------------------------//
  // Predicate matching a child element by tag name.
  struct element_equal {
    const string & m_name;
    element_equal(const string & name) :
      m_name(name)
    {}
    bool operator()(const xml::element_ptr & xep) const {
      return xep.get()->name == m_name;
    }
  };
  // Locate the first direct child of 'root' named 'name';
  // returns root.elements.end() when absent.
  xml::element_list::const_iterator find_element(
    const xml::element & root, const string & name
  ){
    return std::find_if(
      root.elements.begin(),
      root.elements.end(),
      element_equal(name)
    );
  }

  //  element_content  ---------------------------------------------------------//
  // Text content of the first child named 'name', or the shared
  // empty_string when there is no such child.
  const string & element_content(
    const xml::element & root, const string & name
  ){
    xml::element_list::const_iterator itr;
    itr = find_element(root, name);
    if(root.elements.end() == itr)
      return empty_string;
    return (*itr)->content;
  }

  //  attribute_value  ----------------------------------------------------------//
  // Predicate matching an attribute by name.
  struct attribute_equal {
    const string & m_name;
    attribute_equal(const string & name) :
      m_name(name)
    {}
    bool operator()(const xml::attribute & a) const {
      return a.name == m_name;
    }
  };
  // Value of the named attribute of 'element', or an empty string
  // (a function-local static, shadowing the file-scope empty_string)
  // when the attribute is absent.
  const string & attribute_value(
    const xml::element & element,
    const string & attribute_name
  ){
    xml::attribute_list::const_iterator itr;
    itr = std::find_if(
      element.attributes.begin(),
      element.attributes.end(),
      attribute_equal(attribute_name)
    );
    if(element.attributes.end() == itr){
      static const string empty_string;
      return empty_string;
    }
    return itr->value;
  }
  //  generate_report  ---------------------------------------------------------//
  //  Writes the compiler/linker/run output sections for one test into
  //  links_file, plus (once per failed object library) the library's
  //  own failure section.
  //  return 0 if nothing generated, 1 otherwise, except 2 if compiler msgs
  int generate_report(
    const xml::element & db,
    const std::string source_library_name,
    const string & test_type,
    const fs::path & target_dir,
    bool pass,
    bool always_show_run_output
  )
  {
    // compile msgs sometimes modified, so make a local copy
    string compile( ((pass && no_warn)
      ? empty_string : element_content( db, "compile" )) );
    const string & link( pass ? empty_string : element_content( db, "link" ) );
    const string & run( (pass && !always_show_run_output)
      ? empty_string : element_content( db, "run" ) );
    string lib( (pass ? empty_string : element_content( db, "lib" )) );

    // some compilers output the filename even if there are no errors or
    // warnings; detect this if one line of output and it contains no space.
    string::size_type pos = compile.find( '\n', 1 );
    if ( pos != string::npos && compile.size()-pos <= 2
        && compile.find( ' ' ) == string::npos ) compile.clear();

    // nothing to report at all for this test
    if ( lib.empty()
        && (
            compile.empty() || test_type == "compile_fail"
        )
        && link.empty()
        && run.empty()
    )
        return 0;

    int result = 1; // some kind of msg for sure

    // limit compile message length
    if ( compile.size() > max_compile_msg_size )
    {
      compile.erase( max_compile_msg_size );
      compile += "...\n   (remainder deleted because of excessive size)\n";
    }

    // anchor named after the target dir (separators mapped to '-')
    const string target_dir_string = target_dir.string();

    links_file << "<h2><a name=\"";
    links_file << std::make_pair(
        html_from_path(target_dir_string.begin()),
        html_from_path(target_dir_string.end())
        )
        << "\">"
        << std::make_pair(
        html_from_path(target_dir_string.begin()),
        html_from_path(target_dir_string.end())
        )
        ;
    links_file << "</a></h2>\n";;

    if ( !compile.empty() )
    {
      ++result;   // 2 signals "compiler messages present" to the caller
      links_file << "<h3>Compiler output:</h3><pre>"
        << compile << "</pre>\n";
    }
    if ( !link.empty() )
      links_file << "<h3>Linker output:</h3><pre>" << link << "</pre>\n";
    if ( !run.empty() )
      links_file << "<h3>Run output:</h3><pre>" << run << "</pre>\n";

    // for an object library failure, generate a reference to the object
    // library failure message, and (once only) generate the object
    // library failure message itself
    static std::set< string > failed_lib_target_dirs; // only generate once
    if ( !lib.empty() )
    {
      if ( lib[0] == '\n' ) lib.erase( 0, 1 );
      string object_library_name( extract_object_library_name( lib ) );

      // changing the target directory naming scheme breaks
      // extract_object_library_name()
      assert( !object_library_name.empty() );
      if ( object_library_name.empty() )
        std::cerr << "Failed to extract object library name from " << lib << "\n";

      links_file << "<h3>Library build failure: </h3>\n"
        "See <a href=\"#"
        << source_library_name << "-"
        << object_library_name << "-"
        << std::make_pair(
        html_from_path(target_dir_string.begin()),
        html_from_path(target_dir_string.end())
        )
        << source_library_name << " - "
        << object_library_name << " - "
        << std::make_pair(
        html_from_path(target_dir_string.begin()),
        html_from_path(target_dir_string.end())
        )
        << "</a>";
      if ( failed_lib_target_dirs.find( lib ) == failed_lib_target_dirs.end() )
      {
        // first failure of this library: emit its own section by
        // recursing on the library's test_log.xml
        failed_lib_target_dirs.insert( lib );
        fs::path pth( locate_root / lib / "test_log.xml" );
        fs::ifstream file( pth );
        if ( file )
        {
          xml::element_ptr db = xml::parse( file, pth.string() );
          generate_report(
            *db,
            source_library_name,
            test_type,
            target_dir,
            false,
            false
          );
        }
        else
        {
          // no log to show — emit a placeholder section
          links_file << "<h2><a name=\""
            << object_library_name << "-"
            << std::make_pair(
                html_from_path(target_dir_string.begin()),
                html_from_path(target_dir_string.end())
                )
            << "\">"
            << object_library_name << " - "
            << std::make_pair(
                html_from_path(target_dir_string.begin()),
                html_from_path(target_dir_string.end())
                )
            << "</a></h2>\n"
            << "test_log.xml not found\n";
        }
      }
    }
    return result;
  }
  // Predicate: true when a test-log child element carries result="fail".
  struct has_fail_result {
    //bool operator()(const boost::shared_ptr<const xml::element> e) const {
    bool operator()(const xml::element_ptr & e) const {
      return attribute_value(*e, "result") == "fail";
    }
  };
  //  do_cell  ---------------------------------------------------------------//
  //  Append one Pass/Warn/Fail table cell for the test whose results
  //  live in target_dir to 'target'; when anything notable happened,
  //  link the cell to the matching section in the links file.
  //  Returns true if any results except pass_msg.
  bool do_cell(
    const fs::path & target_dir,
    const string & lib_name,
    const string & test_name,
    string & target,
    bool profile
  ){
    // return true if any results except pass_msg
    bool pass = false;

    fs::path xml_file_path( target_dir / "test_log.xml" );
    if ( !fs::exists( xml_file_path ) )
    {
      // no test log: fall back to the presence of the .test marker
      fs::path test_path = target_dir / (test_name + ".test");
      target += "<td align=\"right\">";
      target += fs::exists( test_path) ? pass_msg : fail_msg;
      target += "</td>";
      return true;
    }

    string test_type( "unknown" );
    bool always_show_run_output( false );

    fs::ifstream file( xml_file_path );
    xml::element_ptr dbp = xml::parse( file, xml_file_path.string() );
    const xml::element & db( *dbp );

    always_show_run_output
      = attribute_value( db, "show-run-output" ) == "true";

    // if we don't find any failures
    // mark it as a pass
    pass = (db.elements.end() == std::find_if(
      db.elements.begin(),
      db.elements.end(),
      has_fail_result()
    ));

    int anything_generated = 0;
    if (!no_links){
      anything_generated =
        generate_report(
          db,
          lib_name,
          test_type,
          target_dir,
          pass,
          always_show_run_output
        );
    }

    // generate the status table cell pass/warn/fail HTML
    target += "<td align=\"right\">";
    if ( anything_generated != 0 )
    {
      // detail exists in the links file: wrap the verdict in an anchor
      target += "<a href=\"";
      target += links_name;
      target += "#";
      const string target_dir_string = target_dir.string();
      std::copy(
        html_from_path(target_dir_string.begin()),
        html_from_path(target_dir_string.end()),
        std::back_inserter(target)
      );
      target += "\">";
      target += pass
        ? (anything_generated < 2 ? pass_msg : warn_msg)
        : fail_msg;
      target += "</a>";
    }
    else target += pass ? pass_msg : fail_msg;

    // if profiling
    if(profile && pass){
      // add link to profile
      target += " <a href=\"";
      target += (target_dir / "profile.txt").string();
      target += "\"><i>Profile</i></a>";
    }
    target += "</td>";
    return (anything_generated != 0) || !pass;
  }
  // Walk the column tree in step with the directory tree under
  // dir_root, emitting one cell (via do_cell) for every leaf.
  // The 'profile' flag is turned on for subtrees named "profile".
  // Returns true if any visited cell had something to report.
  bool visit_node_tree(
    const col_node & node,
    fs::path dir_root,
    const string & lib_name,
    const string & test_name,
    string & target,
    bool profile
  ){
    bool retval = false;
    if(node.is_leaf){
      return do_cell(
        dir_root,
        lib_name,
        test_name,
        target,
        profile
      );
    }
    BOOST_FOREACH(
      const col_node::subcolumn & s,
      node.m_subcolumns
    ){
      fs::path subdir = dir_root / s.first;
      retval |= visit_node_tree(
        s.second,
        subdir,
        lib_name,
        test_name,
        target,
        s.first == "profile"
      );
    }
    return retval;
  }
  // emit results for each test
  // Build one <tr> for a single test: the test name cell followed by
  // one cell per column-tree leaf.  With --ignore-pass, a row where
  // nothing needed reporting is erased again.
  void do_row(
    col_node test_node,
    const fs::path & test_dir,
    const string & lib_name,
    const string & test_name,
    string & target
  ){
    // remember where the row started so it can be rolled back
    string::size_type row_start_pos = target.size();

    target += "<tr>";
    target += "<td>";
    //target += "<a href=\"" + url_prefix_dir_view + "/libs/" + lib_name + "\">";
    target += test_name;
    //target += "</a>";
    target += "</td>";

    bool no_warn_save = no_warn;

    // emit cells on this row
    bool anything_to_report = visit_node_tree(
      test_node,
      test_dir,
      lib_name,
      test_name,
      target,
      false
    );

    target += "</tr>";

    if ( ignore_pass
    && ! anything_to_report )
      target.erase( row_start_pos );

    // restore in case a cell changed it
    no_warn = no_warn_save;
  }
  //  do_table_body  -----------------------------------------------------------//
  //  Emit one row per "*.test" subdirectory of test_lib_dir, sorted by
  //  the rendered row text (which starts with the test name).
  void do_table_body(
    col_node root_node,
    const string & lib_name,
    const fs::path & test_lib_dir
  ){
    // rows are held in a vector so they can be sorted, if desired.
    std::vector<string> results;

    BOOST_FOREACH(
      fs::directory_entry & d,
      std::make_pair(
        fs::directory_iterator(test_lib_dir),
        fs::directory_iterator()
      )
    ){
      if(! fs::is_directory(d))
        continue;

      // if the file name contains ".test"
      if(d.path().extension() != ".test")
        continue;

      string test_name = d.path().stem().string();

      results.push_back( std::string() );
      do_row(
        root_node, //*test_node_itr++,
        d, // test dir
        lib_name,
        test_name,
        results[results.size()-1]
      );
    }

    std::sort( results.begin(), results.end() );

    BOOST_FOREACH(string &s, results)
      report << s << "\n";
  }
  //  column header-----------------------------------------------------------//
  //  Number of header rows needed: the deepest rowspan among the root's
  //  immediate subcolumns (at least 1).
  int header_depth(const col_node & root){
    int max_depth = 1;
    BOOST_FOREACH(
      const col_node::subcolumn &s,
      root.m_subcolumns
    ){
      max_depth = (std::max)(max_depth, s.second.rows);
    }
    return max_depth;
  }

  // Emit a single header <td> spanning 'rows' x 'cols' into 'report';
  // span attributes are omitted when they are 1.
  void header_cell(int rows, int cols, const std::string & name){
    // add row cells
    report << "<td align=\"center\" " ;
    if(1 < cols)
      report << "colspan=\"" << cols << "\" " ;
    if(1 < rows)
      // span rows to the end the header
      report << "rowspan=\"" << rows << "\" " ;
    report << ">" ;
    report << name;
    report << "</td>\n";
  }
  // Emit the header cells belonging to header row 'display_row':
  // recurse down the column tree until that depth, then write one cell
  // per subcolumn, spanning to the bottom of the header when the
  // subcolumn has no deeper children.
  void emit_column_headers(
    const col_node & node,
    int display_row,
    int current_row,
    int row_count
  ){
    if(current_row < display_row){
      // not deep enough yet — descend
      if(! node.m_subcolumns.empty()){
        BOOST_FOREACH(
          const col_node::subcolumn &s,
          node.m_subcolumns
        ){
          emit_column_headers(
            s.second,
            display_row,
            current_row + 1,
            row_count
          );
        }
      }
      return;
    }
    /*
    if(node.is_leaf && ! node.m_subcolumns.empty()){
      header_cell(row_count - current_row, 1, std::string(""));
    }
    */
    BOOST_FOREACH(col_node::subcolumn s, node.m_subcolumns){
      if(1 == s.second.rows)
        // no children below: stretch the cell to the last header row
        header_cell(row_count - current_row, s.second.cols, s.first);
      else
        header_cell(1, s.second.cols, s.first);
    }
  }
fs::path find_lib_test_dir(fs::path const& initial_path){
// walk up from the path were we started until we find
// bin or bin.v2
fs::path test_lib_dir = initial_path;
do{
if(fs::is_directory( test_lib_dir / "bin.v2")){
test_lib_dir /= "bin.v2";
break;
}
if(fs::is_directory( test_lib_dir / "bin")){
// v1 includes the word boost
test_lib_dir /= "bin";
if(fs::is_directory( test_lib_dir / "boost")){
test_lib_dir /= "boost";
}
break;
}
}while(! test_lib_dir.empty());
if(test_lib_dir.empty())
throw std::string("binary path not found");
return test_lib_dir;
}
  // Derive a display name for the library from its test directory:
  // everything after the last "libs" component, or — when no "libs"
  // component exists — just the last two path components.
  string find_lib_name(fs::path lib_test_dir){
    // search the path backwards for the magic name "libs"
    fs::path::iterator e_itr = lib_test_dir.end();
    while(lib_test_dir.begin() != e_itr){
      if(*--e_itr == "libs")
        break;
    }

    // if its found
    if(lib_test_dir.begin() != e_itr){
      // use the whole path since the "libs"
      ++e_itr;
    }
    // otherwise, just use the last two components
    else{
      e_itr = lib_test_dir.end();
      if(e_itr != lib_test_dir.begin()){
        if(--e_itr != lib_test_dir.begin()){
          --e_itr;
        }
      }
    }

    // join the remaining components back into a relative path
    fs::path library_name;
    while(lib_test_dir.end() != e_itr){
      library_name /= *e_itr++;
    }
    return library_name.string();
  }
  // Walk up from initial_path until a directory containing a "boost"
  // subdirectory (the Boost root) is found.  Throws when the search
  // reaches an empty path without finding one.
  // NOTE(review): remove_filename() on a path whose last component
  // cannot be stripped further may not reach empty() — confirm the
  // loop terminates from every starting path.
  fs::path find_boost_root(fs::path initial_path){
    fs::path boost_root = initial_path;
    for(;;){
      if(fs::is_directory( boost_root / "boost")){
        break;
      }
      if(boost_root.empty())
        throw std::string("boost root not found");
      boost_root.remove_filename();
    }

    return boost_root;
  }
  //  do_table  ----------------------------------------------------------------//
  //  Build the column tree from every "*.test" subdirectory of
  //  lib_test_dir, then emit the complete HTML table: spanning column
  //  headers followed by one row per test (do_table_body).
  void do_table(const fs::path & lib_test_dir, const string & lib_name)
  {
    col_node root_node;

    // merge each test directory's layout into one shared column tree
    BOOST_FOREACH(
      fs::directory_entry & d,
      std::make_pair(
        fs::directory_iterator(lib_test_dir),
        fs::directory_iterator()
      )
    ){
      if(! fs::is_directory(d))
        continue;
      fs::path p = d.path();
      if(p.extension() != ".test")
        continue;
      build_node_tree(d, root_node);
    }

    // visit directory nodes and record nodetree
    report << "<table border=\"1\" cellspacing=\"0\" cellpadding=\"5\">\n";

    // emit
    root_node.get_spans();   // compute rowspan/colspan for every column
    int row_count = header_depth(root_node);
    report << "<tr>\n";
    report << "<td rowspan=\"" << row_count << "\">Test Name</td>\n";

    // emit column headers, one header row at a time
    int row_index = 0;
    for(;;){
      emit_column_headers(root_node, row_index, 0, row_count);
      report << "</tr>" ;
      if(++row_index == row_count)
        break;
      report << "<tr>\n";
    }

    // now the rest of the table body
    do_table_body(root_node, lib_name, lib_test_dir);

    report << "</table>\n";
  }
}// unnamed namespace
// main --------------------------------------------------------------------//
#define BOOST_NO_CPP_MAIN_SUCCESS_MESSAGE
#include <boost/test/included/prg_exec_monitor.hpp>
// Program entry point (run under the Boost program-execution monitor).
// Parses options, opens the status/links output files, locates the test
// binary tree, and emits the per-library HTML status report.
int cpp_main( int argc, char * argv[] ) // note name!
{
    fs::path initial_path = fs::initial_path();

    // consume leading "--" options; each consumed option shifts argv left
    while ( argc > 1 && *argv[1] == '-' )
    {
        if ( argc > 2 && std::strcmp( argv[1], "--compiler" ) == 0 )
        { specific_compiler = argv[2]; --argc; ++argv; }
        else if ( argc > 2 && std::strcmp( argv[1], "--locate-root" ) == 0 )
        { locate_root = fs::path( argv[2] ); --argc; ++argv; }
        else if ( std::strcmp( argv[1], "--ignore-pass" ) == 0 ) ignore_pass = true;
        else if ( std::strcmp( argv[1], "--no-warn" ) == 0 ) no_warn = true;
        else if ( std::strcmp( argv[1], "--v2" ) == 0 )
        {--argc; ++argv ;} // skip
        else if ( argc > 2 && std::strcmp( argv[1], "--jamfile" ) == 0)
        {--argc; ++argv;} // skip
        else { std::cerr << "Unknown option: " << argv[1] << "\n"; argc = 1; }
        --argc;
        ++argv;
    }
    if ( argc != 2 && argc != 3 )
    {
        std::cerr <<
            "Usage: library_status [options...] status-file [links-file]\n"
            "  boost-root is the path to the boost tree root directory.\n"
            "  status-file and links-file are paths to the output files.\n"
            "  options: --compiler name     Run for named compiler only\n"
            "           --ignore-pass      Do not report tests which pass all compilers\n"
            "           --no-warn          Warnings not reported if test passes\n"
            "           --locate-root path Path to ALL_LOCATE_TARGET for bjam;\n"
            "                              default boost-root.\n"
            "Example: library_status --compiler gcc /boost-root cs.html cs-links.html\n"
            "Note: Only the leaf of the links-file path is\n"
            "used in status-file HTML links. Thus for browsing, status-file,\n"
            "links-file must be in the same directory.\n"
            ;
        return 1;
    }
    // if no explicit locate-root and we are not sitting in a build tree,
    // discover the boost root heuristically
    if(locate_root.empty())
        if(! fs::exists("bin") && ! fs::exists("bin.v2"))
            locate_root = find_boost_root(initial_path);
    report.open( fs::path( argv[1] ) );
    if ( !report )
    {
        // fixed: the file opened above is argv[1], not argv[2]
        std::cerr << "Could not open report output file: " << argv[1] << std::endl;
        return 1;
    }
    if ( argc == 3 )
    {
        fs::path links_path( argv[2] );
        links_name = links_path.filename().string();
        links_file.open( links_path );
        if ( !links_file )
        {
            // fixed: the file opened above is argv[2], not argv[3]
            std::cerr << "Could not open links output file: " << argv[2] << std::endl;
            return 1;
        }
    }
    else no_links = true;
    const string library_name = find_lib_name(initial_path);

    // UTC run timestamp shown in the report headers
    char run_date[128];
    std::time_t tod;
    std::time( &tod );
    std::strftime( run_date, sizeof(run_date),
        "%X UTC, %A %d %B %Y", std::gmtime( &tod ) );

    report
        << "<html>\n"
        << "<head>\n"
        << "<title>Boost Library Status Automatic Test</title>\n"
        << "</head>\n"
        << "<body bgcolor=\"#ffffff\" text=\"#000000\">\n"
        << "<table border=\"0\">\n"
        << "<h1>Library Status: " + library_name + "</h1>\n"
        << "<b>Run Date:</b> "
        << run_date
        << "\n<br>"
        ;
    report << "</td>\n</table>\n<br>\n";
    if ( !no_links )
    {
        links_file
            << "<html>\n"
            << "<head>\n"
            << "<title>Boost Library Status Error Log</title>\n"
            << "</head>\n"
            << "<body bgcolor=\"#ffffff\" text=\"#000000\">\n"
            << "<table border=\"0\">\n"
            << "<h1>Library Status: " + library_name + "</h1>\n"
            << "<b>Run Date:</b> "
            << run_date
            << "\n<br></table>\n<br>\n"
            ;
    }
    // detect whether in a a directory which looks like
    // bin/<library name>/test
    // or just
    // bin
    fs::path library_test_directory = find_lib_test_dir(locate_root);
    // if libs exists, drop down a couple of levels
    if(fs::is_directory( library_test_directory / "libs")){
        library_test_directory /= "libs";
        library_test_directory /= library_name;
    }

    do_table(library_test_directory, library_name);

    report << "</body>\n"
        "</html>\n"
        ;
    if ( !no_links )
    {
        links_file
            << "</body>\n"
            "</html>\n"
            ;
    }
    return 0;
}

View file

@ -1,21 +0,0 @@
@echo off
rem Copyright Robert Ramey 2007
rem Distributed under the Boost Software License, Version 1.0.
rem See http://www.boost.org/LICENSE_1_0.txt

rem Run a library's test suite through bjam, convert the log to XML
rem results, and generate HTML status pages.
rem With no arguments, print usage and exit.
if not "%1" == "" goto bjam
echo Usage: %0 "<bjam arguments>"
rem fixed typo: "arguements" -> "arguments"
echo where typical bjam arguments are:
echo toolset=msvc-7.1,gcc
echo variant=debug,release,profile
echo link=static,shared
echo threading=single,multi
echo -sBOOST_ARCHIVE_LIST="<archive name>"
goto end
:bjam
rem capture the full bjam output (with --dump-tests) for post-processing
bjam --dump-tests %* >bjam.log 2>&1
process_jam_log --v2 <bjam.log
library_status library_status.html links.html
:end

View file

@ -1,19 +0,0 @@
# Copyright Robert Ramey 2007
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt

# Run a library's test suite through bjam, convert the log to XML
# results, and generate HTML status pages.
# With no arguments, print usage instead.
if test $# -eq 0
then
    echo "Usage: $0 <bjam arguments>"
    # fixed typo: "arguements" -> "arguments"
    echo "Typical bjam arguments are:"
    echo "  toolset=msvc-7.1,gcc"
    echo "  variant=debug,release,profile"
    echo "  link=static,shared"
    echo "  threading=single,multi"
    echo "  -sBOOST_ARCHIVE_LIST=<archive name>"
else
    # quote "$@" so bjam arguments containing spaces survive intact
    bjam --dump-tests "$@" >bjam.log 2>&1
    process_jam_log --v2 <bjam.log
    library_status library_status.html links.html
fi

View file

@ -1,85 +0,0 @@
# Run every library's test suite (libs/*/test and libs/*/*/test) from the
# boost root and build a contents page (status/library_status_contents.html)
# linking to each generated library_status.html.
if test $# -eq 0
then
    echo "Usage: $0 <bjam arguments>"
    echo "Typical bjam arguments are:"
    echo "  toolset=msvc-7.1,gcc"
    echo "  variant=debug,release,profile"
    echo "  link=static,shared"
    echo "  threading=single,multi"
    echo
    echo "note: make sure this script is run from boost root directory !!!"
    exit 1
fi

# sanity check: the boost root must contain a libs directory
if ! test -e libs
then
    echo No libs directory found. Run from boost root directory !!!
    exit 1
fi

#html header
cat <<end >status/library_status_contents.html
<!doctype HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html>
<!--
(C) Copyright 2007 Robert Ramey - http://www.rrsd.com .
Use, modification and distribution is subject to the Boost Software
License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
-->
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<link rel="stylesheet" type="text/css" href="../boost.css">
<title>Library Status Contents</title>
<body>
end

# NOTE(review): ">nul" is the Windows null device; under a plain POSIX
# shell it creates a file literally named "nul" -- presumably this was
# written for Cygwin/MSYS, verify before reusing elsewhere.
cd >nul libs

# runtests, create library pages, and body of summary page
for lib_name in *
do
    if test -d $lib_name
    then
        cd >nul $lib_name
        # top-level library with a test suite
        if test -e "test/Jamfile.v2"
        then
            cd >nul test
            echo $lib_name
            echo >>../../../status/library_status_contents.html "<a target=\"detail\" href=\"../libs/$lib_name/test/library_status.html\">$lib_name</a><br>"
            ../../../tools/regression/src/library_test.sh $@
            cd >nul ..
        fi
        # nested sub-libraries (e.g. numeric/ublas) with their own tests
        for sublib_name in *
        do
            if test -d $sublib_name
            then
                cd >nul $sublib_name
                if test -e "test/Jamfile.v2"
                then
                    cd >nul test
                    echo $lib_name/$sublib_name
                    echo >>../../../../status/library_status_contents.html "<a target=\"detail\" href=\"../libs/$lib_name/$sublib_name/test/library_status.html\">$lib_name/$sublib_name</a><br>"
                    ../../../../tools/regression/src/library_test.sh $@
                    cd >nul ..
                fi
                cd >nul ..
            fi
        done
        cd >nul ..
    fi
done
cd >nul ..

#html trailer
cat <<end >>status/library_status_contents.html
</body>
</html>
end

View file

@ -1,893 +0,0 @@
// process jam regression test output into XML -----------------------------//
// Copyright Beman Dawes 2002. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// See http://www.boost.org/tools/regression for documentation.
#define BOOST_FILESYSTEM_VERSION 3
#include <boost/config/warning_disable.hpp>
#include "detail/tiny_xml.hpp"
#include "boost/filesystem/operations.hpp"
#include "boost/filesystem/fstream.hpp"
#include "boost/filesystem/exception.hpp"
#include "boost/filesystem/convenience.hpp"
#include <iostream>
#include <string>
#include <cstring>
#include <map>
#include <utility> // for make_pair
#include <ctime>
#include <cctype> // for tolower
#include <cstdlib> // for exit
using std::string;
namespace xml = boost::tiny_xml;
namespace fs = boost::filesystem;
// options
static bool echo = false;
static bool create_dirs = false;
static bool boost_build_v2 = true;
namespace
{
struct test_info
{
string file_path; // relative boost-root
string type;
bool always_show_run_output;
};
typedef std::map< string, test_info > test2info_map; // key is test-name
test2info_map test2info;
fs::path boost_root;
fs::path locate_root; // ALL_LOCATE_TARGET (or boost_root if none)
// set_boost_root --------------------------------------------------------//
// Discover the boost root by walking up from the initial path until a
// directory containing "libs" is found, storing it in the global
// 'boost_root'. Restores the current path before returning; exits the
// program if the walk reaches the filesystem root without success.
void set_boost_root()
{
    boost_root = fs::initial_path();
    for(;;)
    {
        if ( fs::exists( boost_root / "libs" ) )
        {
            fs::current_path( fs::initial_path() ); // restore initial path
            return;
        }
        fs::current_path( ".." );
        // current_path() unchanged by ".." means we hit the top
        if ( boost_root == fs::current_path() )
        {
            fs::current_path( fs::initial_path() ); // restore initial path
            std::cout <<
                "Abort: process_jam_log must be run from within a boost directory tree\n";
            std::exit(1);
        }
        boost_root = fs::current_path();
    }
}
// append_html -------------------------------------------------------------//
// Append 'src' to 'target' with HTML metacharacters escaped.
// Known toolset noise lines are dropped entirely.
void append_html( const string & src, string & target )
{
    // lines we deliberately ignore
    static const char * const noise[] = {
        "th target...",
        "cc1plus.exe: warning: changing search order for system directory",
        "cc1plus.exe: warning: as it has already been specified as a non-system directory"
    };
    for ( std::size_t i = 0; i < sizeof(noise)/sizeof(noise[0]); ++i )
        if ( src.find( noise[i] ) != string::npos )
            return;

    // on some platforms (e.g. tru64cxx) reserving up front is a real
    // performance boost
    target.reserve( src.size() * 2 + target.size() );
    for ( string::const_iterator it = src.begin(); it != src.end(); ++it )
    {
        switch ( *it )
        {
        case '<': target += "&lt;"; break;
        case '>': target += "&gt;"; break;
        case '&': target += "&amp;"; break;
        default:  target += *it;
        }
    }
}
// timestamp ---------------------------------------------------------------//
// Current UTC time formatted as "YYYY-MM-DD HH:MM:SS UTC".
string timestamp()
{
    std::time_t now = std::time( 0 );
    char buf[128];
    std::strftime( buf, sizeof(buf),
        "%Y-%m-%d %X UTC", std::gmtime( &now ) );
    return string( buf );
}
// convert path separators to forward slashes ------------------------------//
// Normalize jam path separators: both '\' and '!' become '/'.
void convert_path_separators( string & s )
{
    for ( string::size_type i = 0; i < s.size(); ++i )
    {
        if ( s[i] == '\\' || s[i] == '!' )
            s[i] = '/';
    }
}
// trim_left ----------------------------------------------------------------//
// Return 's' with leading spaces removed ("" if all spaces).
std::string trim_left( std::string const& s )
{
    std::string::size_type const first( s.find_first_not_of(' ') );
    if ( first == std::string::npos )
        return "";
    return s.substr( first );
}

// Split 's' on runs of spaces. A trailing space yields a final empty
// element, and an empty input yields a single empty element.
std::vector<std::string> split( std::string const& s )
{
    std::vector<std::string> parts;
    std::string rest( s );
    for (;;)
    {
        std::string::size_type const space( rest.find(' ') );
        parts.push_back( rest.substr( 0, space ) );
        if ( space == std::string::npos )
            break;
        rest = trim_left( rest.substr( space ) );
    }
    return parts;
}
// extract a target directory path from a jam target string ----------------//
// s may be relative to the initial_path:
// ..\..\..\libs\foo\build\bin\libfoo.lib\vc7\debug\runtime-link-dynamic\boo.obj
// s may be absolute:
// d:\myboost\libs\foo\build\bin\libfoo.lib\vc7\debug\runtime-link-dynamic\boo.obj
// return path is always relative to the boost directory tree:
// libs/foo/build/bin/libfs.lib/vc7/debug/runtime-link-dynamic
// Extract the target directory (relative to the boost tree) from a jam
// target string; see the format examples in the comment block above.
string target_directory( const string & s )
{
    string temp( s );
    convert_path_separators( temp );
    temp.erase( temp.find_last_of( "/" ) ); // remove leaf
    // the last whitespace-separated field holds the path
    temp = split( trim_left( temp ) ).back();
    // relative path: strip the leading run of "." and "/" characters;
    // absolute path: strip the locate-root prefix (plus its separator)
    if ( temp[0] == '.' ) temp.erase( 0, temp.find_first_not_of( "./" ) );
    else temp.erase( 0, locate_root.string().size()+1 );
    if ( echo )
        std::cout << "\ttarget_directory( \"" << s << "\") -> \"" << temp << "\"" << std::endl;
    return temp;
}
// Position of the first recognized target suffix (".test/", ".dll/",
// ".so/", ".lib/", ".pyd/", ".a/") in 's', or npos if none appears.
// Suffixes are tried in that fixed priority order.
string::size_type target_name_end( const string & s )
{
    static const char * const markers[] =
        { ".test/", ".dll/", ".so/", ".lib/", ".pyd/", ".a/" };
    for ( std::size_t i = 0; i < sizeof(markers)/sizeof(markers[0]); ++i )
    {
        string::size_type const pos = s.find( markers[i] );
        if ( pos != string::npos )
            return pos;
    }
    return string::npos;
}

// Toolset name: the path component following the target name (or the
// "build/" component when no target suffix is present); "" if neither.
string toolset( const string & s )
{
    string::size_type marker = target_name_end( s );
    if ( marker == string::npos )
        marker = s.find( "build/" );
    if ( marker == string::npos )
        return "";
    string::size_type const begin = s.find( "/", marker ) + 1;
    string::size_type const end = s.find( "/", begin );
    return s.substr( begin, end - begin );
}

// Test name: the path component containing the target suffix, with the
// suffix itself removed.
string test_name( const string & s )
{
    string::size_type const marker = target_name_end( s );
    if ( marker == string::npos )
        return "";
    string::size_type const begin = s.rfind( '/', marker ) + 1;
    string::size_type end;
    if ( s.find( ".test/" ) != string::npos )
        end = marker;
    else
        end = s.find( "/", marker );
    return s.substr( begin, end - begin );
}
// Take a path to a target directory of test, and
// returns library name corresponding to that path.
// Take a path to a target directory of test, and return the library name
// corresponding to that path (the portion between "libs/" or "tools/"
// and the test directory), honoring any 'sublibs' marker files.
string test_path_to_library_name( string const& path )
{
    std::string result;
    string::size_type start_pos( path.find( "libs/" ) );
    if ( start_pos == string::npos ) {
        start_pos = path.find( "tools/" );
    }
    if ( start_pos != string::npos )
    {
        // The path format is ...libs/functional/hash/test/something.test/....
        // So, the part between "libs" and "test/something.test" can be considered
        // as library name. But, for some libraries tests are located too deep,
        // say numeric/ublas/test/test1 directory, and some libraries have tests
        // in several subdirectories (regex/example and regex/test). So, nested
        // directory may belong to several libraries.
        // To disambituate, it's possible to place a 'sublibs' file in
        // a directory. It means that child directories are separate libraries.
        // It's still possible to have tests in the directory that has 'sublibs'
        // file.
        std::string interesting;
        start_pos = path.find( '/', start_pos ) + 1;
        string::size_type end_pos( path.find( ".test/", start_pos ) );
        end_pos = path.rfind('/', end_pos);
        // strip a trailing "/test" component if present
        if (path.substr(end_pos - 5, 5) == "/test")
            interesting = path.substr( start_pos, end_pos - 5 - start_pos );
        else
            interesting = path.substr( start_pos, end_pos - start_pos );
        // Take slash separate elements until we have corresponding 'sublibs'.
        end_pos = 0;
        for(;;)
        {
            end_pos = interesting.find('/', end_pos);
            if (end_pos == string::npos) {
                result = interesting;
                break;
            }
            result = interesting.substr(0, end_pos);
            if ( fs::exists( ( boost_root / "libs" ) / result / "sublibs" ) )
            {
                // descend one more level: children are separate libraries
                end_pos = end_pos + 1;
            }
            else
                break;
        }
    }
    return result;
}
// Tries to find target name in the string 'msg', starting from
// position start.
// If found, extract the directory name from the target name and
// stores it in 'dir', and return the position after the target name.
// Otherwise, returns string::npos.
// Find a "<...>" bracketed target name in 'msg' starting at 'start'.
// If found, store the directory extracted from it (relative to the boost
// tree) in 'dir' and return the position of the closing '>'; otherwise
// return string::npos. Interpretation of the bracketed text depends on
// the global boost_build_v2 flag.
string::size_type parse_skipped_msg_aux(const string& msg,
    string::size_type start,
    string& dir)
{
    dir.clear();
    string::size_type start_pos = msg.find( '<', start );
    if ( start_pos == string::npos ) return string::npos;
    ++start_pos;
    // NOTE(review): if '>' is absent, end_pos is npos and the substr
    // below takes the remainder of msg -- presumably never happens in
    // well-formed bjam output.
    string::size_type end_pos = msg.find( '>', start_pos );
    dir += msg.substr( start_pos, end_pos - start_pos );
    if ( boost_build_v2 )
    {
        // The first letter is a magic value indicating
        // the type of grist.
        convert_path_separators( dir );
        dir.erase( 0, 1 );
        // We need path from root, not from 'status' dir.
        if (dir.find("../") == 0)
            dir.erase(0,3);
        else // dir is always relative to the boost directory tree
            dir.erase( 0, locate_root.string().size()+1 );
    }
    else
    {
        if ( dir[0] == '@' )
        {
            // new style build path, rooted build tree
            convert_path_separators( dir );
            dir.replace( 0, 1, "bin/" );
        }
        else
        {
            // old style build path, integrated build tree
            start_pos = dir.rfind( '!' );
            convert_path_separators( dir );
            string::size_type path_sep_pos = dir.find( '/', start_pos + 1 );
            if ( path_sep_pos != string::npos )
                dir.insert( path_sep_pos, "/bin" );
            else
            {
                // see http://article.gmane.org/gmane.comp.lib.boost.devel/146688;
                // the following code assumes that: a) 'dir' is not empty,
                // b) 'end_pos != string::npos' and c) 'msg' always ends with '...'
                if ( dir[dir.size() - 1] == '@' )
                    dir += "/" + msg.substr( end_pos + 1, msg.size() - end_pos - 1 - 3 );
            }
        }
    }
    return end_pos;
}
// the format of paths is really kinky, so convert to normal form
// first path is missing the leading "..\".
// first path is missing "\bin" after "status".
// second path is missing the leading "..\".
// second path is missing "\bin" after "build".
// second path uses "!" for some separators.
// Extract the two bracketed directories from a "...skipped" message;
// either output may be left empty if the corresponding target name is
// not present (see the path-format notes in the comment block above).
void parse_skipped_msg( const string & msg,
    string & first_dir, string & second_dir )
{
    string::size_type pos = parse_skipped_msg_aux(msg, 0, first_dir);
    if (pos == string::npos)
        return;
    // second directory starts after the first target name
    parse_skipped_msg_aux(msg, pos, second_dir);
}
// test_log hides database details -----------------------------------------//
// Wraps the per-target test_log.xml document: loads (or creates) it on
// construction, lets callers add/remove action elements, and writes the
// document back to disk on destruction.
class test_log
    : boost::noncopyable
{
    const string & m_target_directory; // relative to locate_root
    xml::element_ptr m_root;           // <test-log> document root
public:
    // Load the existing test_log.xml for target_directory unless
    // force_new_file is set (or the file is missing/unparseable), in
    // which case build a fresh <test-log> root with identifying
    // attributes drawn from the global test2info map.
    test_log( const string & target_directory,
              const string & test_name,
              const string & toolset,
              bool force_new_file )
        : m_target_directory( target_directory )
    {
        if ( !force_new_file )
        {
            fs::path pth( locate_root / target_directory / "test_log.xml" );
            fs::ifstream file( pth );
            if ( file ) // existing file
            {
                try
                {
                    m_root = xml::parse( file, pth.string() );
                    return;
                }
                catch(...)
                {
                    // unable to parse existing XML file, fall through
                }
            }
        }
        string library_name( test_path_to_library_name( target_directory ) );
        test_info info;
        test2info_map::iterator itr( test2info.find( library_name + "/" + test_name ) );
        if ( itr != test2info.end() )
            info = itr->second;
        if ( !info.file_path.empty() )
            library_name = test_path_to_library_name( info.file_path );
        // infer the test type from the target directory when --dump-tests
        // did not provide one
        if ( info.type.empty() )
        {
            if ( target_directory.find( ".lib/" ) != string::npos
                || target_directory.find( ".dll/" ) != string::npos
                || target_directory.find( ".so/" ) != string::npos
                || target_directory.find( ".dylib/" ) != string::npos
                || target_directory.find( "/build/" ) != string::npos
                )
            {
                info.type = "lib";
            }
            else if ( target_directory.find( ".pyd/" ) != string::npos )
                info.type = "pyd";
        }
        m_root.reset( new xml::element( "test-log" ) );
        m_root->attributes.push_back(
            xml::attribute( "library", library_name ) );
        m_root->attributes.push_back(
            xml::attribute( "test-name", test_name ) );
        m_root->attributes.push_back(
            xml::attribute( "test-type", info.type ) );
        m_root->attributes.push_back(
            xml::attribute( "test-program", info.file_path ) );
        m_root->attributes.push_back(
            xml::attribute( "target-directory", target_directory ) );
        m_root->attributes.push_back(
            xml::attribute( "toolset", toolset ) );
        m_root->attributes.push_back(
            xml::attribute( "show-run-output",
                info.always_show_run_output ? "true" : "false" ) );
    }

    // Write the document back to <locate_root>/<target>/test_log.xml,
    // creating directories first if --create-directories was given.
    ~test_log()
    {
        fs::path pth( locate_root / m_target_directory / "test_log.xml" );
        if ( create_dirs && !fs::exists( pth.branch_path() ) )
            fs::create_directories( pth.branch_path() );
        fs::ofstream file( pth );
        if ( !file )
        {
            std::cout << "*****Warning - can't open output file: "
                << pth.string() << "\n";
        }
        else xml::write( *m_root, file );
    }

    const string & target_directory() const { return m_target_directory; }

    // Erase the first child element named action_name, if any.
    void remove_action( const string & action_name )
    // no effect if action_name not found
    {
        xml::element_list::iterator itr;
        for ( itr = m_root->elements.begin();
            itr != m_root->elements.end() && (*itr)->name != action_name;
            ++itr ) {}
        if ( itr != m_root->elements.end() ) m_root->elements.erase( itr );
    }

    // Replace any existing action_name element with a new one carrying
    // the given result, timestamp, and captured output.
    void add_action( const string & action_name,
                     const string & result,
                     const string & timestamp,
                     const string & content )
    {
        remove_action( action_name );
        xml::element_ptr action( new xml::element(action_name) );
        m_root->elements.push_back( action );
        action->attributes.push_back( xml::attribute( "result", result ) );
        action->attributes.push_back( xml::attribute( "timestamp", timestamp ) );
        action->content = content;
    }
};
// message_manager maps input messages into test_log actions ---------------//
// Maps paired start/stop input messages onto test_log actions; a
// start_message opens a pending action that the next stop_message (or
// the next start_message) closes.
class message_manager
    : boost::noncopyable
{
    string m_action_name; // !empty() implies action pending
                          // IOW, a start_message awaits stop_message
    string m_target_directory;
    string m_test_name;
    string m_toolset;
    bool m_note; // if true, run result set to "note"
                 // set false by start_message()

    // data needed to stop further compile action after a compile failure
    // detected in the same target directory
    string m_previous_target_directory;
    bool m_compile_failed;
public:
    // fixed: m_compile_failed was previously left uninitialized here
    message_manager() : m_note(false), m_compile_failed(false) {}
    ~message_manager() { /*assert( m_action_name.empty() );*/ }

    bool note() const { return m_note; }
    void note( bool value ) { m_note = value; }

    // Open a pending action; any previously pending action is closed
    // first as a success carrying prior_content.
    void start_message( const string & action_name,
                        const string & target_directory,
                        const string & test_name,
                        const string & toolset,
                        const string & prior_content )
    {
        assert( !target_directory.empty() );
        if ( !m_action_name.empty() ) stop_message( prior_content );
        m_action_name = action_name;
        m_target_directory = target_directory;
        m_test_name = test_name;
        m_toolset = toolset;
        m_note = false;
        // a new target directory resets the compile-failure latch
        if ( m_previous_target_directory != target_directory )
        {
            m_previous_target_directory = target_directory;
            m_compile_failed = false;
        }
    }

    // Close the pending action (if any) as a success.
    void stop_message( const string & content )
    {
        if ( m_action_name.empty() ) return;
        stop_message( m_action_name, m_target_directory,
            "succeed", timestamp(), content );
    }

    void stop_message( const string & action_name,
                       const string & target_directory,
                       const string & result,
                       const string & timestamp,
                       const string & content )
    // the only valid action_names are "compile", "link", "run", "lib"
    {
        // My understanding of the jam output is that there should never be
        // a stop_message that was not preceeded by a matching start_message.
        // That understanding is built into message_manager code.
        assert( m_action_name == action_name );
        assert( m_target_directory == target_directory );
        assert( result == "succeed" || result == "fail" );

        // if test_log.xml entry needed: skip only repeated compile
        // attempts in a directory that already failed to compile
        if ( !m_compile_failed
            || action_name != "compile"
            || m_previous_target_directory != target_directory )
        {
            test_log tl( target_directory,
                m_test_name, m_toolset, action_name == "compile" );
            tl.remove_action( "lib" ); // always clear out lib residue

            // dependency removal: a redone earlier stage invalidates the
            // recorded results of the later stages
            if ( action_name == "lib" )
            {
                tl.remove_action( "compile" );
                tl.remove_action( "link" );
                tl.remove_action( "run" );
            }
            else if ( action_name == "compile" )
            {
                tl.remove_action( "link" );
                tl.remove_action( "run" );
                // latch the failure so later compiles in this directory
                // are suppressed (the redundant pre-check assignment that
                // used to precede test_log construction was removed)
                if ( result == "fail" ) m_compile_failed = true;
            }
            else if ( action_name == "link" )
            {
                tl.remove_action( "run" );
            }

            // dependency removal won't work right with random names, so assert
            else { assert( action_name == "run" ); }

            // add the stop_message action
            tl.add_action( action_name,
                result == "succeed" && note() ? std::string("note") : result,
                timestamp, content );
        }

        m_action_name = ""; // signal no pending action
        m_previous_target_directory = target_directory;
    }
};
}
// main --------------------------------------------------------------------//
// Program entry point: parse options, then scan the bjam log line by
// line, translating recognized action messages into test_log.xml files
// via message_manager.
int main( int argc, char ** argv )
{
    // Turn off synchronization with corresponding C standard library files. This
    // gives a significant speed improvement on platforms where the standard C++
    // streams are implemented using standard C files.
    std::ios::sync_with_stdio(false);

    fs::initial_path();
    std::istream* input = 0;

    if ( argc <= 1 )
    {
        std::cout << "process_jam_log [--echo] [--create-directories] [--v1|--v2]\n"
            "   [--boost-root boost_root] [--locate-root locate_root]\n"
            "   [--input-file input_file]\n"
            "   [locate-root]\n"
            "--echo - verbose diagnostic output.\n"
            "--create-directories - if the directory for xml file doesn't exists - creates it.\n"
            "   usually used for processing logfile on different machine\n"
            "--v2 - bjam version 2 used (default).\n"
            "--v1 - bjam version 1 used.\n"
            "--boost-root - the root of the boost installation being used. If not defined\n"
            "   assume to run from within it and discover it heuristically.\n"
            "--locate-root - the same as the bjam ALL_LOCATE_TARGET\n"
            "   parameter, if any. Default is boost-root.\n"
            "--input-file - the output of a bjam --dump-tests run. Default is std input.\n"
            ;
        return 1;
    }

    // option parsing; each consumed argument shifts argv left
    while ( argc > 1 )
    {
        if ( std::strcmp( argv[1], "--echo" ) == 0 )
        {
            echo = true;
            --argc; ++argv;
        }
        else if ( std::strcmp( argv[1], "--create-directories" ) == 0 )
        {
            create_dirs = true;
            --argc; ++argv;
        }
        else if ( std::strcmp( argv[1], "--v2" ) == 0 )
        {
            boost_build_v2 = true;
            --argc; ++argv;
        }
        else if ( std::strcmp( argv[1], "--v1" ) == 0 )
        {
            boost_build_v2 = false;
            --argc; ++argv;
        }
        else if ( std::strcmp( argv[1], "--boost-root" ) == 0 )
        {
            --argc; ++argv;
            if ( argc == 1 )
            {
                std::cout << "Abort: option --boost-root requires a directory argument\n";
                std::exit(1);
            }
            boost_root = fs::path( argv[1] );
            if ( !boost_root.is_complete() )
                boost_root = ( fs::initial_path() / boost_root ).normalize();
            --argc; ++argv;
        }
        else if ( std::strcmp( argv[1], "--locate-root" ) == 0 )
        {
            --argc; ++argv;
            if ( argc == 1 )
            {
                std::cout << "Abort: option --locate-root requires a directory argument\n";
                std::exit(1);
            }
            locate_root = fs::path( argv[1] );
            --argc; ++argv;
        }
        else if ( std::strcmp( argv[1], "--input-file" ) == 0 )
        {
            --argc; ++argv;
            if ( argc == 1 )
            {
                std::cout << "Abort: option --input-file requires a filename argument\n";
                std::exit(1);
            }
            input = new std::ifstream(argv[1]);
            --argc; ++argv;
        }
        else if ( *argv[1] == '-' )
        {
            std::cout << "Abort: unknown option; invoke with no arguments to see list of valid options\n";
            return 1;
        }
        else
        {
            // bare argument is the locate-root
            locate_root = fs::path( argv[1] );
            --argc; ++argv;
        }
    }

    if ( boost_root.empty() )
    {
        set_boost_root();
        boost_root.normalize();
    }

    if ( locate_root.empty() )
    {
        locate_root = boost_root;
    }
    else if ( !locate_root.is_complete() )
    {
        locate_root = ( fs::initial_path() / locate_root ).normalize();
    }

    if ( input == 0 )
    {
        input = &std::cin;
    }

    std::cout << "boost_root: " << boost_root.string() << '\n'
              << "locate_root: " << locate_root.string() << '\n';

    message_manager mgr;

    string line;
    string content;
    bool capture_lines = false;

    // This loop looks at lines for certain signatures, and accordingly:
    //   * Calls start_message() to start capturing lines. (start_message() will
    //     automatically call stop_message() if needed.)
    //   * Calls stop_message() to stop capturing lines.
    //   * Capture lines if line capture on.
    static const int max_line_length = 8192;
    int line_num = 0;
    while ( std::getline( *input, line ) )
    {
        if (max_line_length < line.size()) line = line.substr(0, max_line_length);

        ++line_num;

        std::vector<std::string> const line_parts( split( line ) );
        std::string const line_start( line_parts[0] != "...failed"
            ? line_parts[0]
            : line_parts[0] + " " + line_parts[1]
            );

        if ( echo )
        {
            std::cout
                << "line " << line_num << ": " << line << "\n"
                << "\tline_start: " << line_start << "\n";
        }

        // create map of test-name to test-info
        if ( line_start.find( "boost-test(" ) == 0 )
        {
            string::size_type pos = line.find( '"' );
            string test_name( line.substr( pos+1, line.find( '"', pos+1)-pos-1 ) );
            test_info info;
            info.always_show_run_output
                = line.find( "\"always_show_run_output\"" ) != string::npos;
            info.type = line.substr( 11, line.find( ')' )-11 );
            for (unsigned int i = 0; i!=info.type.size(); ++i )
            { info.type[i] = std::tolower( info.type[i] ); }

            // the rest of line is missing if bjam didn't know how to make target
            pos = line.find( ':' );
            if ( pos + 1 != line.size() )
            {
                info.file_path = line.substr( pos+3,
                    line.find( "\"", pos+3 )-pos-3 );
                convert_path_separators( info.file_path );
                if ( info.file_path.find( "libs/libs/" ) == 0 ) info.file_path.erase( 0, 5 );
                if ( test_name.find( "/" ) == string::npos )
                    test_name = "/" + test_name;
                test2info.insert( std::make_pair( test_name, info ) );
                //  std::cout << test_name << ", " << info.type << ", " << info.file_path << "\n";
            }
            else
            {
                std::cout << "*****Warning - missing test path: " << line << "\n"
                    << "  (Usually occurs when bjam doesn't know how to make a target)\n";
            }
            continue;
        }

        // these actions represent both the start of a new action
        // and the end of a failed action
        else if ( line_start.find( "C++-action" ) != string::npos
            || line_start.find( "vc-C++" ) != string::npos
            || line_start.find( "C-action" ) != string::npos
            || line_start.find( "Cc-action" ) != string::npos
            || line_start.find( "vc-Cc" ) != string::npos
            || line_start.find( ".compile.") != string::npos
            || line_start.find( "compile-") != string::npos
            || line_start.find( "-compile") != string::npos
            || line_start.find( "Link-action" ) != string::npos
            || line_start.find( "vc-Link" ) != string::npos
            || line_start.find( "Archive-action" ) != string::npos
            || line_start.find( ".archive") != string::npos
            || ( line_start.find( ".link") != string::npos &&
                 // .linkonce is present in gcc linker messages about
                 // unresolved symbols. We don't have to parse those
                 line_start.find( ".linkonce" ) == string::npos )
            )
        {
            //~ if ( !test2info.size() )
            //~ {
            //~ std::cout << "*****Error - No \"boost-test\" lines encountered.\n"
            //~ " (Usually occurs when bjam was envoked without the --dump-tests option\n"
            //~ " or bjam was envoked in the wrong directory)\n";
            //~ return 1;
            //~ }

            string action( ( line_start.find( "Link-action" ) != string::npos
                || line_start.find( "vc-Link" ) != string::npos
                || line_start.find( "Archive-action" ) != string::npos
                || line_start.find( ".archive") != string::npos
                || line_start.find( ".link") != string::npos
                )
                ? "link" : "compile"
                );

            if ( line_start.find( "...failed " ) != string::npos )
            {
                mgr.stop_message( action, target_directory( line ),
                    "fail", timestamp(), content );
            }
            else
            {
                string target_dir( target_directory( line ) );
                mgr.start_message( action, target_dir,
                    test_name( target_dir ), toolset( target_dir ), content );
            }
            content = "\n";
            capture_lines = true;
        }

        // these actions are only used to stop the previous action
        else if ( line_start.find( "-Archive" ) != string::npos
            || line_start.find( "MkDir" ) == 0
            || line_start.find( "common.mkdir" ) == 0 )
        {
            mgr.stop_message( content );
            content.clear();
            capture_lines = false;
        }

        else if ( line_start.find( "execute-test" ) != string::npos
            || line_start.find( "capture-output" ) != string::npos )
        {
            if ( line_start.find( "...failed " ) != string::npos )
            {
                mgr.stop_message( "run", target_directory( line ),
                    "fail", timestamp(), content );
                content = "\n";
                capture_lines = true;
            }
            else
            {
                string target_dir( target_directory( line ) );
                mgr.start_message( "run", target_dir,
                    test_name( target_dir ), toolset( target_dir ), content );

                // contents of .output file for content
                capture_lines = false;
                content = "\n";
                fs::ifstream file( locate_root / target_dir
                    / (test_name(target_dir) + ".output") );
                if ( file )
                {
                    string ln;
                    while ( std::getline( file, ln ) )
                    {
                        if ( ln.find( "<note>" ) != string::npos ) mgr.note( true );
                        append_html( ln, content );
                        content += "\n";
                    }
                }
            }
        }

        // bjam indicates some prior dependency failed by a "...skipped" message
        else if ( line_start.find( "...skipped" ) != string::npos
            && line.find( "<directory-grist>" ) == string::npos
            )
        {
            mgr.stop_message( content );
            content.clear();
            capture_lines = false;

            if ( line.find( " for lack of " ) != string::npos )
            {
                capture_lines = ( line.find( ".run for lack of " ) == string::npos );

                string target_dir;
                string lib_dir;
                parse_skipped_msg( line, target_dir, lib_dir );

                if ( target_dir != lib_dir ) // it's a lib problem
                {
                    mgr.start_message( "lib", target_dir,
                        test_name( target_dir ), toolset( target_dir ), content );
                    content = lib_dir;
                    mgr.stop_message( "lib", target_dir, "fail", timestamp(), content );
                    content = "\n";
                }
            }
        }

        else if ( line_start.find( "**passed**" ) != string::npos
            || line_start.find( "failed-test-file" ) != string::npos
            || line_start.find( "command-file-dump" ) != string::npos )
        {
            mgr.stop_message( content );
            content = "\n";
            capture_lines = true;
        }

        else if ( capture_lines ) // hang onto lines for possible later use
        {
            // fixed: stray second semicolon removed after this call
            append_html( line, content );
            content += "\n";
        }
    }

    mgr.stop_message( content );
    if (input != &std::cin)
        delete input;
    return 0;
}

View file

@ -1,468 +0,0 @@
#!/usr/bin/python
# Copyright 2008 Rene Rivera
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import re
import optparse
import time
import xml.dom.minidom
import xml.dom.pulldom
from xml.sax.saxutils import unescape, escape
import os.path
#~ Process a bjam XML log into the XML log format for Boost result processing.
class BJamLog2Results:
def __init__(self,args=None):
    """Parse command line options, build the skeleton <test-run> result
    document, process the bjam XML log, and write the output.

    args -- optional argument list for optparse (defaults to sys.argv).
    """
    opt = optparse.OptionParser(
        usage="%prog [options] input")
    opt.add_option( '--output',
        help="output file" )
    opt.add_option( '--runner',
        help="runner ID (e.g. 'Metacomm')" )
    opt.add_option( '--comment',
        help="an HTML comment file to be inserted in the reports" )
    opt.add_option( '--tag',
        help="the tag for the results" )
    opt.add_option( '--incremental',
        help="do incremental run (do not remove previous binaries)",
        action='store_true' )
    opt.add_option( '--platform' )
    opt.add_option( '--source' )
    opt.add_option( '--revision' )
    # Defaults; optparse stores parsed option values directly on self.
    self.output = None
    self.runner = None
    self.comment='comment.html'
    self.tag='trunk'
    self.incremental=False
    self.platform=''
    self.source='SVN'
    self.revision=None
    self.input = []
    ( _opt_, self.input ) = opt.parse_args(args,self)
    if self.incremental:
        run_type = 'incremental'
    else:
        run_type = 'full'
    # Skeleton result document; add_log() appends test logs into it.
    self.results = xml.dom.minidom.parseString('''<?xml version="1.0" encoding="UTF-8"?>
<test-run
  source="%(source)s"
  runner="%(runner)s"
  timestamp=""
  platform="%(platform)s"
  tag="%(tag)s"
  run-type="%(run-type)s"
  revision="%(revision)s">
</test-run>
''' % {
        'source' : self.source,
        'runner' : self.runner,
        'platform' : self.platform,
        'tag' : self.tag,
        'run-type' : run_type,
        'revision' : self.revision,
        } )
    # Lookup tables filled while walking the bjam log:
    self.test = {}            # test name -> test record dict
    self.target_to_test = {}  # build target -> test name
    self.target = {}          # jam target -> {'name','path'}
    self.parent = {}          # child jam target -> parent jam target
    self.log = {}             # target directory -> <test-log> node
    self.add_log()
    self.gen_output()
    #~ print self.test
    #~ print self.target
def add_log(self):
    """Stream-parse the bjam XML log and translate recognized elements.

    Uses pulldom to avoid loading the whole log; for each element whose
    context path matches an ``x_*`` handler (via x_name_) the node is
    expanded, handed to the handler, and any returned nodes are appended
    to the <test-run> root.  Accumulated <test-log> nodes are appended
    at the end.
    """
    # First positional argument wins; fall back to the second.
    if self.input[0]:
        bjam_xml = self.input[0]
    else:
        bjam_xml = self.input[1]
    events = xml.dom.pulldom.parse(bjam_xml)
    context = []
    test_run = self.results.documentElement
    for (event,node) in events:
        if event == xml.dom.pulldom.START_ELEMENT:
            context.append(node)
            if node.nodeType == xml.dom.Node.ELEMENT_NODE:
                x_f = self.x_name_(*context)
                if x_f:
                    events.expandNode(node)
                    # expanding eats the end element, hence walking us out one level
                    context.pop()
                    # call the translator, and add returned items to the result
                    items = (x_f[1])(node)
                    if items:
                        for item in items:
                            if item:
                                test_run.appendChild(self.results.createTextNode("\n"))
                                test_run.appendChild(item)
        elif event == xml.dom.pulldom.END_ELEMENT:
            context.pop()
    #~ Add the log items now that we've collected all of them.
    items = self.log.values()
    if items:
        for item in items:
            if item:
                test_run.appendChild(self.results.createTextNode("\n"))
                test_run.appendChild(item)
def gen_output(self):
if self.output:
out = open(self.output,'w')
else:
out = sys.stdout
if out:
self.results.writexml(out,encoding='utf-8')
def tostring(self):
return self.results.toxml('utf-8')
def x_name_(self, *context, **kwargs):
node = None
names = [ ]
for c in context:
if c:
if not isinstance(c,xml.dom.Node):
suffix = '_'+c.replace('-','_').replace('#','_')
else:
suffix = '_'+c.nodeName.replace('-','_').replace('#','_')
node = c
names.append('x')
names = map(lambda x: x+suffix,names)
if node:
for name in names:
if hasattr(self,name):
return (name,getattr(self,name))
return None
def x(self, *context, **kwargs):
node = None
names = [ ]
for c in context:
if c:
if not isinstance(c,xml.dom.Node):
suffix = '_'+c.replace('-','_').replace('#','_')
else:
suffix = '_'+c.nodeName.replace('-','_').replace('#','_')
node = c
names.append('x')
names = map(lambda x: x+suffix,names)
if node:
for name in names:
if hasattr(self,name):
return getattr(self,name)(node,**kwargs)
else:
assert False, 'Unknown node type %s'%(name)
return None
#~ The timestamp goes to the corresponding attribute in the result.
def x_build_timestamp( self, node ):
test_run = self.results.documentElement
test_run.setAttribute('timestamp',self.get_data(node).strip())
return None
#~ Comment file becomes a comment node.
def x_build_comment( self, node ):
comment = None
if self.comment:
comment_f = open(self.comment)
if comment_f:
comment = comment_f.read()
comment_f.close()
if not comment:
comment = ''
return [self.new_text('comment',comment)]
#~ Tests are remembered for future reference.
def x_build_test( self, node ):
test_run = self.results.documentElement
test_node = node
test_name = test_node.getAttribute('name')
self.test[test_name] = {
'library' : '/'.join(test_name.split('/')[0:-1]),
'test-name' : test_name.split('/')[-1],
'test-type' : test_node.getAttribute('type').lower(),
'test-program' : self.get_child_data(test_node,tag='source',strip=True),
'target' : self.get_child_data(test_node,tag='target',strip=True),
'info' : self.get_child_data(test_node,tag='info',strip=True)
}
#~ Add a lookup for the test given the test target.
self.target_to_test[self.test[test_name]['target']] = test_name
#~ print "--- %s\n => %s" %(self.test[test_name]['target'],test_name)
return None
#~ Process the target dependency DAG into an ancestry tree so we can look up
#~ which top-level library and test targets specific build actions correspond to.
def x_build_targets_target( self, node ):
test_run = self.results.documentElement
target_node = node
name = self.get_child_data(target_node,tag='name',strip=True)
path = self.get_child_data(target_node,tag='path',strip=True)
jam_target = self.get_child_data(target_node,tag='jam-target',strip=True)
#~ print "--- target :: %s" %(name)
#~ Map for jam targets to virtual targets.
self.target[jam_target] = {
'name' : name,
'path' : path
}
#~ Create the ancestry.
dep_node = self.get_child(self.get_child(target_node,tag='dependencies'),tag='dependency')
while dep_node:
child = self.get_data(dep_node,strip=True)
child_jam_target = '<p%s>%s' % (path,child.split('//',1)[1])
self.parent[child_jam_target] = jam_target
#~ print "--- %s\n ^ %s" %(jam_target,child_jam_target)
dep_node = self.get_sibling(dep_node.nextSibling,tag='dependency')
return None
#~ Given a build action log, process into the corresponding test log and
#~ specific test log sub-part.
def x_build_action( self, node ):
    """Fold one build action from the log into the per-test <test-log>
    node: classify it (compile/link/run/result) from the jam action
    name, locate the owning test, and append/merge its output into the
    matching result sub-element.
    """
    test_run = self.results.documentElement
    action_node = node
    name = self.get_child(action_node,tag='name')
    if name:
        name = self.get_data(name)
        #~ Based on the action, we decide what sub-section the log
        #~ should go into.
        action_type = None
        if re.match('[^%]+%[^.]+[.](compile)',name):
            action_type = 'compile'
        elif re.match('[^%]+%[^.]+[.](link|archive)',name):
            action_type = 'link'
        elif re.match('[^%]+%testing[.](capture-output)',name):
            action_type = 'run'
        elif re.match('[^%]+%testing[.](expect-failure|expect-success)',name):
            action_type = 'result'
        #~ print "+ [%s] %s %s :: %s" %(action_type,name,'','')
        if action_type:
            #~ Get the corresponding test.
            (target,test) = self.get_test(action_node,type=action_type)
            #~ Skip actions that have no corresponding test as they are
            #~ regular build actions and don't need to show up in the
            #~ regression results.
            if not test:
                return None
            #~ And the log node, which we will add the results to.
            log = self.get_log(action_node,test)
            #~ print "--- [%s] %s %s :: %s" %(action_type,name,target,test)
            #~ Collect some basic info about the action.
            result_data = "%(info)s\n\n%(command)s\n%(output)s\n" % {
                'command' : self.get_action_command(action_node,action_type),
                'output' : self.get_action_output(action_node,action_type),
                'info' : self.get_action_info(action_node,action_type)
                }
            #~ For the test result status we find the appropriate node
            #~ based on the type of test. Then adjust the result status
            #~ accordingly. This makes the result status reflect the
            #~ expectation as the result pages post processing does not
            #~ account for this inversion.
            action_tag = action_type
            if action_type == 'result':
                if re.match(r'^compile',test['test-type']):
                    action_tag = 'compile'
                elif re.match(r'^link',test['test-type']):
                    action_tag = 'link'
                elif re.match(r'^run',test['test-type']):
                    action_tag = 'run'
            #~ The result sub-part we will add this result to.
            result_node = self.get_child(log,tag=action_tag)
            # bjam reports status '0' for success.
            if action_node.getAttribute('status') == '0':
                action_result = 'succeed'
            else:
                action_result = 'fail'
            if not result_node:
                #~ If we don't have one already, create it and add the result.
                result_node = self.new_text(action_tag,result_data,
                    result = action_result,
                    timestamp = action_node.getAttribute('start'))
                log.appendChild(self.results.createTextNode("\n"))
                log.appendChild(result_node)
            else:
                #~ For an existing result node we set the status to fail
                #~ when any of the individual actions fail, except for result
                #~ status.
                if action_type != 'result':
                    result = result_node.getAttribute('result')
                    if action_node.getAttribute('status') != '0':
                        result = 'fail'
                else:
                    result = action_result
                result_node.setAttribute('result',result)
                result_node.appendChild(self.results.createTextNode("\n"))
                result_node.appendChild(self.results.createTextNode(result_data))
    return None
#~ The command executed for the action. For run actions we omit the command
#~ as it's just noise.
def get_action_command( self, action_node, action_type ):
if action_type != 'run':
return self.get_child_data(action_node,tag='command')
else:
return ''
#~ The command output.
def get_action_output( self, action_node, action_type ):
    """Return the action's captured output, or '' when the <output>
    child is absent or empty."""
    return self.get_child_data(action_node,tag='output',default='')
#~ Some basic info about the action.
def get_action_info( self, action_node, action_type ):
    """Build the header text for an action's log entry: jam action name
    and path, start/end/user/system timing, and — for compiles — the
    <define> build properties (context that response files may hide)."""
    info = ""
    #~ The jam action and target.
    info += "%s %s\n" %(self.get_child_data(action_node,tag='name'),
        self.get_child_data(action_node,tag='path'))
    #~ The timing of the action.
    info += "Time: (start) %s -- (end) %s -- (user) %s -- (system) %s\n" %(
        action_node.getAttribute('start'), action_node.getAttribute('end'),
        action_node.getAttribute('user'), action_node.getAttribute('system'))
    #~ And for compiles some context that may be hidden if using response files.
    if action_type == 'compile':
        define = self.get_child(self.get_child(action_node,tag='properties'),name='define')
        while define:
            info += "Define: %s\n" %(self.get_data(define,strip=True))
            define = self.get_sibling(define.nextSibling,name='define')
    return info
#~ Find the test corresponding to an action. For testing targets these
#~ are the ones pre-declared in the --dump-test option. For libraries
#~ we create a dummy test as needed.
def get_test( self, node, type = None ):
jam_target = self.get_child_data(node,tag='jam-target')
base = self.target[jam_target]['name']
target = jam_target
while target in self.parent:
target = self.parent[target]
#~ print "--- TEST: %s ==> %s" %(jam_target,target)
#~ main-target-type is a precise indicator of what the build target is
#~ proginally meant to be.
main_type = self.get_child_data(self.get_child(node,tag='properties'),
name='main-target-type',strip=True)
if main_type == 'LIB' and type:
lib = self.target[target]['name']
if not lib in self.test:
self.test[lib] = {
'library' : re.search(r'libs/([^/]+)',lib).group(1),
'test-name' : os.path.basename(lib),
'test-type' : 'lib',
'test-program' : os.path.basename(lib),
'target' : lib
}
test = self.test[lib]
else:
target_name_ = self.target[target]['name']
if self.target_to_test.has_key(target_name_):
test = self.test[self.target_to_test[target_name_]]
else:
test = None
return (base,test)
#~ Find, or create, the test-log node to add results to.
def get_log( self, node, test ):
    """Find, or create, the <test-log> node for the action's target
    directory.  The key is the action path with everything up to and
    including 'bin.v2' removed and backslashes normalized to slashes.
    """
    target_directory = os.path.dirname(self.get_child_data(
        node,tag='path',strip=True))
    target_directory = re.sub(r'.*[/\\]bin[.]v2[/\\]','',target_directory)
    target_directory = re.sub(r'[\\]','/',target_directory)
    if not target_directory in self.log:
        if 'info' in test and test['info'] == 'always_show_run_output':
            show_run_output = 'true'
        else:
            show_run_output = 'false'
        # Keyword underscores become dashes in the attribute names
        # (see new_node), e.g. test_name -> test-name.
        self.log[target_directory] = self.new_node('test-log',
            library=test['library'],
            test_name=test['test-name'],
            test_type=test['test-type'],
            test_program=test['test-program'],
            toolset=self.get_toolset(node),
            target_directory=target_directory,
            show_run_output=show_run_output)
    return self.log[target_directory]
#~ The precise toolset from the build properties.
def get_toolset( self, node ):
toolset = self.get_child_data(self.get_child(node,tag='properties'),
name='toolset',strip=True)
toolset_version = self.get_child_data(self.get_child(node,tag='properties'),
name='toolset-%s:version'%toolset,strip=True)
return '%s-%s' %(toolset,toolset_version)
#~ XML utilities...
def get_sibling( self, sibling, tag = None, id = None, name = None, type = None ):
n = sibling
while n:
found = True
if type and found:
found = found and type == n.nodeType
if tag and found:
found = found and tag == n.nodeName
if (id or name) and found:
found = found and n.nodeType == xml.dom.Node.ELEMENT_NODE
if id and found:
if n.hasAttribute('id'):
found = found and n.getAttribute('id') == id
else:
found = found and n.hasAttribute('id') and n.getAttribute('id') == id
if name and found:
found = found and n.hasAttribute('name') and n.getAttribute('name') == name
if found:
return n
n = n.nextSibling
return None
def get_child( self, root, tag = None, id = None, name = None, type = None ):
    """Return root's first child matching the criteria (see get_sibling)."""
    return self.get_sibling(root.firstChild,tag=tag,id=id,name=name,type=type)
def get_data( self, node, strip = False, default = None ):
    """Return the concatenated text/CDATA content of 'node'.

    Starts at the first #text (or, failing that, #cdata-section) child
    and keeps appending consecutive text/CDATA siblings, stopping at
    the first sibling of any other kind.  Empty content yields
    'default'; otherwise the text, stripped when 'strip' is set.
    """
    data = None
    if node:
        data_node = None
        if not data_node:
            data_node = self.get_child(node,tag='#text')
        if not data_node:
            data_node = self.get_child(node,tag='#cdata-section')
        data = ""
        while data_node:
            data += data_node.data
            data_node = data_node.nextSibling
            if data_node:
                # Stop concatenating at the first non-text sibling.
                if data_node.nodeName != '#text' \
                    and data_node.nodeName != '#cdata-section':
                    data_node = None
        if not data:
            data = default
        else:
            if strip:
                data = data.strip()
    return data
def get_child_data( self, root, tag = None, id = None, name = None, strip = False, default = None ):
    """Convenience: text content of root's first matching child (see
    get_child and get_data)."""
    return self.get_data(self.get_child(root,tag=tag,id=id,name=name),strip=strip,default=default)
def new_node( self, tag, *child, **kwargs ):
result = self.results.createElement(tag)
for k in kwargs.keys():
if kwargs[k] != '':
if k == 'id':
result.setAttribute('id',kwargs[k])
elif k == 'klass':
result.setAttribute('class',kwargs[k])
else:
result.setAttribute(k.replace('_','-'),kwargs[k])
for c in child:
if c:
result.appendChild(c)
return result
def new_text( self, tag, data, **kwargs ):
result = self.new_node(tag,**kwargs)
data = data.strip()
if len(data) > 0:
result.appendChild(self.results.createTextNode(data))
return result
if __name__ == '__main__': BJamLog2Results()

View file

@ -1,197 +0,0 @@
#!/usr/bin/perl
#~ Copyright 2003, Rene Rivera.
#~ Use, modification and distribution are subject to the Boost Software
#~ License Version 1.0. (See accompanying file LICENSE_1_0.txt or
#~ http://www.boost.org/LICENSE_1_0.txt)
use FileHandle;
use Time::Local;
# Get the whole percent value
#
# Round $count/$total to a whole percent, clamped so any nonzero count
# shows at least 1% and any incomplete count shows at most 99%.
sub percent_value
{
    my ($count, $total) = @_;
    my $pct = int( ($count / $total) * 100 + 0.5 );
    $pct = 1  if $count > 0 && $pct == 0;
    $pct = 99 if $count < $total && $pct == 100;
    return $pct;
}
# Generate item html for the pass column.
#
# HTML cell for the "pass" column: pass percentage (100 minus the
# fail+missing percentage) plus the warning count, in the given color.
sub result_info_pass
{
    my ($color, $pass, $warn, $fail, $missing) = @_;
    my $total = $pass + $warn + $fail + $missing;
    my $percent = 100 - percent_value($fail + $missing, $total);
    return "<font color=\"$color\"><font size=\"+1\">$percent%</font><br>($warn&nbsp;warnings)</font>";
}
# Generate item html for the fail column.
#
# HTML cell for the "fail" column: fail+missing percentage plus the
# raw fail count, in the given color.
sub result_info_fail
{
    my ($color, $pass, $warn, $fail, $missing) = @_;
    my $total = $pass + $warn + $fail + $missing;
    my $percent = percent_value($fail + $missing, $total);
    return "<font color=\"$color\"><font size=\"+1\">$percent%</font><br>($fail)</font>";
}
# Generate an age highlighted run date string.
# Use as: data_info(run-date-html)
#
# Generate an age highlighted run date string.
# Use as: date_info(run-date-html)
# The original string is wrapped in a <font> colored by age:
# default up to 2 days, orange (#FF9900) up to 14 days, red beyond.
# NOTE(review): parsing and age math duplicate age_info below.
sub date_info
{
    my %m = ('January',0,'February',1,'March',2,'April',3,'May',4,'June',5,
        'July',6,'August',7,'September',8,'October',9,'November',10,'December',11);
    my @d = split(/ |:/,$_[0]);
    # Field positions assume a fixed run-date layout after splitting on
    # spaces/colons -- TODO confirm against the status page format.
    my ($hour,$min,$sec,$day,$month,$year) = ($d[0],$d[1],$d[2],$d[4],$m{$d[5]},$d[6]);
    #print "<!-- $hour.$min.$sec.$day.$month.$year -->\n";
    my $test_t = timegm($sec,$min,$hour,$day,$month,$year);
    my $age = time-$test_t;
    my $age_days = $age/(60*60*24);
    #print "<!-- $age_days days old -->\n";
    my $age_html = "<font>";
    if ($age_days <= 2) { }
    elsif ($age_days <= 14) { $age_html = "<font color=\"#FF9900\">"; }
    else { $age_html = "<font color=\"#FF0000\">"; }
    return $age_html.$_[0]."</font>";
}
# Generate an age string based on the run date.
# Use as: age_info(run-date-html)
#
# Generate an age string based on the run date.
# Use as: age_info(run-date-html)
# Produces a human-readable age ("today", "N days", "N weeks",
# "N months") wrapped in the same age-colored <font> as date_info.
# NOTE(review): parsing and age math duplicate date_info above.
sub age_info
{
    my %m = ('January',0,'February',1,'March',2,'April',3,'May',4,'June',5,
        'July',6,'August',7,'September',8,'October',9,'November',10,'December',11);
    my @d = split(/ |:/,$_[0]);
    my ($hour,$min,$sec,$day,$month,$year) = ($d[0],$d[1],$d[2],$d[4],$m{$d[5]},$d[6]);
    #print "<!-- $hour.$min.$sec.$day.$month.$year -->\n";
    my $test_t = timegm($sec,$min,$hour,$day,$month,$year);
    my $age = time-$test_t;
    my $age_days = $age/(60*60*24);
    #print "<!-- $age_days days old -->\n";
    my $age_html = "<font>";
    if ($age_days <= 2) { }
    elsif ($age_days <= 14) { $age_html = "<font color=\"#FF9900\">"; }
    else { $age_html = "<font color=\"#FF0000\">"; }
    # Pick the coarsest unit that reads naturally.
    if ($age_days <= 1) { $age_html = $age_html."today"; }
    elsif ($age_days <= 2) { $age_html = $age_html."yesterday"; }
    elsif ($age_days < 14) { my $days = int $age_days; $age_html = $age_html.$days." days"; }
    elsif ($age_days < 7*8) { my $weeks = int $age_days/7; $age_html = $age_html.$weeks." weeks"; }
    else { my $months = int $age_days/28; $age_html = $age_html.$months." months"; }
    return $age_html."</font>";
}
#~ foreach my $k (sort keys %ENV)
#~ {
#~     print "<!-- $k = $ENV{$k} -->\n";
#~ }
# Scan the working directory for per-platform "*links*.html" compiler
# status pages, scrape platform/date/compiler/result information out of
# their HTML, and print a summary table to stdout.
my $logdir = "$ENV{PWD}";
#~ my $logdir = "C:\\CVSROOTs\\Boost\\boost\\status";
opendir LOGS, "$logdir";
my @logs = grep /.*links[^.]*\.html$/, readdir LOGS;
closedir LOGS;
# Alternating row backgrounds.
my @bgcolor = ( "bgcolor=\"#EEEEFF\"", "" );
my $row = 0;
print "<table>\n";
print "<tr>\n",
    "<th align=\"left\" bgcolor=\"#DDDDDD\">Platform</th>\n",
    "<th align=\"left\" bgcolor=\"#DDDDDD\">Run Date</th>\n",
    "<th align=\"left\" bgcolor=\"#DDDDDD\">Age</th>\n",
    "<th align=\"left\" bgcolor=\"#DDDDDD\">Compilers</th>\n",
    "<th align=\"left\" bgcolor=\"#DDDDDD\">Pass</th>\n",
    "<th align=\"left\" bgcolor=\"#DDDDDD\">Fail</th>\n",
    "</tr>\n";
foreach $l (sort { lc($a) cmp lc($b) } @logs)
{
    # "cs-<spec>-links.html" -> summary page "cs-<spec>.html".
    my $log = $l;
    $log =~ s/-links//s;
    my ($spec) = ($log =~ /cs-([^\.]+)/);
    my $fh = new FileHandle;
    if ($fh->open("<$logdir/$log"))
    {
        my $content = join('',$fh->getlines());
        $fh->close;
        # Scrape the status header, platform name and run date.
        my ($status) = ($content =~ /(<h1>Compiler(.(?!<\/td>))+.)/si);
        my ($platform) = ($status =~ /Status: ([^<]+)/si);
        my ($run_date) = ($status =~ /Date:<\/b> ([^<]+)/si);
        $run_date =~ s/, /<br>/g;
        # Compiler names come from the header row after "Test Type".
        my ($compilers) = ($content =~ /Test Type<\/a><\/t[dh]>((.(?!<\/tr>))+.)/si);
        if ($compilers eq "") { next; }
        $compilers =~ s/-<br>//g;
        $compilers =~ s/<\/td>//g;
        my @compiler = ($compilers =~ /<td>(.*)$/gim);
        my $count = @compiler;
        # Result cells appear $count per test row.
        my @results = ($content =~ /(>Pass<|>Warn<|>Fail<|>Missing<)/gi);
        my $test_count = (scalar @results)/$count;
        my @pass = map { 0 } (1..$count);
        my @warn = map { 0 } (1..$count);
        my @fail = map { 0 } (1..$count);
        my @missing = map { 0 } (1..$count);
        my @total = map { 0 } (1..$count);
        #~ print "<!-- ",
        #~     "pass = ",join(',',@pass)," ",
        #~     "warn = ",join(',',@warn)," ",
        #~     "fail = ",join(',',@fail)," ",
        #~     "missing = ",join(',',@missing)," ",
        #~     "total = ",join(',',@total)," ",
        #~     " -->\n";
        # Tally per-compiler results across all test rows.
        for my $t (1..$test_count)
        {
            my $r0 = (($t-1)*$count);
            my $r1 = (($t-1)*$count+$count-1);
            my @r = @results[(($t-1)*$count)..(($t-1)*$count+$count-1)];
            #~ print "<!-- ",
            #~     "result = ",join(',',@r)," ",
            #~     "range = ",$r0,"..",$r1," (",(scalar @results),")",
            #~     " -->\n";
            for my $c (1..$count)
            {
                if ($r[$c-1] =~ /Pass/i) { ++$pass[$c-1]; }
                elsif ($r[$c-1] =~ /Warn/i) { ++$warn[$c-1]; }
                elsif ($r[$c-1] =~ /Fail/i) { ++$fail[$c-1]; }
                elsif ($r[$c-1] =~ /Missing/i) { ++$missing[$c-1]; }
                ++$total[$c-1];
            }
        }
        #~ print "<!-- ",
        #~     "pass = ",join(',',@pass)," ",
        #~     "warn = ",join(',',@warn)," ",
        #~     "fail = ",join(',',@fail)," ",
        #~     "missing = ",join(',',@missing)," ",
        #~     "total = ",join(',',@total)," ",
        #~     " -->\n";
        # Collapse long multi-line compiler labels to first+last line.
        for my $comp (1..(scalar @compiler))
        {
            my @lines = split(/<br>/,$compiler[$comp-1]);
            if (@lines > 2) { $compiler[$comp-1] = join(' ',@lines[0..(scalar @lines)-2])."<br>".$lines[(scalar @lines)-1]; }
        }
        # First compiler row carries the platform/date/age cells
        # (rowspan over all of this platform's compilers).
        print
            "<tr>\n",
            "<td rowspan=\"$count\" valign=\"top\"><font size=\"+1\">$platform</font><br>(<a href=\"./$log\">$spec</a>)</td>\n",
            "<td rowspan=\"$count\" valign=\"top\">",$run_date,"</td>\n",
            "<td rowspan=\"$count\" valign=\"top\">",age_info($run_date),"</td>\n",
            "<td valign=\"top\" ",$bgcolor[$row],">",$compiler[0],"</td>\n",
            "<td valign=\"top\" ",$bgcolor[$row],">",result_info_pass("#000000",$pass[0],$warn[0],$fail[0],$missing[0]),"</td>\n",
            "<td valign=\"top\" ",$bgcolor[$row],">",result_info_fail("#FF0000",$pass[0],$warn[0],$fail[0],$missing[0]),"</td>\n",
            "</tr>\n";
        $row = ($row+1)%2;
        # Remaining compilers for this platform.
        foreach my $c (1..($count-1))
        {
            print
                "<tr>\n",
                "<td valign=\"top\" ",$bgcolor[$row],">",$compiler[$c],"</td>\n",
                "<td valign=\"top\" ",$bgcolor[$row],">",result_info_pass("#000000",$pass[$c],$warn[$c],$fail[$c],$missing[$c]),"</td>\n",
                "<td valign=\"top\" ",$bgcolor[$row],">",result_info_fail("#FF0000",$pass[$c],$warn[$c],$fail[$c],$missing[$c]),"</td>\n",
                "</tr>\n";
            $row = ($row+1)%2;
        }
        # Separator between platforms.
        print
            "<tr>\n",
            "<td colspan=\"7\"><hr size=\"1\" noshade></td>\n",
            "</tr>\n";
    }
}
print "</table>\n";
View file

@ -1,908 +0,0 @@
#!/usr/bin/python
# Copyright MetaCommunications, Inc. 2003-2007
# Copyright Redshift Software, Inc. 2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import glob
import optparse
import os
import os.path
import platform
import sys
import time
#~ Place holder for xsl_reports/util module
# (presumably bound later by runner.import_utils() -- confirm; it is
# referenced throughout the runner class as the 'utils' global)
utils = None
# Subversion roots: anonymous read-only vs. authenticated user access.
repo_root = {
    'anon' : 'http://svn.boost.org/svn/boost/',
    'user' : 'https://svn.boost.org/svn/boost/'
    }
# Repository-relative paths of the components a regression run fetches.
repo_path = {
    'trunk' : 'trunk',
    'release' : 'branches/release',
    'build' : 'trunk/tools/build/v2',
    'jam' : 'trunk/tools/build/engine',
    'regression' : 'trunk/tools/regression',
    'boost-build.jam'
        : 'trunk/boost-build.jam'
    }
class runner:
def __init__(self,root):
    """Parse options, compute the regression directory layout, and run
    the requested commands (the option values are stored directly on
    self by optparse; commands default to 'regression').

    root -- directory the regression run operates in.

    Fixes: the command-list help text used ``commands = map(...)``
    followed by ``commands.sort()``, which fails under Python 3 where
    ``map`` returns an iterator; replaced with ``sorted(...)``.
    """
    # Derive the "commands: ..." help text from the command_* methods.
    commands = sorted(
        m[8:].replace('_','-')
        for m in runner.__dict__.keys()
        if m.startswith('command_') )
    commands = "commands: %s" % ', '.join(commands)
    opt = optparse.OptionParser(
        usage="%prog [options] [commands]",
        description=commands)
    #~ Base Options:
    opt.add_option( '--runner',
        help="runner ID (e.g. 'Metacomm')" )
    opt.add_option( '--comment',
        help="an HTML comment file to be inserted in the reports" )
    opt.add_option( '--tag',
        help="the tag for the results" )
    opt.add_option( '--toolsets',
        help="comma-separated list of toolsets to test with" )
    opt.add_option( '--libraries',
        help="comma separated list of libraries to test")
    opt.add_option( '--incremental',
        help="do incremental run (do not remove previous binaries)",
        action='store_true' )
    opt.add_option( '--timeout',
        help="specifies the timeout, in minutes, for a single test run/compilation",
        type='int' )
    opt.add_option( '--bjam-options',
        help="options to pass to the regression test" )
    opt.add_option( '--bjam-toolset',
        help="bootstrap toolset for 'bjam' executable" )
    opt.add_option( '--pjl-toolset',
        help="bootstrap toolset for 'process_jam_log' executable" )
    opt.add_option( '--platform' )
    #~ Source Options:
    opt.add_option( '--user',
        help="Boost SVN user ID" )
    opt.add_option( '--local',
        help="the name of the boost tarball" )
    opt.add_option( '--force-update',
        help="do an SVN update (if applicable) instead of a clean checkout, even when performing a full run",
        action='store_true' )
    opt.add_option( '--have-source',
        help="do neither a tarball download nor an SVN update; used primarily for testing script changes",
        action='store_true' )
    #~ Connection Options:
    opt.add_option( '--ftp',
        help="FTP URL to upload results to." )
    opt.add_option( '--proxy',
        help="HTTP proxy server address and port (e.g.'http://www.someproxy.com:3128')" )
    opt.add_option( '--ftp-proxy',
        help="FTP proxy server (e.g. 'ftpproxy')" )
    opt.add_option( '--dart-server',
        help="the dart server to send results to" )
    #~ Debug Options:
    opt.add_option( '--debug-level',
        help="debugging level; controls the amount of debugging output printed",
        type='int' )
    opt.add_option( '--send-bjam-log',
        help="send full bjam log of the regression run",
        action='store_true' )
    opt.add_option( '--mail',
        help="email address to send run notification to" )
    opt.add_option( '--smtp-login',
        help="STMP server address/login information, in the following form: <user>:<password>@<host>[:<port>]" )
    opt.add_option( '--skip-tests',
        help="do not run bjam; used for testing script changes",
        action='store_true' )
    #~ Defaults
    self.runner = None
    self.comment='comment.html'
    self.tag='trunk'
    self.toolsets=None
    self.libraries=None
    self.incremental=False
    self.timeout=5
    self.bjam_options=''
    self.bjam_toolset=''
    self.pjl_toolset=''
    self.platform=self.platform_name()
    self.user='anonymous'
    self.local=None
    self.force_update=False
    self.have_source=False
    self.ftp=None
    self.proxy=None
    self.ftp_proxy=None
    self.dart_server=None
    self.debug_level=0
    self.send_bjam_log=False
    self.mail=None
    self.smtp_login=None
    self.skip_tests=False
    # Parsed option values are stored as attributes on self.
    ( _opt_, self.actions ) = opt.parse_args(None,self)
    if not self.actions or self.actions == []:
        self.actions = [ 'regression' ]
    #~ Initialize option dependent values.
    self.regression_root = root
    self.boost_root = os.path.join( self.regression_root, 'boost' )
    self.regression_results = os.path.join( self.regression_root, 'results' )
    if self.pjl_toolset != 'python':
        self.regression_log = os.path.join( self.regression_results, 'bjam.log' )
    else:
        self.regression_log = os.path.join( self.regression_results, 'bjam.xml' )
    self.tools_bb_root = os.path.join( self.regression_root,'tools_bb' )
    self.tools_bjam_root = os.path.join( self.regression_root,'tools_bjam' )
    self.tools_regression_root = os.path.join( self.regression_root,'tools_regression' )
    self.xsl_reports_dir = os.path.join( self.tools_regression_root, 'xsl_reports' )
    self.timestamp_path = os.path.join( self.regression_root, 'timestamp' )
    # Platform-specific tool names.
    if sys.platform == 'win32':
        self.patch_boost = 'patch_boost.bat'
        self.bjam = { 'name' : 'bjam.exe' }
        self.process_jam_log = { 'name' : 'process_jam_log.exe' }
    elif sys.platform == 'cygwin':
        self.patch_boost = 'patch_boost'
        self.bjam = { 'name' : 'bjam.exe' }
        self.process_jam_log = { 'name' : 'process_jam_log.exe' }
    else:
        self.patch_boost = 'patch_boost'
        self.bjam = { 'name' : 'bjam' }
        self.process_jam_log = { 'name' : 'process_jam_log' }
    # Descriptions of how to build the two bootstrap tools.
    self.bjam = {
        'name' : self.bjam['name'],
        'build_cmd' : self.bjam_build_cmd,
        'path' : os.path.join(self.regression_root,self.bjam['name']),
        'source_dir' : self.tools_bjam_root,
        'build_dir' : self.tools_bjam_root,
        'build_args' : ''
        }
    self.process_jam_log = {
        'name' : self.process_jam_log['name'],
        'build_cmd' : self.bjam_cmd,
        'path' : os.path.join(self.regression_root,self.process_jam_log['name']),
        'source_dir' : os.path.join(self.tools_regression_root,'build'),
        'build_dir' : os.path.join(self.tools_regression_root,'build'),
        'build_args' : 'process_jam_log -d2'
        }
    if self.debug_level > 0:
        self.log('Regression root = %s'%self.regression_root)
        self.log('Boost root = %s'%self.boost_root)
        self.log('Regression results = %s'%self.regression_results)
        self.log('Regression log = %s'%self.regression_log)
        self.log('BB root = %s'%self.tools_bb_root)
        self.log('Bjam root = %s'%self.tools_bjam_root)
        self.log('Tools root = %s'%self.tools_regression_root)
        self.log('XSL reports dir = %s'%self.xsl_reports_dir)
        self.log('Timestamp = %s'%self.timestamp_path)
        self.log('Patch Boost script = %s'%self.patch_boost)
    if self.libraries is not None:
        self.libraries = self.libraries.split(",")
        # Boost.Build depends on any having run
        if "build" in self.libraries and "any" not in self.libraries:
            self.libraries += ["any"]
        self.bjam_options += ' "--limit-tests=' + \
            "|".join(lib for lib in self.libraries if lib != "build") + '"'
    self.main()
#~ The various commands that make up the testing sequence...
def command_cleanup(self,*args):
    """Remove artifacts of a previous run.

    args -- any of 'source' (the Boost tree) and 'bin' (build products
            and results); defaults to both.
    """
    if not args:
        args = ['source', 'bin']
    if 'source' in args:
        self.log( 'Cleaning up "%s" directory ...' % self.boost_root )
        self.rmtree( self.boost_root )
    if 'bin' in args:
        # Build output directories plus the results directory.
        for directory in (
                os.path.join( self.boost_root, 'bin' ),
                os.path.join( self.boost_root, 'bin.v2' ),
                self.regression_results ):
            self.log( 'Cleaning up "%s" directory ...' % directory )
            self.rmtree( directory )
def command_get_tools(self):
    """Fetch the tooling: Boost.Build v2, Boost.Jam, and the regression
    tools — by SVN checkout when a user is configured, otherwise as
    tarballs — plus a top-level boost-build.jam."""
    #~ Get Boost.Build v2...
    self.log( 'Getting Boost.Build v2...' )
    if self.user and self.user != '':
        os.chdir( os.path.dirname(self.tools_bb_root) )
        self.svn_command( 'co %s %s' % (
            self.svn_repository_url(repo_path['build']),
            os.path.basename(self.tools_bb_root) ) )
    else:
        self.retry( lambda: self.download_tarball(
            os.path.basename(self.tools_bb_root)+".tar.bz2",
            self.tarball_url(repo_path['build']) ) )
        self.unpack_tarball(
            self.tools_bb_root+".tar.bz2",
            os.path.basename(self.tools_bb_root) )
    #~ Get Boost.Jam...
    self.log( 'Getting Boost.Jam...' )
    if self.user and self.user != '':
        os.chdir( os.path.dirname(self.tools_bjam_root) )
        self.svn_command( 'co %s %s' % (
            self.svn_repository_url(repo_path['jam']),
            os.path.basename(self.tools_bjam_root) ) )
    else:
        self.retry( lambda: self.download_tarball(
            os.path.basename(self.tools_bjam_root)+".tar.bz2",
            self.tarball_url(repo_path['jam']) ) )
        self.unpack_tarball(
            self.tools_bjam_root+".tar.bz2",
            os.path.basename(self.tools_bjam_root) )
    #~ Get the regression tools and utilities...
    self.log( 'Getting regression tools an utilities...' )
    if self.user and self.user != '':
        os.chdir( os.path.dirname(self.tools_regression_root) )
        self.svn_command( 'co %s %s' % (
            self.svn_repository_url(repo_path['regression']),
            os.path.basename(self.tools_regression_root) ) )
    else:
        self.retry( lambda: self.download_tarball(
            os.path.basename(self.tools_regression_root)+".tar.bz2",
            self.tarball_url(repo_path['regression']) ) )
        self.unpack_tarball(
            self.tools_regression_root+".tar.bz2",
            os.path.basename(self.tools_regression_root) )
    #~ We get a boost-build.jam to make the tool build work even if there's
    #~ an existing boost-build.jam above the testing root.
    self.log( 'Getting boost-build.jam...' )
    self.http_get(
        self.svn_repository_url(repo_path['boost-build.jam']),
        os.path.join( self.regression_root, 'boost-build.jam' ) )
def command_get_source(self):
    """Fetch a fresh Boost source tree: SVN checkout for a configured
    user, tarball download otherwise."""
    self.refresh_timestamp()
    self.log( 'Getting sources (%s)...' % self.timestamp() )
    # A non-empty user means authenticated SVN access is available.
    if self.user:
        self.retry( self.svn_checkout )
    else:
        self.retry( self.get_tarball )
def command_update_source(self):
    """Update an existing SVN working copy in place; fall back to a
    full fetch when there is neither a user nor a working copy."""
    if self.user and self.user != '' \
        or os.path.exists( os.path.join( self.boost_root, '.svn' ) ):
        # Touch the timestamp file to mark the start of this run.
        open( self.timestamp_path, 'w' ).close()
        self.log( 'Updating sources from SVN (%s)...' % self.timestamp() )
        self.retry( self.svn_update )
    else:
        self.command_get_source( )
    pass
def command_patch(self):
    """Execute the optional patch script (self.patch_boost) from the
    regression root, if one exists there."""
    self.import_utils()
    patch_script = os.path.join( self.regression_root, self.patch_boost )
    if os.path.exists( patch_script ):
        self.log( 'Found patch file "%s". Executing it.' % patch_script )
        os.chdir( self.regression_root )
        utils.system( [ patch_script ] )
def command_setup(self):
    """Apply local patches and build the bootstrap tools: bjam, and —
    unless the Python log processor is selected — process_jam_log."""
    self.command_patch()
    self.build_if_needed(self.bjam,self.bjam_toolset)
    if self.pjl_toolset != 'python':
        self.build_if_needed(self.process_jam_log,self.pjl_toolset)
def command_test(self, *args):
    """Run the test sequence.

    args -- any of "clean", "test", "process"; defaults to
            ("test", "process").

    Fixes: removed two unused locals (results_libs, results_status)
    that were computed but never read — command_test_clean computes
    them itself.
    """
    if not args or args == None or args == []: args = [ "test", "process" ]
    self.import_utils()
    self.log( 'Making "%s" directory...' % self.regression_results )
    utils.makedirs( self.regression_results )
    if "clean" in args:
        self.command_test_clean()
    if "test" in args:
        self.command_test_run()
        self.command_test_boost_build()
    if "process" in args:
        # The Python log processor handles this elsewhere.
        if self.pjl_toolset != 'python':
            self.command_test_process()
def command_test_clean(self):
    """Delete the per-library and status result trees from a previous run."""
    for subdir in ('libs', 'status'):
        self.rmtree( os.path.join( self.regression_results, subdir ) )
# Run the Boost test suite via bjam from <boost_root>/status, writing
# per-test build output under regression_results and the overall log to
# regression_log (plain text for process_jam_log, XML for the Python
# log processor).
def command_test_run(self):
self.import_utils()
if self.pjl_toolset != 'python':
# -d2 with --dump-tests produces the verbose text log that
# process_jam_log parses later; output is appended to the log file.
test_cmd = '%s -d2 preserve-test-targets=off --dump-tests %s "--build-dir=%s" >>"%s" 2>&1' % (
self.bjam_cmd( self.toolsets ),
self.bjam_options,
self.regression_results,
self.regression_log )
else:
# The Python processor consumes structured XML via --out-xml instead.
test_cmd = '%s -d1 preserve-test-targets=off --dump-tests --verbose-test %s "--build-dir=%s" "--out-xml=%s"' % (
self.bjam_cmd( self.toolsets ),
self.bjam_options,
self.regression_results,
self.regression_log )
self.log( 'Starting tests (%s)...' % test_cmd )
# Run from <boost_root>/status and restore the old directory afterwards.
cd = os.getcwd()
os.chdir( os.path.join( self.boost_root, 'status' ) )
utils.system( [ test_cmd ] )
os.chdir( cd )
# Run the Boost.Build test suite once per toolset that was actually used,
# recording each run's XML output in a boost-build-<toolset> directory.
def command_test_boost_build(self):
if self.libraries is not None and "build" not in self.libraries:
return
self.import_utils()
self.log( 'Running Boost.Build tests' )
# Find the true names of the toolsets used for testing
# NOTE(review): this relies on libs/any test results being present —
# presumably any_test is always built; verify before reuse.
toolsets = os.listdir(os.path.join(self.regression_results,
"boost/bin.v2/libs/any/test/any_test.test"));
for t in toolsets:
d = os.path.join(self.regression_results, ("boost-build-%s" % (t)))
utils.makedirs (d)
fn = os.path.join(d, "test_log.xml")
cd = os.getcwd()
try:
os.chdir (os.path.join (self.boost_root, 'tools/build/test'));
bjam_path = os.path.dirname (self.tool_path( self.bjam ))
self.log( "Using bjam binary in '%s'" % (bjam_path))
# Prepend the bjam directory so test_all.py picks up this binary.
os.putenv('PATH', bjam_path + os.pathsep + os.environ['PATH'])
utils.system ( [ '"%s" test_all.py --default-bjam --xml %s > %s' % (sys.executable, t, fn) ] )
finally:
os.chdir( cd )
def command_test_process(self):
    """Feed the bjam log through process_jam_log to produce the
    per-test test_log.xml files under the results tree."""
    self.import_utils()
    self.log('Getting test case results out of "%s"...' % self.regression_log)
    saved_dir = os.getcwd()
    os.chdir(os.path.join(self.boost_root, 'status'))
    processor = self.tool_path(self.process_jam_log)
    utils.checked_system([
        '"%s" "%s" <"%s"' % (processor, self.regression_results, self.regression_log)
    ])
    os.chdir(saved_dir)
# Gather this run's test results into <runner>.xml (and a .zip of it),
# creating a default comment file when the user did not supply one and
# extracting the SVN revision of the tested tree.
def command_collect_logs(self):
self.import_utils()
comment_path = os.path.join( self.regression_root, self.comment )
if not os.path.exists( comment_path ):
self.log( 'Comment file "%s" not found; creating default comment.' % comment_path )
f = open( comment_path, 'w' )
f.write( '<p>Tests are run on %s platform.</p>' % self.platform_name() )
f.close()
source = 'tarball'
revision = ''
svn_root_file = os.path.join( self.boost_root, '.svn' )
svn_info_file = os.path.join( self.boost_root, 'svn_info.txt' )
if os.path.exists( svn_root_file ):
source = 'SVN'
self.svn_command( 'info --xml "%s" >"%s"' % (self.boost_root,svn_info_file) )
if os.path.exists( svn_info_file ):
f = open( svn_info_file, 'r' )
svn_info = f.read()
f.close()
# Accept both the plain-text ('Revision:') and --xml ('revision=') forms.
i = svn_info.find( 'Revision:' )
if i < 0: i = svn_info.find( 'revision=' ) # --xml format
if i >= 0:
# Both markers are 9 characters; +10 also skips the following
# separator (space or quote), then digits are accumulated.
i += 10
while svn_info[i] >= '0' and svn_info[i] <= '9':
revision += svn_info[i]
i += 1
if self.pjl_toolset != 'python':
from collect_and_upload_logs import collect_logs
if self.incremental:
run_type = 'incremental'
else:
run_type = 'full'
collect_logs(
self.regression_results,
self.runner, self.tag, self.platform, comment_path,
self.timestamp_path,
self.user,
source, run_type,
self.dart_server, self.proxy,
revision )
else:
from process_jam_log import BJamLog2Results
if self.incremental:
run_type = '--incremental'
else:
run_type = ''
BJamLog2Results([
'--output='+os.path.join(self.regression_results,self.runner+'.xml'),
'--runner='+self.runner,
'--comment='+comment_path,
'--tag='+self.tag,
'--platform='+self.platform,
'--source='+source,
'--revision='+revision,
run_type,
self.regression_log
])
# The zip is what command_upload_logs ultimately sends.
self.compress_file(
os.path.join(self.regression_results,self.runner+'.xml'),
os.path.join(self.regression_results,self.runner+'.zip')
)
# Upload the collected logs, retrying on transient failures. An explicit
# ftp URL (self.ftp) overrides upload_logs' default destination; the two
# branches are otherwise identical.
def command_upload_logs(self):
self.import_utils()
from collect_and_upload_logs import upload_logs
if self.ftp:
self.retry(
lambda:
upload_logs(
self.regression_results,
self.runner, self.tag,
self.user,
self.ftp_proxy,
self.debug_level, self.send_bjam_log,
self.timestamp_path,
self.dart_server,
ftp_url = self.ftp )
)
else:
self.retry(
lambda:
upload_logs(
self.regression_results,
self.runner, self.tag,
self.user,
self.ftp_proxy,
self.debug_level, self.send_bjam_log,
self.timestamp_path,
self.dart_server )
)
# Top-level driver: optionally mail start/finish notices, fetch tools and
# sources, run the tests, then collect and upload the logs. Any exception
# is reported by mail (when configured) and re-raised.
def command_regression(self):
import socket
import string
try:
mail_subject = 'Boost regression for %s on %s' % ( self.tag,
string.split(socket.gethostname(), '.')[0] )
start_time = time.localtime()
if self.mail:
self.log( 'Sending start notification to "%s"' % self.mail )
self.send_mail(
'%s started at %s.' % ( mail_subject, format_time( start_time ) )
)
self.command_get_tools()
if self.local is not None:
# Test a local tarball instead of fetching sources.
self.log( 'Using local file "%s"' % self.local )
b = os.path.basename( self.local )
tag = b[ 0: b.find( '.' ) ]
self.log( 'Tag: "%s"' % tag )
self.unpack_tarball( self.local, self.boost_root )
elif self.have_source:
if not self.incremental: self.command_cleanup( 'bin' )
else:
if self.incremental or self.force_update:
if not self.incremental: self.command_cleanup( 'bin' )
else:
self.command_cleanup()
self.command_get_source()
self.command_setup()
# Not specifying --toolset on the command line is not enough to skip
# testing — that would just mean using the Boost.Build defaults. Tests
# are skipped only when we were explicitly given an empty "--toolset=".
if self.toolsets != '': # --toolset=,
if not self.skip_tests:
self.command_test()
self.command_collect_logs()
self.command_upload_logs()
if self.mail:
self.log( 'Sending report to "%s"' % self.mail )
end_time = time.localtime()
self.send_mail(
'%s completed successfully at %s.' % ( mail_subject, format_time( end_time ) )
)
except:
# Report the failure with a full traceback, then re-raise.
if self.mail:
self.log( 'Sending report to "%s"' % self.mail )
traceback_ = '\n'.join( apply( traceback.format_exception, sys.exc_info() ) )
end_time = time.localtime()
self.send_mail(
'%s failed at %s.' % ( mail_subject, format_time( end_time ) ),
traceback_ )
raise
# Print the SVN keyword-expanded revision and last-modified date of this
# script.
# NOTE(review): when the $...$ keywords are not expanded by SVN, the
# regex does not match and this raises AttributeError on .group(1).
def command_show_revision(self):
modified = '$Date$'
revision = '$Revision$'
import re
# Matches an expanded SVN keyword of the form "$Name: value $".
re_keyword_value = re.compile( r'^\$\w+:\s+(.*)\s+\$$' )
print '\n\tRevision: %s' % re_keyword_value.match( revision ).group( 1 )
print '\tLast modified on: %s\n' % re_keyword_value.match( modified ).group( 1 )
#~ Utilities...
def main(self):
    """Dispatch each requested action to its command_* method.

    Action names use dashes ("collect-logs"), methods use underscores
    (command_collect_logs); unknown actions are silently ignored.
    """
    for action in self.actions:
        handler = getattr(self, "command_" + action.replace('-', '_'), None)
        if handler is not None:
            handler()
def platform_name(self):
    """Return a human-readable OS name for the reports.

    See http://article.gmane.org/gmane.comp.lib.boost.testing/933
    """
    special_cases = {'win32': 'Windows', 'cygwin': 'Windows/Cygwin'}
    name = special_cases.get(sys.platform)
    return name if name is not None else platform.system()
def log(self, message):
    """Write a '# '-prefixed line to stderr, flushing both std streams.

    Flushing stdout first keeps interleaved output ordered in captured logs.
    """
    for stream in (sys.stdout, sys.stderr):
        stream.flush()
    sys.stderr.write('# %s\n' % message)
    sys.stderr.flush()
# Remove a directory tree if it exists. Deletion is delegated to shell
# commands; on Windows 'del' clears the files first, then shutil removes
# the remaining directory skeleton.
def rmtree(self,path):
if os.path.exists( path ):
import shutil
#~ shutil.rmtree( unicode( path ) )
if sys.platform == 'win32':
os.system( 'del /f /s /q "%s" >nul 2>&1' % path )
shutil.rmtree( unicode( path ) )
else:
os.system( 'rm -f -r "%s"' % path )
def refresh_timestamp(self):
    """Recreate the timestamp file so its mtime marks 'now'."""
    # Unlink first so the subsequent open() always creates a fresh file.
    if os.path.exists(self.timestamp_path):
        os.unlink(self.timestamp_path)
    open(self.timestamp_path, 'w').close()
def timestamp(self):
    """Return the timestamp file's mtime as an ISO-8601 UTC string."""
    mtime = os.stat(self.timestamp_path).st_mtime
    return time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(mtime))
# Call f(), retrying with a pause between attempts and re-raising the last
# exception when every attempt fails.
# NOTE(review): range(max_attempts, -1, -1) yields max_attempts+1 calls in
# total, i.e. the default of 5 means up to 6 invocations of f.
def retry( self, f, max_attempts=5, sleep_secs=10 ):
for attempts in range( max_attempts, -1, -1 ):
try:
return f()
except Exception, msg:
self.log( '%s failed with message "%s"' % ( f.__name__, msg ) )
if attempts == 0:
self.log( 'Giving up.' )
raise
self.log( 'Retrying (%d more attempts).' % attempts )
time.sleep( sleep_secs )
# Download source_url to destination_file in 16 KiB chunks, honoring
# self.proxy (an http proxy URL) when it is set.
def http_get( self, source_url, destination_file ):
import urllib
proxies = None
if hasattr(self,'proxy') and self.proxy is not None:
proxies = { 'http' : self.proxy }
src = urllib.urlopen( source_url, proxies = proxies )
f = open( destination_file, 'wb' )
while True:
data = src.read( 16*1024 )
if len( data ) == 0: break
f.write( data )
f.close()
src.close()
# Lazily import the shared 'utils' module from the xsl_reports directory
# into the module-level global, so it is loaded only when first needed.
def import_utils(self):
global utils
if utils is None:
sys.path.append( self.xsl_reports_dir )
import utils as utils_module
utils = utils_module
def build_if_needed(self, tool, toolset):
    """Ensure a helper tool (bjam, process_jam_log, ...) exists, building it
    from tool['source_dir'] when tool['path'] is absent.

    tool: dict with 'name', 'path', 'source_dir', 'build_cmd', 'build_args'
          and 'default_toolset' entries; 'build_path' is filled in here.
    toolset: bootstrap toolset name, or None to derive a default.
    Raises Exception when the source directory or the built binary is missing.
    """
    self.import_utils()
    if os.path.exists(tool['path']):
        self.log('Found preinstalled "%s"; will use it.' % tool['path'])
        return
    self.log('Preinstalled "%s" is not found; building one...' % tool['path'])
    if toolset is None:
        if self.toolsets is not None:
            # First toolset of the comma-separated test list bootstraps the tool.
            toolset = self.toolsets.split(',')[0]
        else:
            toolset = tool['default_toolset']
            self.log('Warning: No bootstrap toolset for "%s" was specified.' % tool['name'])
            self.log(' Using default toolset for the platform (%s).' % toolset)
    if os.path.exists(tool['source_dir']):
        self.log('Found "%s" source directory "%s"' % (tool['name'], tool['source_dir']))
        build_cmd = tool['build_cmd'](toolset, tool['build_args'])
        self.log('Building "%s" (%s)...' % (tool['name'], build_cmd))
        utils.system(['cd "%s"' % tool['source_dir'], build_cmd])
    else:
        # Bug fix: raising a string is invalid; raise a real exception.
        raise Exception('Could not find "%s" source directory "%s"'
                        % (tool['name'], tool['source_dir']))
    if 'build_path' not in tool:  # has_key() replaced with the 'in' idiom
        tool['build_path'] = self.tool_path(tool)
    if not os.path.exists(tool['build_path']):
        raise Exception('Failed to find "%s" after build.' % tool['build_path'])
    self.log('%s succesfully built in "%s" location' % (tool['name'], tool['build_path']))
def tool_path(self, name_or_spec):
    """Resolve a tool to a concrete path.

    name_or_spec: either a plain file name (resolved relative to the
    regression root) or a tool-spec dict; as a last resort the tool's
    build tree is searched for a file with the tool's name.
    Raises Exception when the tool cannot be found anywhere.
    """
    if isinstance(name_or_spec, basestring):
        return os.path.join(self.regression_root, name_or_spec)
    if os.path.exists(name_or_spec['path']):
        return name_or_spec['path']
    if 'build_path' in name_or_spec:  # has_key() replaced with the 'in' idiom
        return name_or_spec['build_path']
    build_dir = name_or_spec['build_dir']
    self.log('Searching for "%s" in "%s"...' % (name_or_spec['name'], build_dir))
    for root, dirs, files in os.walk(build_dir):
        if name_or_spec['name'] in files:
            return os.path.join(root, name_or_spec['name'])
    raise Exception('Cannot find "%s" in any of the following locations:\n%s' % (
        name_or_spec['name']
        , '\n'.join([name_or_spec['path'], build_dir])
    ))
def bjam_build_cmd(self, *rest):
    """Return the shell command that bootstraps the bjam executable,
    prefixed by $BJAM_ENVIRONMENT_SETUP when that variable is set."""
    script = 'build.bat' if sys.platform == 'win32' else './build.sh'
    cmd = '%s %s' % (script, self.bjam_toolset)
    env_setup_key = 'BJAM_ENVIRONMENT_SETUP'
    if env_setup_key in os.environ:
        return '%s & %s' % (os.environ[env_setup_key], cmd)
    return cmd
# Build the full bjam command line: Boost.Build/boost tree locations, an
# optional per-test timeout, caller-supplied args, and the toolset list.
def bjam_cmd( self, toolsets, args = '', *rest ):
build_path = self.regression_root
# Double a trailing backslash so the quoted %-substitution below stays valid.
if build_path[-1] == '\\': build_path += '\\'
if self.timeout > 0:
# -l<seconds>: bjam kills any single test running longer than this.
args += ' -l%s' % (self.timeout*60)
cmd = '"%(bjam)s"' +\
' "-sBOOST_BUILD_PATH=%(bbpath)s"' +\
' "-sBOOST_ROOT=%(boost)s"' +\
' "--boost=%(boost)s"' +\
' "--boost-build=%(bb)s"' +\
' "--debug-configuration"' +\
' %(arg)s'
cmd %= {
'bjam' : self.tool_path( self.bjam ),
'bbpath' : os.pathsep.join([build_path,self.tools_bb_root]),
'bb' : self.tools_bb_root,
'boost' : self.boost_root,
'arg' : args }
if toolsets:
import string
# Comma-separated toolsets become individual command-line arguments.
cmd += ' ' + string.join(string.split( toolsets, ',' ), ' ' )
return cmd
def send_mail(self, subject, msg=''):
    """Send a notification mail to self.mail.

    When self.smtp_login ("user:password@host") is set, that server is
    used with authentication; otherwise the relay host is derived from
    the recipient's domain ("mail.<domain>").
    """
    import smtplib
    if not self.smtp_login:
        # Bug fix: this used the undefined bare name `mail`; the
        # recipient address lives in self.mail.
        server_name = 'mail.%s' % self.mail.split('@')[-1]
        user_name = None
        password = None
    else:
        server_name = self.smtp_login.split('@')[-1]
        # Credentials are the "user:password" part before the last '@'.
        # (Replaces the py2-only string.split() with the str method.)
        (user_name, password) = self.smtp_login.split('@')[0].split(':')
    # Bug fix: `log` was called as a bare name; it is a method on self.
    self.log(' Sending mail through "%s"...' % server_name)
    smtp_server = smtplib.SMTP(server_name)
    smtp_server.set_debuglevel(self.debug_level)
    if user_name:
        smtp_server.login(user_name, password)
    smtp_server.sendmail(self.mail, [self.mail],
        'Subject: %s\nTo: %s\n\n%s' % (subject, self.mail, msg))
# Zip file_path into archive_path; on any zipfile failure, fall back to a
# platform-specific 'zip_cmd' helper module located next to this script.
def compress_file( self, file_path, archive_path ):
self.import_utils()
utils.log( 'Compressing "%s"...' % file_path )
try:
import zipfile
z = zipfile.ZipFile( archive_path, 'w', zipfile.ZIP_DEFLATED )
z.write( file_path, os.path.basename( file_path ) )
z.close()
utils.log( 'Done writing "%s".'% archive_path )
except Exception, msg:
utils.log( 'Warning: Compressing falied (%s)' % msg )
utils.log( ' Trying to compress using a platform-specific tool...' )
try:
import zip_cmd
except ImportError:
script_dir = os.path.dirname( os.path.abspath( sys.argv[0] ) )
utils.log( 'Could not find \'zip_cmd\' module in the script directory (%s).' % script_dir )
raise Exception( 'Compressing failed!' )
else:
# Remove any stale archive left by the failed zipfile attempt.
# NOTE(review): the log line runs after the unlink, not before.
if os.path.exists( archive_path ):
os.unlink( archive_path )
utils.log( 'Removing stale "%s".' % archive_path )
zip_cmd.main( file_path, archive_path )
utils.log( 'Done compressing "%s".' % archive_path )
#~ Downloading source, from SVN...
def svn_checkout(self):
    """Check out the tagged Boost tree into <regression_root>/boost."""
    os.chdir(self.regression_root)
    checkout_url = self.svn_repository_url(self.tag)
    self.svn_command('co %s boost' % checkout_url)
def svn_update(self):
    """Bring the existing working copy in boost_root up to date."""
    os.chdir(self.boost_root)
    self.svn_command('update')
def svn_command(self, command):
    """Run an svn subcommand non-interactively, raising on non-zero exit.

    A configured user (other than 'anonymous') is passed via --username.
    """
    anonymous = (not hasattr(self, 'user')
                 or self.user is None
                 or self.user == 'anonymous')
    if anonymous:
        cmd = 'svn --non-interactive %s' % command
    else:
        cmd = 'svn --non-interactive --username=%s %s' % (self.user, command)
    self.log('Executing SVN command "%s"' % cmd)
    rc = os.system(cmd)
    if rc != 0:
        raise Exception('SVN command "%s" failed with code %d' % (cmd, rc))
def svn_repository_url(self, path):
    """Map a repo-relative path onto the anonymous or the authenticated
    repository root, depending on the configured user."""
    authenticated = self.user != 'anonymous' and self.user != ''
    root = repo_root['user'] if authenticated else repo_root['anon']
    return '%s%s' % (root, path)
#~ Downloading and extracting source archives, from tarballs or zipballs...
def get_tarball(self, *args):
    """Download and/or unpack the Boost snapshot tarball.

    args: any of 'download', 'unpack'; both are performed by default.
    A configured self.local tarball takes precedence over downloading.
    """
    if not args:
        args = ['download', 'unpack']
    tarball_path = None
    if getattr(self, 'local', None) is not None:
        tarball_path = self.local
    elif 'download' in args:
        tarball_path = self.download_tarball(self.boost_tarball_name(),
                                             self.boost_tarball_url())
    if not tarball_path:
        tarball_path = os.path.join(self.regression_root, self.boost_tarball_url())
    if 'unpack' in args:
        self.unpack_tarball(tarball_path, self.boost_root)
def download_tarball(self, tarball_name, tarball_url):
    """Fetch tarball_url into the regression root, replacing any stale
    copy, and return the downloaded file's path."""
    destination = os.path.join(self.regression_root, tarball_name)
    self.log('Downloading "%s" to "%s"...'
             % (tarball_url, os.path.dirname(destination)))
    if os.path.exists(destination):
        os.unlink(destination)
    self.http_get(tarball_url, destination)
    return destination
def tarball_url(self, path):
    """URL of the snapshot service entry for the given tag path."""
    return 'http://beta.boost.org/development/snapshot.php/%s' % (path,)
def boost_tarball_name(self):
    """Tarball file name derived from the last component of the tag."""
    tag_leaf = self.tag.split('/')[-1]
    return 'boost-%s.tar.bz2' % tag_leaf
def boost_tarball_url(self):
    """Full download URL for this run's snapshot tarball."""
    return self.tarball_url(self.tag)
def unpack_tarball(self, tarball_path, target_path):
    """Unpack a boost .tar.gz/.tar.bz2/.zip archive into the regression
    root and rename the resulting boost[-_]* directory to target_path.

    Any previously-unpacked boost[-_]* directories are deleted first.
    Raises Exception for unrecognized archive extensions.
    """
    self.log('Looking for old unpacked archives...')
    for old_boost_dir in self.find_boost_dirs():
        if old_boost_dir != tarball_path:
            self.log('Deleting old directory %s.' % old_boost_dir)
            self.rmtree(old_boost_dir)
    self.log('Unpacking boost tarball ("%s")...' % tarball_path)
    tarball_name = os.path.basename(tarball_path)
    # Everything from the first dot on, e.g. ".tar.bz2".
    extension = tarball_name[tarball_name.find('.'):]
    if extension in (".tar.gz", ".tar.bz2"):
        import tarfile
        import stat
        # 'gz' or 'bz2' selects tarfile's decompression mode.
        mode = os.path.splitext(extension)[1][1:]
        tar = tarfile.open(tarball_path, 'r:%s' % mode)
        for tarinfo in tar:
            tar.extract(tarinfo, self.regression_root)
            if sys.platform == 'win32' and not tarinfo.isdir():
                # workaround what appears to be a Win32-specific bug in 'tarfile'
                # (modification times for extracted files are not set properly)
                f = os.path.join(self.regression_root, tarinfo.name)
                os.chmod(f, stat.S_IWRITE)
                os.utime(f, (tarinfo.mtime, tarinfo.mtime))
        tar.close()
    elif extension in (".zip",):
        # Bug fix: `(".zip")` was a plain string, which made this a
        # substring test; the one-element tuple gives the intended
        # membership test.
        import zipfile
        z = zipfile.ZipFile(tarball_path, 'r', zipfile.ZIP_DEFLATED)
        for f in z.infolist():
            destination_file_path = os.path.join(self.regression_root, f.filename)
            if destination_file_path[-1] == "/":  # directory
                if not os.path.exists(destination_file_path):
                    os.makedirs(destination_file_path)
            else:  # file
                result = open(destination_file_path, 'wb')
                result.write(z.read(f.filename))
                result.close()
        z.close()
    else:
        # Bug fix: raising a string is invalid; raise a real exception.
        raise Exception('Do not know how to unpack archives with extension "%s"' % extension)
    boost_dir = self.find_boost_dirs()[0]
    self.log(' Unpacked into directory "%s"' % boost_dir)
    if os.path.exists(target_path):
        self.log('Deleting "%s" directory...' % target_path)
        self.rmtree(target_path)
    self.log('Renaming "%s" into "%s"' % (boost_dir, target_path))
    os.rename(boost_dir, target_path)
def find_boost_dirs(self):
    """List directories under the regression root matching boost[-_]*."""
    pattern = os.path.join(self.regression_root, 'boost[-_]*')
    return [entry for entry in glob.glob(pattern) if os.path.isdir(entry)]

View file

@ -1,60 +0,0 @@
#!/usr/bin/python
# Copyright Redshift Software, Inc. 2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os
import os.path
import shutil
import sys
import urllib
#~ Using --skip-script-download is useful to avoid repeated downloading of
#~ the regression scripts when doing the regression commands individually.
no_update_argument = "--skip-script-download"
no_update = no_update_argument in sys.argv
if no_update:
# Consume the flag so regression.py's own argument parsing never sees it.
del sys.argv[sys.argv.index(no_update_argument)]
#~ The directory this file is in.
root = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
print '# Running regressions in %s...' % root
script_sources = [ 'collect_and_upload_logs.py', 'process_jam_log.py', 'regression.py' ]
script_local = os.path.join(root,'tools','regression','src')
script_remote = 'http://svn.boost.org/svn/boost/trunk/tools/regression/src'
script_dir = os.path.join(root,'tools_regression_src')
if not no_update:
#~ Bootstrap.
#~ * Clear out any old versions of the scripts
print '# Creating regression scripts at %s...' % script_dir
if os.path.exists(script_dir):
shutil.rmtree(script_dir)
os.mkdir(script_dir)
#~ * Get new scripts, either from local working copy, or from svn
if os.path.exists(script_local):
print '# Copying regression scripts from %s...' % script_local
for src in script_sources:
shutil.copyfile( os.path.join(script_local,src), os.path.join(script_dir,src) )
else:
print '# Dowloading regression scripts from %s...' % script_remote
# An optional --proxy=<url> argument configures the http proxy used
# for the download.
proxy = None
for a in sys.argv[1:]:
if a.startswith('--proxy='):
proxy = {'http' : a.split('=')[1] }
print '--- %s' %(proxy['http'])
break
for src in script_sources:
urllib.FancyURLopener(proxy).retrieve(
'%s/%s' % (script_remote,src), os.path.join(script_dir,src) )
#~ * Make the scripts available to Python
sys.path.insert(0,os.path.join(root,'tools_regression_src'))
#~ Launch runner.
from regression import runner
runner(root)

View file

@ -1,197 +0,0 @@
#!/bin/sh
#
# Copyright John Maddock
# Copyright Rene Rivera
#
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
#
# shell script for running the boost regression test suite and generating
# a html table of results.
# Set the following variables to configure the operation. Variables you
# should set, i.e. usually required are listed first. Optional variables
# have reasonable defaults for most situations.
### THESE SHOULD BE CHANGED!
#
# "boost_root" points to the root of you boost installation:
# This can be either a non-exitent directory or an already complete Boost
# source tree.
#
boost_root="$HOME/CVSROOTs/Boost/boost_regression"
#
# Wether to fetch the most current Boost code from CVS (yes/no):
# There are two contexts to use this script in: on an active Boost CVS
# tree, and on a fresh Boost CVS tree. If "yes" is specified here an attempt
# to fetch the latest CVS Boost files is made. For an active Boost CVS
# the CVS connection information is used. If an empty tree is detected
# the code is fetched with the anonymous read only information.
#
cvs_update=no
#
# "test_tools" are the Boost.Build toolsets to use for building and running the
# regression tests. Specify a space separated list, of the Boost.Build toolsets.
# Each will be built and tested in sequence.
#
test_tools=gcc
#
# "toolset" is the Boost.Build toolset to use for building the helper programs.
# This is usually different than the toolsets one is testing. And this is
# normally a toolset that corresponds to the compiler built into your platform.
#
toolset=gcc
#
# "comment_path" is the path to an html-file describing the test environment.
# The content of this file will be embedded in the status pages being produced.
#
comment_path="$boost_root/../regression_comment.html"
#
# "test_dir" is the relative path to the directory to run the tests in,
# defaults to "status" and runs all the tests, but could be a sub-directory
# for example "libs/regex/test" to run the regex tests alone.
#
test_dir="status"
### DEFAULTS ARE OK FOR THESE.
#
# "exe_suffix" the suffix used by exectable files:
# In case your platform requires use of a special suffix for executables specify
# it here, including the "." if needed. This should not be needed even in Windows
# like platforms as they will execute without the suffix anyway.
#
exe_suffix=
#
# "bjam" points to your built bjam executable:
# The location of the binary for running bjam. The default should work
# under most circumstances.
#
bjam="$boost_root/tools/build/engine/bin/bjam$exe_suffix"
#
# "process_jam_log", and "compiler_status" paths to built helper programs:
# The location of the executables of the regression help programs. These
# are built locally so the default should work in most situations.
#
process_jam_log="$boost_root/dist/bin/process_jam_log$exe_suffix"
compiler_status="$boost_root/dist/bin/compiler_status$exe_suffix"
#
# "boost_build_path" can point to additional locations to find toolset files.
#
boost_build_path="$HOME/.boost-build"
### NO MORE CONFIGURABLE PARTS.
#
# Some setup.
#
# Prepend our boost_build_path to any pre-existing BOOST_BUILD_PATH.
boost_dir=`basename "$boost_root"`
if test -n "${BOOST_BUILD_PATH}" ; then
BOOST_BUILD_PATH="$boost_build_path:$BOOST_BUILD_PATH"
else
BOOST_BUILD_PATH="$boost_build_path"
fi
export BOOST_BUILD_PATH
#
# STEP 0:
#
# Get the source code:
#
if test ! -d "$boost_root" ; then
mkdir -p "$boost_root"
if test $? -ne 0 ; then
echo "creation of $boost_root directory failed."
exit 256
fi
fi
if test $cvs_update = yes ; then
echo fetching Boost:
echo "/1 :pserver:anonymous@cvs.sourceforge.net:2401/cvsroot/boost A" >> "$HOME/.cvspass"
# NOTE(review): redirecting sort|uniq back into the same file truncates
# it before cat reads it; a temporary file should be used instead.
cat "$HOME/.cvspass" | sort | uniq > "$HOME/.cvspass"
cd `dirname "$boost_root"`
# An existing CVS/Root means an active checkout; reuse its connection info,
# otherwise fall back to the anonymous read-only pserver.
if test -f boost/CVS/Root ; then
cvs -z3 -d `cat "$boost_dir/CVS/Root"` co -d "$boost_dir" boost
else
cvs -z3 -d :pserver:anonymous@cvs.sourceforge.net:2401/cvsroot/boost co -d "$boost_dir" boost
fi
fi
#
# STEP 1:
# rebuild bjam if required:
#
echo building bjam:
cd "$boost_root/tools/build/engine" && \
LOCATE_TARGET=bin sh ./build.sh
if test $? != 0 ; then
echo "bjam build failed."
exit 256
fi
#
# STEP 2:
# rebuild the regression test helper programs if required:
#
echo building regression test helper programs:
cd "$boost_root/tools/regression/build" && \
"$bjam" $toolset release
if test $? != 0 ; then
echo "helper program build failed."
exit 256
fi
#
# STEP 5:
# repeat steps 3 and 4 for each additional toolset:
#
for tool in $test_tools ; do
#
# STEP 3:
# run the regression tests:
#
echo running the $tool regression tests:
cd "$boost_root/$test_dir"
"$bjam" $tool --dump-tests 2>&1 | tee regress.log
#
# STEP 4:
# post process the results:
#
echo processing the regression test results for $tool:
cat regress.log | "$process_jam_log" --v2
if test $? != 0 ; then
echo "Failed regression log post processing."
exit 256
fi
done
#
# STEP 6:
# create the html table:
#
uname=`uname`
echo generating html tables:
"$compiler_status" --v2 --comment "$comment_path" "$boost_root" cs-$uname.html cs-$uname-links.html
if test $? != 0 ; then
echo "Failed HTML result table generation."
exit 256
fi
echo "done!"

View file

@ -1,197 +0,0 @@
# smoke test - every so many minutes, check svn revision, and if changed:
# update working copy, run tests, upload results
# Copyright Beman Dawes 2007
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# ---------------------------------------------------------------------------- #
import os
import sys
import platform
import time
import ftplib
# invoke the system command line processor
def cmd(command):
print "command:", command
os.system(command)
# update SVN working copy
def update_working_copy(boost_path):
    # Run 'svn update' from inside the working copy itself.
    os.chdir(boost_path)
    cmd("svn update")
# get repository url
def repository_url(path, results_path):
    # Extract the //svn.boost.org repository URL for `path` from the
    # output of 'svn info --xml'; returns "" when no URL is found.
    url = ""
    svn_info_file = results_path + "/svn_info.xml"
    cmd("svn info --xml " + path + " >" + svn_info_file)
    info = open(svn_info_file, 'r')
    svn_info = info.read()
    info.close()
    start = svn_info.find('//svn.boost.org')
    if start >= 0:
        # Everything up to the closing </url> tag is the URL.
        url = svn_info[start:svn_info.find("</url>")]
    return url
# get revision number of a path, which may be a filesystem path or URL
def revision(path, results_path, test_name):
# Parse the decimal digits following 'revision=' in 'svn info --xml'
# output; returns 0 when no revision attribute is found.
rev = 0
svn_info_file = results_path + "/" + test_name + "-svn_info.xml"
command = "svn info --xml " + path + " >" + svn_info_file
cmd(command)
f = open( svn_info_file, 'r' )
svn_info = f.read()
f.close()
i = svn_info.find( 'revision=' )
if i >= 0:
# 'revision=' is 9 characters; +10 also skips the opening quote.
i += 10
while svn_info[i] >= '0' and svn_info[i] <= '9':
rev = rev*10 + int(svn_info[i])
i += 1
return rev
# run bjam in current directory
def bjam(boost_path, args, output_path, test_name):
# bjam seems to need BOOST_BUILD_PATH
#os.environ["BOOST_BUILD_PATH"]=boost_path + "/tools/build/v2"
print "Begin bjam..."
command = "bjam --v2 --dump-tests -l180"
if args != "": command += " " + args
command += " >" + output_path + "/" + test_name +"-bjam.log 2>&1"
cmd(command)
# run process_jam_log in current directory
def process_jam_log(boost_path, output_path, test_name):
print "Begin log processing..."
command = "process_jam_log " + boost_path + " <" +\
output_path + "/" + test_name +"-bjam.log"
cmd(command)
# run compiler_status in current directory
def compiler_status(boost_path, output_path, test_name):
print "Begin compiler status html creation... "
command = "compiler_status --v2 --ignore-pass --no-warn --locate-root " + boost_path + " " +\
boost_path + " " + output_path + "/" + test_name + "-results.html " +\
output_path + "/" + test_name + "-details.html "
cmd(command)
# upload results via ftp
def upload_to_ftp(results_path, test_name, ftp_url, user, psw, debug_level):
# to minimize the time web pages are not available, upload with temporary
# names and then rename to the permanent names
i = 0 # dummy variable
os.chdir(results_path)
tmp_results = "temp-" + test_name + "-results.html"
results = test_name + "-results.html"
tmp_details = "temp-" + test_name + "-details.html"
details = test_name + "-details.html"
print "Uploading results via ftp..."
ftp = ftplib.FTP( ftp_url, user, psw )
ftp.set_debuglevel( debug_level )
# ftp.cwd( site_path )
# The '++i' expressions below are no-ops; each try/except only exists to
# ignore "file does not exist" errors from delete().
try: ftp.delete(tmp_results)
except: ++i
f = open( results, 'rb' )
ftp.storbinary( 'STOR %s' % tmp_results, f )
f.close()
try: ftp.delete(tmp_details)
except: ++i
f = open( details, 'rb' )
ftp.storbinary( 'STOR %s' % tmp_details, f )
f.close()
try: ftp.delete(results)
except: ++i
try: ftp.delete(details)
except: ++i
# Atomically swap the fresh uploads into place.
ftp.rename(tmp_results, results)
ftp.rename(tmp_details, details)
ftp.dir()
ftp.quit()
def commit_results(results_path, test_name, rev):
print "Commit results..."
cwd = os.getcwd()
os.chdir(results_path)
command = "svn commit --non-interactive -m "+'"'+str(rev)+'" '+test_name+"-results.html"
cmd(command)
os.chdir(cwd)
# ---------------------------------------------------------------------------- #
# Main loop: poll the repository every `minutes`; whenever the repository
# revision differs from the working copy (or on the first pass), update,
# run the tests, publish the results via ftp, and commit the results page.
if len(sys.argv) < 7:
print "Invoke with: minutes boost-path test-name results-path ftp-url user psw [bjam-args]"
print " boost-path must be path for a boost svn working directory."
print " results-path must be path for a svn working directory where an"
print " svn commit test-name+'-results.html' is valid."
print "Warning: This program hangs or crashes on network failures."
exit()
minutes = int(sys.argv[1])
boost_path = sys.argv[2]
test_name = sys.argv[3]
results_path = sys.argv[4]
ftp_url = sys.argv[5]
user = sys.argv[6]
psw = sys.argv[7]
if len(sys.argv) > 8: bjam_args = sys.argv[8]
else: bjam_args = ""
os.chdir(boost_path) # convert possible relative path
boost_path = os.getcwd() # to absolute path
print "minutes is ", minutes
print "boost_path is ", boost_path
print "test_name is ", test_name
print "results_path is ", results_path
print "ftp_url is ", ftp_url
print "user is ", user
print "psw is ", psw
print 'bjam args are "' + bjam_args + '"'
url = repository_url(boost_path, results_path)
print "respository url is ", url
first = 1
while 1:
working_rev = revision(boost_path, results_path, test_name)
repos_rev = revision("http:" + url, results_path, test_name)
print "Working copy revision: ", working_rev, " repository revision: ", repos_rev
if first or working_rev != repos_rev:
first = 0
start_time = time.time()
print
print "start at", time.strftime("%H:%M:%S", time.localtime())
update_working_copy(boost_path)
os.chdir(boost_path+"/status")
bjam(boost_path, bjam_args, results_path, test_name)
process_jam_log(boost_path, results_path, test_name)
compiler_status(boost_path, results_path, test_name)
upload_to_ftp(results_path, test_name, ftp_url, user, psw, 0)
commit_results(results_path, test_name,revision(boost_path, results_path, test_name))
elapsed_time = time.time() - start_time
print elapsed_time/60.0, "minutes elapsed time"
print
print "sleep ", minutes, "minutes..."
time.sleep(60 * minutes)

View file

@ -1,27 +0,0 @@
# Copyright Misha Bergal 2006
#
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
# Exercises every bjam test rule against sources that deliberately pass,
# fail, or warn, so the reporting tools can be checked against known
# expected statuses (see the naming convention in the .cpp files).
test-suite testlib :
[ compile-fail compile-fail_fail.cpp ]
[ compile-fail compile-fail_pass.cpp ]
[ compile compile_fail.cpp ]
[ compile compile_pass.cpp ]
[ compile compile_warn.cpp ]
# The link test .cpp files were apparently never committed to the repository,
# and were lost.
# [ link link_fail.cpp ]
# [ link link_pass.cpp ]
# [ link-fail link-fail_fail.cpp ]
# [ link-fail link-fail_pass.cpp ]
[ run-fail run-fail_compile-fail.cpp ]
[ run-fail run-fail_fail-warn.cpp ]
[ run-fail run-fail_fail.cpp ]
[ run-fail run-fail_pass.cpp ]
[ run run_fail.cpp ]
[ run run_note.cpp ]
[ run run_pass.cpp ]
[ run run_warn-note.cpp ]
[ run run_warn.cpp ]
;

View file

@ -1,10 +0,0 @@
// (C) Copyright Beman Dawes 2003. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Test naming convention: the portion of the name before the tilde ("~")
// identifies the bjam test type. The portion after the tilde
// identifies the correct result to be reported by compiler_status.
// This source deliberately compiles and runs cleanly (exit code 0);
// whether that counts as a pass or a failure is determined by the bjam
// test rule this file is registered with in the Jamfile.
int main() { return 0; }

View file

@ -1,9 +0,0 @@
// (C) Copyright Beman Dawes 2003. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Test naming convention: the portion of the name before the tilde ("~")
// identifies the bjam test type. The portion after the tilde
// identifies the correct result to be reported by compiler_status.
// Intentionally does NOT compile: the unconditional #error guarantees a
// compilation failure for the test rule that consumes this file.
#error example of a compile failure

View file

@ -1,9 +0,0 @@
// (C) Copyright Beman Dawes 2003. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Test naming convention: the portion of the name before the tilde ("~")
// identifies the bjam test type. The portion after the tilde
// identifies the correct result to be reported by compiler_status.
// Intentionally does NOT compile: the unconditional #error guarantees a
// compilation failure for the test rule that consumes this file.
#error example of a compile failure

View file

@ -1,9 +0,0 @@
// (C) Copyright Beman Dawes 2003. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Test naming convention: the portion of the name before the tilde ("~")
// identifies the bjam test type. The portion after the tilde
// identifies the correct result to be reported by compiler_status.
// This source deliberately compiles and runs cleanly (exit code 0);
// whether that counts as a pass or a failure is determined by the bjam
// test rule this file is registered with in the Jamfile.
int main() { return 0; }

View file

@ -1,18 +0,0 @@
// (C) Copyright Beman Dawes 2003. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Test naming convention: the portion of the name before the tilde ("~")
// identifies the bjam test type. The portion after the tilde
// identifies the correct result to be reported by compiler_status.
// provoke one or more compiler warnings
int main(int argc, char * argv[] )
{
short s;
unsigned long ul;
s = s & ul; // warning from many compilers
if ( s == ul ) {} // warning from GCC
return 0;
}

View file

@ -1,9 +0,0 @@
// (C) Copyright Beman Dawes 2003. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Test naming convention: the portion of the name before the tilde ("~")
// identifies the bjam test type. The portion after the tilde
// identifies the correct result to be reported by compiler_status.
#error example of a compile failure

View file

@ -1,16 +0,0 @@
// (C) Copyright Beman Dawes 2003. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Test naming convention: the portion of the name before the tilde ("~")
// identifies the bjam test type. The portion after the tilde
// identifies the correct result to be reported by compiler_status.
int main()
{
short s;
unsigned long ul;
s = s & ul; // warning from many compilers
if ( s == ul ) {} // warning from GCC
return 0;
}

View file

@ -1,12 +0,0 @@
// (C) Copyright Beman Dawes 2003. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Test naming convention: the portion of the name before the tilde ("~")
// identifies the bjam test type. The portion after the tilde
// identifies the correct result to be reported by compiler_status.
int main()
{
return 0;
}

View file

@ -1,15 +0,0 @@
// (C) Copyright Beman Dawes 2003. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Test naming convention: the portion of the name before the tilde ("~")
// identifies the bjam test type. The portion after the tilde
// identifies the correct result to be reported by compiler_status.
#include <iostream>
int main()
{
std::cout << "example of output from a run-time failure\n";
return 1;
}

View file

@ -1,20 +0,0 @@
// (C) Copyright Beman Dawes 2003. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Test naming convention: the portion of the name before the tilde ("~")
// identifies the bjam test type. The portion after the tilde
// identifies the correct result to be reported by compiler_status.
#include <iostream>
int main()
{
short s;
unsigned long ul;
s = s & ul; // warning from many compilers
if ( s == ul ) {} // warning from GCC
std::cout << "example of output from a run-time failure\n";
return 1;
}

View file

@ -1,9 +0,0 @@
// (C) Copyright Beman Dawes 2003. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Test naming convention: the portion of the name before the tilde ("~")
// identifies the bjam test type. The portion after the tilde
// identifies the correct result to be reported by compiler_status.
#error example of a compile failure

View file

@ -1,17 +0,0 @@
// (C) Copyright Beman Dawes 2003. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Test naming convention: the portion of the name before the tilde ("~")
// identifies the bjam test type. The portion after the tilde
// identifies the correct result to be reported by compiler_status.
#include <iostream>
int main()
{
std::cout << "example of output before a <note> line\n";
std::cout << "<note>\n";
std::cout << "example of output after a <note> line\n";
return 1;
}

View file

@ -1,20 +0,0 @@
// (C) Copyright Beman Dawes 2003. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Test naming convention: the portion of the name before the tilde ("~")
// identifies the bjam test type. The portion after the tilde
// identifies the correct result to be reported by compiler_status.
#include <iostream>
int main()
{
short s;
unsigned long ul;
s = s & ul; // warning from many compilers
if ( s == ul ) {} // warning from GCC
std::cout << "example of output from a run-time failure\n";
return 1;
}

View file

@ -1,14 +0,0 @@
// (C) Copyright Beman Dawes 2003. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Test naming convention: the portion of the name before the tilde ("~")
// identifies the bjam test type. The portion after the tilde
// identifies the correct result to be reported by compiler_status.
#include <iostream>
int main()
{
return 1;
}

View file

@ -1,17 +0,0 @@
// (C) Copyright Beman Dawes 2003. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Test naming convention: the portion of the name before the tilde ("~")
// identifies the bjam test type. The portion after the tilde
// identifies the correct result to be reported by compiler_status.
#include <iostream>
int main()
{
std::cout << "example of output before a <note> line\n";
std::cout << "<note>\n";
std::cout << "example of output after a <note> line\n";
return 0;
}

View file

@ -1,12 +0,0 @@
// (C) Copyright Beman Dawes 2003. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Test naming convention: the portion of the name before the tilde ("~")
// identifies the bjam test type. The portion after the tilde
// identifies the correct result to be reported by compiler_status.
int main()
{
return 0;
}

View file

@ -1,24 +0,0 @@
// (C) Copyright Beman Dawes 2003. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Test naming convention: the portion of the name before the tilde ("~")
// identifies the bjam test type. The portion after the tilde
// identifies the correct result to be reported by compiler_status.
#include <iostream>
int main()
{
std::cout << "example of output before a <note> line\n";
std::cout << "<note>\n";
std::cout << "example of output after a <note> line\n";
// provoke a compiler warning to make sure <note> takes priority over
// a warning, but neither is lost from status reporting links HTML.
short s;
unsigned long ul;
s = s & ul; // warning from many compilers
if ( s == ul ) {} // warning from GCC
return 0;
}

View file

@ -1,18 +0,0 @@
// (C) Copyright Beman Dawes 2003. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Test naming convention: the portion of the name before the tilde ("~")
// identifies the bjam test type. The portion after the tilde
// identifies the correct result to be reported by compiler_status.
// provoke one or more compiler warnings
int main(int argc, char * argv[] )
{
short s;
unsigned long ul;
s = s & ul; // warning from many compilers
if ( s == ul ) {} // warning from GCC
return 0;
}

View file

@ -1,11 +0,0 @@
rule failure
{
}
actions failure
{
dir _
echo a
}
failure f ;

View file

@ -1,12 +0,0 @@
rule failure
{
}
actions failure
{
dir _
if errorlevel 1 exit %errorlevel%
echo a
}
failure f ;

View file

@ -1,9 +0,0 @@
project
: requirements
<library>/boost/filesystem//boost_filesystem
<define>BOOST_ALL_NO_LIB
;
test-suite "missing_dependencies" :
[ run test.cpp lib//<link>static ]
;

View file

@ -1,7 +0,0 @@
SOURCES =
lib ;
lib lib
:
$(SOURCES).cpp
;

View file

@ -1 +0,0 @@
int main() { return 0; }

View file

@ -1,36 +0,0 @@
boost-test(RUN) "statechart/DllTestNative" : "libs/statechart/test/TuTestMain.cpp"
boost-test(RUN) "statechart/DllTestNormal" : "libs/statechart/test/TuTestMain.cpp"
compile-c-c++ ..\..\..\bin.v2\libs\statechart\test\DllTestNormal.test\msvc-7.1\debug\threading-multi\TuTestMain.obj
TuTestMain.cpp
c:\Users\Misha\Stuff\boost\HEAD\boost\libs\statechart\test\TuTest.hpp(36) : warning C4275: non dll-interface class 'boost::statechart::event_base' used as base for dll-interface class 'boost::statechart::detail::rtti_policy::rtti_derived_type<MostDerived,Base>'
with
[
MostDerived=EvX,
Base=boost::statechart::event_base
]
..\..\..\boost\statechart\event_base.hpp(49) : see declaration of 'boost::statechart::event_base'
compile-c-c++ ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLibTuTest.obj
TuTest.cpp
c:\Users\Misha\Stuff\boost\HEAD\boost\libs\statechart\test\TuTest.hpp(36) : warning C4275: non dll-interface class 'boost::statechart::event_base' used as base for dll-interface class 'boost::statechart::detail::rtti_policy::rtti_derived_type<MostDerived,Base>'
with
[
MostDerived=EvX,
Base=boost::statechart::event_base
]
..\..\..\boost\statechart\event_base.hpp(49) : see declaration of 'boost::statechart::event_base'
msvc.link.dll ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.lib
Creating library ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.lib and object ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.exp
call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat" >nul
link /NOLOGO /INCREMENTAL:NO /DLL /DEBUG /subsystem:console /out:"..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll" /IMPLIB:"..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.lib" @"..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll.rsp"
if %errorlevel% 1 exit %errorlevel%
if exist "..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll.manifest" (
mt -nologo -manifest "..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll.manifest" "-outputresource:..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll;2"
)
...failed msvc.link.dll ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.lib...
...removing ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll
...removing ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.lib
...skipped <p..\..\..\bin.v2\libs\statechart\test\DllTestNormal.test\msvc-7.1\debug\threading-multi>DllTestNormal.exe for lack of <p..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi>DllTestNormalLib-vc71-mt-gd-1_35.lib...
...skipped <p..\..\..\bin.v2\libs\statechart\test\DllTestNormal.test\msvc-7.1\debug\threading-multi>DllTestNormal.run for lack of <p..\..\..\bin.v2\libs\statechart\test\DllTestNormal.test\msvc-7.1\debug\threading-multi>DllTestNormal.exe...

View file

@ -1,27 +0,0 @@
<test-log library="statechart" test-name="DllTestNormal" test-type="run" test-program="libs/statechart/test/TuTestMain.cpp" target-directory="bin.v2/libs/statechart/test/DllTestNormal.test/msvc-7.1/debug/threading-multi" toolset="msvc-7.1" show-run-output="false">
<lib result="fail" timestamp="">../../bin.v2/libs/statechart/test/msvc-7.1/debug/threading-multi</lib>
</test-log>
<test-log library="statechart" test-name="" test-type="" test-program="" target-directory="bin.v2/libs/statechart/test/msvc-7.1/debug/threading-multi" toolset="" show-run-output="true">
<compile result="succeed" timestamp="">
TuTest.cpp
c:\Users\Misha\Stuff\boost\HEAD\boost\libs\statechart\test\TuTest.hpp(36) : warning C4275: non dll-interface class 'boost::statechart::event_base' used as base for dll-interface class 'boost::statechart::detail::rtti_policy::rtti_derived_type&lt;MostDerived,Base&gt;'
with
[
MostDerived=EvX,
Base=boost::statechart::event_base
]
..\..\..\boost\statechart\event_base.hpp(49) : see declaration of 'boost::statechart::event_base'
</compile>
<link result="fail" timestamp="">
Creating library ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.lib and object ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.exp
call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat" &gt;nul
link /NOLOGO /INCREMENTAL:NO /DLL /DEBUG /subsystem:console /out:"..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll" /IMPLIB:"..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.lib" @"..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll.rsp"
if %errorlevel% 1 exit %errorlevel%
if exist "..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll.manifest" (
mt -nologo -manifest "..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll.manifest" "-outputresource:..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll;2"
)
</link>
</test-log>

View file

@ -1,325 +0,0 @@
locate-root "..\..\..\bin.v2"
C:\Users\Misha\Stuff\boost\HEAD\boost\tools\regression\test>C:\Users\Misha\Stuff\boost\HEAD\bin\..\boost\tools\jam\src\bin.ntx86\bjam.exe --dump-tests --v2 msvc-7.1 "-sBOOST_BUILD_PATH=C:\Users\Misha\Stuff\boost\HEAD\bin\.." "-sBOOST_ROOT="C:\Users\Misha\Stuff\boost\HEAD\bin\..\boost"
warning: Python location is not configured
warning: the Boost.Python library won't be built
Building Boost.Regex with the optional Unicode/ICU support disabled.
Please refer to the Boost.Regex documentation for more information
(and if you don't know what ICU is then you probably don't need it).
boost-test(RUN) "testlib/run~warn" : "tools/regression/test/run~warn.cpp"
boost-test(RUN) "testlib/run~warn-note" : "tools/regression/test/run~warn-note.cpp"
boost-test(RUN) "testlib/run~pass" : "tools/regression/test/run~pass.cpp"
boost-test(RUN) "testlib/run~note" : "tools/regression/test/run~note.cpp"
boost-test(RUN) "testlib/run~fail" : "tools/regression/test/run~fail.cpp"
boost-test(RUN_FAIL) "testlib/run-fail~pass" : "tools/regression/test/run-fail~pass.cpp"
boost-test(RUN_FAIL) "testlib/run-fail~fail" : "tools/regression/test/run-fail~fail.cpp"
boost-test(RUN_FAIL) "testlib/run-fail~fail-warn" : "tools/regression/test/run-fail~fail-warn.cpp"
boost-test(RUN_FAIL) "testlib/run-fail~compile-fail" : "tools/regression/test/run-fail~compile-fail.cpp"
boost-test(LINK_FAIL) "testlib/link-fail~pass" : "tools/regression/test/link-fail~pass.cpp"
boost-test(LINK_FAIL) "testlib/link-fail~fail" : "tools/regression/test/link-fail~fail.cpp"
boost-test(LINK) "testlib/link~pass" : "tools/regression/test/link~pass.cpp"
boost-test(LINK) "testlib/link~fail" : "tools/regression/test/link~fail.cpp"
boost-test(COMPILE) "testlib/compile~warn" : "tools/regression/test/compile~warn.cpp"
boost-test(COMPILE) "testlib/compile~pass" : "tools/regression/test/compile~pass.cpp"
boost-test(COMPILE) "testlib/compile~fail" : "tools/regression/test/compile~fail.cpp"
boost-test(COMPILE_FAIL) "testlib/compile-fail~pass" : "tools/regression/test/compile-fail~pass.cpp"
boost-test(COMPILE_FAIL) "testlib/compile-fail~fail" : "tools/regression/test/compile-fail~fail.cpp"
...found 210 targets...
...updating 157 targets...
MkDir1 ..\..\..\bin.v2\tools\regression\test
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile-fail~fail.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile-fail~fail.test\msvc-7.1
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile-fail~fail.test\msvc-7.1\debug
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile-fail~fail.test\msvc-7.1\debug\link-static
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile-fail~fail.test\msvc-7.1\debug\link-static\threading-multi
compile-c-c++ ..\..\..\bin.v2\tools\regression\test\compile-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\compile-fail~fail.obj
compile-fail~fail.cpp
call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat" >nul
cl /Zm800 -nologo @"..\..\..\bin.v2\tools\regression\test\compile-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\compile-fail~fail.obj.rsp"
...failed compile-c-c++ ..\..\..\bin.v2\tools\regression\test\compile-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\compile-fail~fail.obj...
...removing ..\..\..\bin.v2\tools\regression\test\compile-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\compile-fail~fail.obj
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile-fail~pass.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile-fail~pass.test\msvc-7.1
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile-fail~pass.test\msvc-7.1\debug
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile-fail~pass.test\msvc-7.1\debug\link-static
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile-fail~pass.test\msvc-7.1\debug\link-static\threading-multi
compile-c-c++ ..\..\..\bin.v2\tools\regression\test\compile-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\compile-fail~pass.obj
compile-fail~pass.cpp
compile-fail~pass.cpp(9) : fatal error C1189: #error : example of a compile failure
(failed-as-expected) ..\..\..\bin.v2\tools\regression\test\compile-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\compile-fail~pass.obj
**passed** ..\..\..\bin.v2\tools\regression\test\compile-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\compile-fail~pass.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~fail.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~fail.test\msvc-7.1
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~fail.test\msvc-7.1\debug
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~fail.test\msvc-7.1\debug\link-static
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~fail.test\msvc-7.1\debug\link-static\threading-multi
compile-c-c++ ..\..\..\bin.v2\tools\regression\test\compile~fail.test\msvc-7.1\debug\link-static\threading-multi\compile~fail.obj
compile~fail.cpp
compile~fail.cpp(9) : fatal error C1189: #error : example of a compile failure
call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat" >nul
cl /Zm800 -nologo @"..\..\..\bin.v2\tools\regression\test\compile~fail.test\msvc-7.1\debug\link-static\threading-multi\compile~fail.obj.rsp"
...failed compile-c-c++ ..\..\..\bin.v2\tools\regression\test\compile~fail.test\msvc-7.1\debug\link-static\threading-multi\compile~fail.obj...
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~pass.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~pass.test\msvc-7.1
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~pass.test\msvc-7.1\debug
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~pass.test\msvc-7.1\debug\link-static
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~pass.test\msvc-7.1\debug\link-static\threading-multi
compile-c-c++ ..\..\..\bin.v2\tools\regression\test\compile~pass.test\msvc-7.1\debug\link-static\threading-multi\compile~pass.obj
compile~pass.cpp
**passed** ..\..\..\bin.v2\tools\regression\test\compile~pass.test\msvc-7.1\debug\link-static\threading-multi\compile~pass.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~warn.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~warn.test\msvc-7.1
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~warn.test\msvc-7.1\debug
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~warn.test\msvc-7.1\debug\link-static
MkDir1 ..\..\..\bin.v2\tools\regression\test\compile~warn.test\msvc-7.1\debug\link-static\threading-multi
compile-c-c++ ..\..\..\bin.v2\tools\regression\test\compile~warn.test\msvc-7.1\debug\link-static\threading-multi\compile~warn.obj
compile~warn.cpp
compile~warn.cpp(15) : warning C4244: '=' : conversion from 'unsigned long' to 'short', possible loss of data
c:\users\misha\stuff\boost\head\boost\tools\regression\test\compile~warn.cpp(15) : warning C4700: local variable 'ul' used without having been initialized
**passed** ..\..\..\bin.v2\tools\regression\test\compile~warn.test\msvc-7.1\debug\link-static\threading-multi\compile~warn.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\link~fail.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1
MkDir1 ..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug
MkDir1 ..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static
MkDir1 ..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi
compile-c-c++ ..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.obj
link~fail.cpp
msvc.link ..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe
link~fail.obj : error LNK2019: unresolved external symbol "int __cdecl f(void)" (?f@@YAHXZ) referenced in function _main
..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe : fatal error LNK1120: 1 unresolved externals
call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat" >nul
link /NOLOGO /INCREMENTAL:NO /DEBUG /subsystem:console /out:"..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe" @"..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe.rsp"
if errorlevel 1 exit %errorlevel%
if exist "..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe.manifest" (
mt -nologo -manifest "..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe.manifest" "-outputresource:..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe;1"
)
...failed msvc.link ..\..\..\bin.v2\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe...
MkDir1 ..\..\..\bin.v2\tools\regression\test\link~pass.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\link~pass.test\msvc-7.1
MkDir1 ..\..\..\bin.v2\tools\regression\test\link~pass.test\msvc-7.1\debug
MkDir1 ..\..\..\bin.v2\tools\regression\test\link~pass.test\msvc-7.1\debug\link-static
MkDir1 ..\..\..\bin.v2\tools\regression\test\link~pass.test\msvc-7.1\debug\link-static\threading-multi
compile-c-c++ ..\..\..\bin.v2\tools\regression\test\link~pass.test\msvc-7.1\debug\link-static\threading-multi\link~pass.obj
link~pass.cpp
msvc.link ..\..\..\bin.v2\tools\regression\test\link~pass.test\msvc-7.1\debug\link-static\threading-multi\link~pass.exe
**passed** ..\..\..\bin.v2\tools\regression\test\link~pass.test\msvc-7.1\debug\link-static\threading-multi\link~pass.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\link-fail~fail.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1
MkDir1 ..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug
MkDir1 ..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static
MkDir1 ..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi
compile-c-c++ ..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.obj
link-fail~fail.cpp
msvc.link ..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe
call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat" >nul
link /NOLOGO /INCREMENTAL:NO /DEBUG /subsystem:console /out:"..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe" @"..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe.rsp"
if errorlevel 1 exit %errorlevel%
if exist "..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe.manifest" (
mt -nologo -manifest "..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe.manifest" "-outputresource:..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe;1"
)
...failed msvc.link ..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe...
...removing ..\..\..\bin.v2\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe
MkDir1 ..\..\..\bin.v2\tools\regression\test\link-fail~pass.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\link-fail~pass.test\msvc-7.1
MkDir1 ..\..\..\bin.v2\tools\regression\test\link-fail~pass.test\msvc-7.1\debug
MkDir1 ..\..\..\bin.v2\tools\regression\test\link-fail~pass.test\msvc-7.1\debug\link-static
MkDir1 ..\..\..\bin.v2\tools\regression\test\link-fail~pass.test\msvc-7.1\debug\link-static\threading-multi
compile-c-c++ ..\..\..\bin.v2\tools\regression\test\link-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\link-fail~pass.obj
link-fail~pass.cpp
msvc.link ..\..\..\bin.v2\tools\regression\test\link-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\link-fail~pass.exe
link-fail~pass.obj : error LNK2019: unresolved external symbol "int __cdecl f(void)" (?f@@YAHXZ) referenced in function _main
..\..\..\bin.v2\tools\regression\test\link-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\link-fail~pass.exe : fatal error LNK1120: 1 unresolved externals
(failed-as-expected) ..\..\..\bin.v2\tools\regression\test\link-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\link-fail~pass.exe
**passed** ..\..\..\bin.v2\tools\regression\test\link-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\link-fail~pass.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test\msvc-7.1
MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test\msvc-7.1\debug
MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test\msvc-7.1\debug\link-static
MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test\msvc-7.1\debug\link-static\threading-multi
compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~compile-fail.obj
run-fail~compile-fail.cpp
run-fail~compile-fail.cpp(9) : fatal error C1189: #error : example of a compile failure
call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat" >nul
cl /Zm800 -nologo @"..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~compile-fail.obj.rsp"
...failed compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~compile-fail.obj...
...skipped <p..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test\msvc-7.1\debug\link-static\threading-multi>run-fail~compile-fail.exe for lack of <p..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test\msvc-7.1\debug\link-static\threading-multi>run-fail~compile-fail.obj...
...skipped <p..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test\msvc-7.1\debug\link-static\threading-multi>run-fail~compile-fail.run for lack of <p..\..\..\bin.v2\tools\regression\test\run-fail~compile-fail.test\msvc-7.1\debug\link-static\threading-multi>run-fail~compile-fail.exe...
MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1
MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug
MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static
MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi
compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.obj
run-fail~fail-warn.cpp
run-fail~fail-warn.cpp(13) : warning C4244: '=' : conversion from 'unsigned long' to 'short', possible loss of data
c:\users\misha\stuff\boost\head\boost\tools\regression\test\run-fail~fail-warn.cpp(13) : warning C4700: local variable 'ul' used without having been initialized
msvc.link ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.exe
testing.capture-output ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.run
1 file(s) copied.
..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.exe > ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.output 2>&1
set status=%ERRORLEVEL%
echo. >> ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.output
echo EXIT STATUS: %status% >> ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.output
if %status% EQU 0 (
copy ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.output ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.run
)
set verbose=0
if %status% NEQ 0 (
set verbose=1
)
if %verbose% EQU 1 (
echo ====== BEGIN OUTPUT ======
type ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.output
echo ====== END OUTPUT ======
)
exit %status%
...failed testing.capture-output ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.run...
...removing ..\..\..\bin.v2\tools\regression\test\run-fail~fail-warn.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail-warn.run
MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1
MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug
MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static
MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi
compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.obj
run-fail~fail.cpp
msvc.link ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.exe
testing.capture-output ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.run
1 file(s) copied.
..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.exe > ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.output 2>&1
set status=%ERRORLEVEL%
echo. >> ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.output
echo EXIT STATUS: %status% >> ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.output
if %status% EQU 0 (
copy ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.output ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.run
)
set verbose=0
if %status% NEQ 0 (
set verbose=1
)
if %verbose% EQU 1 (
echo ====== BEGIN OUTPUT ======
type ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.output
echo ====== END OUTPUT ======
)
exit %status%
...failed testing.capture-output ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.run...
...removing ..\..\..\bin.v2\tools\regression\test\run-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~fail.run
MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~pass.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~pass.test\msvc-7.1
MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~pass.test\msvc-7.1\debug
MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~pass.test\msvc-7.1\debug\link-static
MkDir1 ..\..\..\bin.v2\tools\regression\test\run-fail~pass.test\msvc-7.1\debug\link-static\threading-multi
compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\run-fail~pass.obj
run-fail~pass.cpp
msvc.link ..\..\..\bin.v2\tools\regression\test\run-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\run-fail~pass.exe
testing.capture-output ..\..\..\bin.v2\tools\regression\test\run-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\run-fail~pass.run
====== BEGIN OUTPUT ======
example of output from a run-time failure
EXIT STATUS: 1
====== END OUTPUT ======
del /f /q "..\..\..\bin.v2\tools\regression\test\run-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\run-fail~pass.exe"
...failed RmTemps ..\..\..\bin.v2\tools\regression\test\run-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\run-fail~pass.run...
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~fail.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug
...on 100th target...
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi
compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi\run~fail.obj
run~fail.cpp
msvc.link ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi\run~fail.exe
testing.capture-output ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi\run~fail.run
====== BEGIN OUTPUT ======
EXIT STATUS: 1
====== END OUTPUT ======
..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi\run~fail.exe > ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi\run~fail.output 2>&1
set status=%ERRORLEVEL%
echo. >> ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi\run~fail.output
echo EXIT STATUS: %status% >> ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi\run~fail.output
if %status% EQU 0 (
copy ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi\run~fail.output ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi\run~fail.run
)
set verbose=0
if %status% NEQ 0 (
set verbose=1
)
if %verbose% EQU 1 (
echo ====== BEGIN OUTPUT ======
type ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi\run~fail.output
echo ====== END OUTPUT ======
)
exit %status%
...failed testing.capture-output ..\..\..\bin.v2\tools\regression\test\run~fail.test\msvc-7.1\debug\link-static\threading-multi\run~fail.run...
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~note.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~note.test\msvc-7.1
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~note.test\msvc-7.1\debug
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~note.test\msvc-7.1\debug\link-static
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~note.test\msvc-7.1\debug\link-static\threading-multi
compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run~note.test\msvc-7.1\debug\link-static\threading-multi\run~note.obj
run~note.cpp
msvc.link ..\..\..\bin.v2\tools\regression\test\run~note.test\msvc-7.1\debug\link-static\threading-multi\run~note.exe
testing.capture-output ..\..\..\bin.v2\tools\regression\test\run~note.test\msvc-7.1\debug\link-static\threading-multi\run~note.run
1 file(s) copied.
**passed** ..\..\..\bin.v2\tools\regression\test\run~note.test\msvc-7.1\debug\link-static\threading-multi\run~note.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi
compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.obj
run~pass.cpp
msvc.link ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.exe
testing.capture-output ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.run
1 file(s) copied.
**passed** ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~warn-note.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~warn-note.test\msvc-7.1
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~warn-note.test\msvc-7.1\debug
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~warn-note.test\msvc-7.1\debug\link-static
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~warn-note.test\msvc-7.1\debug\link-static\threading-multi
compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run~warn-note.test\msvc-7.1\debug\link-static\threading-multi\run~warn-note.obj
run~warn-note.cpp
run~warn-note.cpp(21) : warning C4244: '=' : conversion from 'unsigned long' to 'short', possible loss of data
c:\users\misha\stuff\boost\head\boost\tools\regression\test\run~warn-note.cpp(21) : warning C4700: local variable 'ul' used without having been initialized
msvc.link ..\..\..\bin.v2\tools\regression\test\run~warn-note.test\msvc-7.1\debug\link-static\threading-multi\run~warn-note.exe
testing.capture-output ..\..\..\bin.v2\tools\regression\test\run~warn-note.test\msvc-7.1\debug\link-static\threading-multi\run~warn-note.run
1 file(s) copied.
**passed** ..\..\..\bin.v2\tools\regression\test\run~warn-note.test\msvc-7.1\debug\link-static\threading-multi\run~warn-note.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~warn.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~warn.test\msvc-7.1
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~warn.test\msvc-7.1\debug
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~warn.test\msvc-7.1\debug\link-static
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~warn.test\msvc-7.1\debug\link-static\threading-multi
compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run~warn.test\msvc-7.1\debug\link-static\threading-multi\run~warn.obj
run~warn.cpp
run~warn.cpp(15) : warning C4244: '=' : conversion from 'unsigned long' to 'short', possible loss of data
c:\users\misha\stuff\boost\head\boost\tools\regression\test\run~warn.cpp(15) : warning C4700: local variable 'ul' used without having been initialized
msvc.link ..\..\..\bin.v2\tools\regression\test\run~warn.test\msvc-7.1\debug\link-static\threading-multi\run~warn.exe
testing.capture-output ..\..\..\bin.v2\tools\regression\test\run~warn.test\msvc-7.1\debug\link-static\threading-multi\run~warn.run
1 file(s) copied.
**passed** ..\..\..\bin.v2\tools\regression\test\run~warn.test\msvc-7.1\debug\link-static\threading-multi\run~warn.test
...failed updating 9 targets...
...skipped 17 targets...
...updated 131 targets...

View file

@ -1,167 +0,0 @@
<test-log library="" test-name="compile-fail~fail" test-type="" test-program="" target-directory="tools/regression/test/compile-fail~fail.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
<compile result="fail" timestamp="xxx">
compile-fail~fail.cpp
call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat" &gt;nul
cl /Zm800 -nologo @"C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\compile-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\compile-fail~fail.obj.rsp"
</compile>
</test-log>
<test-log library="" test-name="compile-fail~pass" test-type="" test-program="" target-directory="tools/regression/test/compile-fail~pass.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
<compile result="succeed" timestamp="xxx">
compile-fail~pass.cpp
compile-fail~pass.cpp(9) : fatal error C1189: #error : example of a compile failure
(failed-as-expected) C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\compile-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\compile-fail~pass.obj
</compile>
</test-log>
<test-log library="" test-name="compile~fail" test-type="" test-program="" target-directory="tools/regression/test/compile~fail.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
<compile result="fail" timestamp="xxx">
compile~fail.cpp
compile~fail.cpp(9) : fatal error C1189: #error : example of a compile failure
call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat" &gt;nul
cl /Zm800 -nologo @"C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\compile~fail.test\msvc-7.1\debug\link-static\threading-multi\compile~fail.obj.rsp"
</compile>
</test-log>
<test-log library="" test-name="compile~pass" test-type="" test-program="" target-directory="tools/regression/test/compile~pass.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
<compile result="succeed" timestamp="xxx">
compile~pass.cpp
</compile>
</test-log>
<test-log library="" test-name="compile~warn" test-type="" test-program="" target-directory="tools/regression/test/compile~warn.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
<compile result="succeed" timestamp="xxx">
compile~warn.cpp
compile~warn.cpp(15) : warning C4244: '=' : conversion from 'unsigned long' to 'short', possible loss of data
c:\users\misha\stuff\boost\head\boost\tools\regression\test\compile~warn.cpp(15) : warning C4700: local variable 'ul' used without having been initialized
</compile>
</test-log>
<test-log library="" test-name="link-fail~fail" test-type="" test-program="" target-directory="tools/regression/test/link-fail~fail.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
<compile result="succeed" timestamp="xxx">
link-fail~fail.cpp
</compile>
<link result="fail" timestamp="xxx">
call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat" &gt;nul
link /NOLOGO /INCREMENTAL:NO /DEBUG /subsystem:console /out:"C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe" @"C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe.rsp"
if errorlevel 1 exit %errorlevel%
if exist "C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe.manifest" (
mt -nologo -manifest "C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe.manifest" "-outputresource:C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link-fail~fail.test\msvc-7.1\debug\link-static\threading-multi\link-fail~fail.exe;1"
)
</link>
</test-log>
<test-log library="" test-name="link-fail~pass" test-type="" test-program="" target-directory="tools/regression/test/link-fail~pass.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
<compile result="succeed" timestamp="xxx">
link-fail~pass.cpp
</compile>
<link result="succeed" timestamp="xxx">
link-fail~pass.obj : error LNK2019: unresolved external symbol "int __cdecl f(void)" (?f@@YAHXZ) referenced in function _main
C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\link-fail~pass.exe : fatal error LNK1120: 1 unresolved externals
(failed-as-expected) C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link-fail~pass.test\msvc-7.1\debug\link-static\threading-multi\link-fail~pass.exe
</link>
</test-log>
<test-log library="" test-name="link~fail" test-type="" test-program="" target-directory="tools/regression/test/link~fail.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
<compile result="succeed" timestamp="xxx">
link~fail.cpp
</compile>
<link result="fail" timestamp="xxx">
link~fail.obj : error LNK2019: unresolved external symbol "int __cdecl f(void)" (?f@@YAHXZ) referenced in function _main
C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe : fatal error LNK1120: 1 unresolved externals
call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat" &gt;nul
link /NOLOGO /INCREMENTAL:NO /DEBUG /subsystem:console /out:"C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe" @"C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe.rsp"
if errorlevel 1 exit %errorlevel%
if exist "C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe.manifest" (
mt -nologo -manifest "C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe.manifest" "-outputresource:C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\link~fail.test\msvc-7.1\debug\link-static\threading-multi\link~fail.exe;1"
)
</link>
</test-log>
<test-log library="" test-name="link~pass" test-type="" test-program="" target-directory="tools/regression/test/link~pass.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
<compile result="succeed" timestamp="xxx">
link~pass.cpp
</compile>
<link result="succeed" timestamp="xxx">
</link>
</test-log>
<test-log library="" test-name="run-fail~compile-fail" test-type="" test-program="" target-directory="tools/regression/test/run-fail~compile-fail.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
<compile result="fail" timestamp="xxx">
run-fail~compile-fail.cpp
run-fail~compile-fail.cpp(9) : fatal error C1189: #error : example of a compile failure
call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat" &gt;nul
cl /Zm800 -nologo @"C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\general\actual\tools\regression\test\run-fail~compile-fail.test\msvc-7.1\debug\link-static\threading-multi\run-fail~compile-fail.obj.rsp"
</compile>
</test-log>
<test-log library="" test-name="run-fail~fail-warn" test-type="" test-program="" target-directory="tools/regression/test/run-fail~fail-warn.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
<compile result="succeed" timestamp="xxx">
run-fail~fail-warn.cpp
run-fail~fail-warn.cpp(13) : warning C4244: '=' : conversion from 'unsigned long' to 'short', possible loss of data
c:\users\misha\stuff\boost\head\boost\tools\regression\test\run-fail~fail-warn.cpp(13) : warning C4700: local variable 'ul' used without having been initialized
</compile>
<link result="succeed" timestamp="xxx"></link>
<run result="fail" timestamp="xxx">
</run>
</test-log>
<test-log library="" test-name="run-fail~fail" test-type="" test-program="" target-directory="tools/regression/test/run-fail~fail.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
<compile result="succeed" timestamp="xxx">
run-fail~fail.cpp
</compile>
<link result="succeed" timestamp="xxx"></link>
<run result="fail" timestamp="xxx">
</run>
</test-log>
<test-log library="" test-name="run-fail~pass" test-type="" test-program="" target-directory="tools/regression/test/run-fail~pass.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
<compile result="succeed" timestamp="xxx">
run-fail~pass.cpp
</compile>
<link result="succeed" timestamp="xxx"></link>
<run result="succeed" timestamp="xxx">
</run>
</test-log>
<test-log library="" test-name="run~fail" test-type="" test-program="" target-directory="tools/regression/test/run~fail.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
<compile result="succeed" timestamp="xxx">
run~fail.cpp
</compile>
<link result="succeed" timestamp="xxx"></link>
<run result="fail" timestamp="xxx">
</run>
</test-log>
<test-log library="" test-name="run~note" test-type="" test-program="" target-directory="tools/regression/test/run~note.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
<compile result="succeed" timestamp="xxx">
run~note.cpp
</compile>
<link result="succeed" timestamp="xxx"></link>
<run result="succeed" timestamp="xxx">
</run>
</test-log>
<test-log library="" test-name="run~pass" test-type="" test-program="" target-directory="tools/regression/test/run~pass.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
<compile result="succeed" timestamp="xxx">
run~pass.cpp
</compile>
<link result="succeed" timestamp="xxx"></link>
<run result="succeed" timestamp="xxx">
</run>
</test-log>
<test-log library="" test-name="run~warn-note" test-type="" test-program="" target-directory="tools/regression/test/run~warn-note.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
<compile result="succeed" timestamp="xxx">
run~warn-note.cpp
run~warn-note.cpp(21) : warning C4244: '=' : conversion from 'unsigned long' to 'short', possible loss of data
c:\users\misha\stuff\boost\head\boost\tools\regression\test\run~warn-note.cpp(21) : warning C4700: local variable 'ul' used without having been initialized
</compile>
<link result="succeed" timestamp="xxx"></link>
<run result="succeed" timestamp="xxx">
</run>
</test-log>
<test-log library="" test-name="run~warn" test-type="" test-program="" target-directory="tools/regression/test/run~warn.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
<compile result="succeed" timestamp="xxx">
run~warn.cpp
run~warn.cpp(15) : warning C4244: '=' : conversion from 'unsigned long' to 'short', possible loss of data
c:\users\misha\stuff\boost\head\boost\tools\regression\test\run~warn.cpp(15) : warning C4700: local variable 'ul' used without having been initialized
</compile>
<link result="succeed" timestamp="xxx"></link>
<run result="succeed" timestamp="xxx">
</run>
</test-log>

View file

@ -1,33 +0,0 @@
locate-root "..\..\..\bin.v2"
C:\Users\Misha\Stuff\boost\HEAD\boost\tools\regression\test>C:\Users\Misha\Stuff\boost\HEAD\bin\..\boost\tools\jam\src\bin.ntx86\bjam.exe --dump-tests --v2 msvc-7.1 "-sBOOST_BUILD_PATH=C:\Users\Misha\Stuff\boost\HEAD\bin\.." "-sBOOST_ROOT="C:\Users\Misha\Stuff\boost\HEAD\bin\..\boost"
boost-test(RUN) "testlib/run~pass" : "tools/regression/test/run~pass.cpp"
boost-test(RUN) "testlib/run~pass" : "tools/regression/test/run~pass2s.cpp"
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi
compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.obj
run~pass.cpp
msvc.link ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.exe
testing.capture-output ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.run
1 file(s) copied.
**passed** ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass2.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi
compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.obj
run~pass2.cpp
msvc.link ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.exe
testing.capture-output ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.run
1 file(s) copied.
**passed** ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.test
...failed updating 9 targets...
...skipped 17 targets...
...updated 131 targets...

View file

@ -1,38 +0,0 @@
locate-root "..\..\..\bin.v2"
C:\Users\Misha\Stuff\boost\HEAD\boost\tools\regression\test>C:\Users\Misha\Stuff\boost\HEAD\bin\..\boost\tools\jam\src\bin.ntx86\bjam.exe --dump-tests --v2 msvc-7.1 "-sBOOST_BUILD_PATH=C:\Users\Misha\Stuff\boost\HEAD\bin\.." "-sBOOST_ROOT="C:\Users\Misha\Stuff\boost\HEAD\bin\..\boost"
boost-test(RUN) "testlib/run~pass" : "tools/regression/test/run~pass.cpp"
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi
compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.obj
run~pass.cpp
call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat" >nul
cl /Zm800 -nologo @"..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.obj.rsp"
...failed compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.obj...
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass2.test
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static
MkDir1 ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi
compile-c-c++ ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.obj
run~pass2.cpp
msvc.link ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.exe
call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat" >nul
link /NOLOGO /INCREMENTAL:NO /DEBUG /subsystem:console /out:"..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.exe" @"..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.exe.rsp"
if errorlevel 1 exit %errorlevel%
if exist "..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.exe.manifest" (
mt -nologo -manifest "..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.exe.manifest" "-outputresource:..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.exe;1"
)
...failed msvc.link ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.exe...
...removing ..\..\..\bin.v2\tools\regression\test\run~pass2.test\msvc-7.1\debug\link-static\threading-multi\run~pass2.exe
...failed updating 9 targets...
...skipped 17 targets...
...updated 131 targets...

View file

@ -1,9 +0,0 @@
<test-log library="" test-name="run~pass" test-type="" test-program="" target-directory="tools/regression/test/run~pass.test/msvc-7.1/debug/link-static/threading-multi" toolset="msvc-7.1" show-run-output="false">
<compile result="fail" timestamp="xxx">
run~pass.cpp
call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat" &gt;nul
cl /Zm800 -nologo @"C:\users\Misha\Stuff\boost\boost\tools\regression\test\test-cases\incremental\actual\tools\regression\test\run~pass.test\msvc-7.1\debug\link-static\threading-multi\run~pass.obj.rsp"
</compile>
</test-log>

View file

@ -1,15 +0,0 @@
rem Copyright Beman Dawes 2005
rem Distributed under the Boost Software License, Version 1.0.
rem See http://www.boost.org/LICENSE_1_0.txt
rem Driver script: run the Boost regression tests and produce status pages.
rem Stage 1 runs the tests with bjam, stage 2 converts the log to per-test
rem XML with process_jam_log, stage 3 renders the HTML status tables.
rem Place all build/test artifacts under the user's temp directory.
set TEST_LOCATE_ROOT=%TEMP%
echo Begin test processing...
rem --dump-tests makes bjam emit the test declarations process_jam_log needs;
rem all bjam output (stdout and stderr) is captured into bjam.log.
bjam --dump-tests "-sALL_LOCATE_TARGET=%TEST_LOCATE_ROOT%" %* >bjam.log 2>&1
echo Begin log processing...
process_jam_log %TEST_LOCATE_ROOT% <bjam.log
rem Open the raw log for inspection.
start bjam.log
echo Begin compiler status processing...
compiler_status --locate-root %TEST_LOCATE_ROOT% %BOOST_ROOT% test_status.html test_links.html
rem Open the generated status page.
start test_status.html

View file

@ -1,181 +0,0 @@
# Copyright (c) MetaCommunications, Inc. 2003-2005
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import difflib
import os
import re
import shutil
import string
import sys
def scan_for_test_cases():
    """Return the path of every test case under "test-cases".

    CVS bookkeeping entries are skipped.
    """
    cases = []
    for entry in os.listdir( "test-cases" ):
        if entry == "CVS":
            continue
        cases.append( os.path.join( "test-cases", entry ) )
    return cases
def clean_dir( dir ):
    """Ensure `dir` exists and is empty.

    Any previous contents are removed before the directory is recreated.
    """
    already_there = os.path.exists( dir )
    if already_there:
        shutil.rmtree( dir )
    os.makedirs( dir )
def system( commands ):
    """Run a sequence of shell commands and return the exit status.

    On Windows the commands are written to a temporary batch file
    ('tmp.cmd') and executed as one script; on other platforms they are
    chained with '&&' and handed to the shell.
    """
    if sys.platform != 'win32':
        return os.system( '&&'.join( commands ) )
    script = open( 'tmp.cmd', 'w' )
    script.write( string.join( commands, '\n' ) )
    script.close()
    status = os.system( 'tmp.cmd' )
    os.unlink( 'tmp.cmd' )
    return status
def checked_system( commands, valid_return_codes = [ 0 ] ):
    """Run `commands` via system() and fail unless the status is acceptable.

    A status of 0 is always accepted, as is any status listed in
    `valid_return_codes`. Returns the status; raises Exception otherwise.
    """
    rc = system( commands )
    acceptable = [ 0 ] + valid_return_codes
    if rc in acceptable:
        return rc
    raise Exception( 'Command sequence "%s" failed with return code %d' % ( commands, rc ) )
def list_recursively( dir ):
    """Return every directory and file under `dir` as paths relative to it.

    Traversal is bottom-up (os.walk with topdown=False); within each
    visited directory, subdirectories are listed before files.
    """
    entries = []
    for walk_root, dirs, files in os.walk( dir, topdown=False ):
        rel = walk_root[ len( dir ) + 1 : ]
        for d in dirs:
            entries.append( os.path.join( rel, d ) )
        for f in files:
            entries.append( os.path.join( rel, f ) )
    return entries
def find_process_jam_log():
    """Search ../../../bin.v2 for a built process_jam_log executable.

    Returns the absolute, normalized path of the first hit (the Windows
    '.exe' name is checked before the bare name), or None when nothing
    is found.
    """
    base = "../../../"
    for walk_root, dirs, files in os.walk( os.path.join( base, "bin.v2" ), topdown=False ):
        for candidate in ( "process_jam_log.exe", "process_jam_log" ):
            if candidate in files:
                return os.path.abspath( os.path.normpath( os.path.join( walk_root, candidate ) ) )
    return None
def process_jam_log( executable, file, locate_root, results_dir ):
args = []
args.append( executable )
# args.append( '--echo' )
args.append( '--create-directories' )
args.append( '--v2' )
args.append( locate_root )
args.append( '<' )
args.append( file )
cmd = " ".join( args )
print "Running process_jam_log (%s)" % cmd
checked_system( [ cmd ] )
def read_file( file_path ):
    """Return the complete contents of the file at `file_path`."""
    handle = open( file_path )
    try:
        contents = handle.read()
    finally:
        handle.close()
    return contents
def remove_timestamps( log_lines ):
    """Blank out every timestamp="..." attribute in the given lines.

    Returns a new list; the input is not modified.
    """
    stripped = []
    for line in log_lines:
        stripped.append( re.sub( "timestamp=\"[^\"]+\"", "timestamp=\"\"", line ) )
    return stripped
def determine_locate_root( bjam_log ):
    """Extract the locate-root directory from a bjam log file.

    Scans `bjam_log` for the first line of the form
        locate-root "<path>"
    and returns <path>, or None when no such line exists.

    Bug fix: the original ignored its argument and always opened the
    hard-coded file 'bjam.log'; the parameter is now honored (the only
    visible caller passes 'bjam.log', so its behavior is unchanged).
    """
    locate_root = None
    f = open( bjam_log )
    try:
        locate_root_re = re.compile( r'locate-root\s+"(.*)"' )
        for l in f.readlines():
            m = locate_root_re.match( l )
            if m:
                locate_root = m.group(1)
                break
    finally:
        f.close()
    return locate_root
# NOTE(review): duplicate definition -- an identical read_file is defined
# earlier in this script; this later definition shadows it at module load.
# Consider removing one of the two.
def read_file( path ):
    # Return the entire contents of the file at `path`; the handle is
    # closed even if read() raises.
    f = open( path )
    try:
        return f.read()
    finally:
        f.close()
def read_file_lines( path ):
    """Return the file's contents as a list of lines (newlines kept)."""
    handle = open( path )
    try:
        lines = handle.readlines()
    finally:
        handle.close()
    return lines
def write_file( path, content ):
    """Overwrite `path` with `content`, returning write()'s result."""
    handle = open( path, 'w' )
    try:
        result = handle.write( content )
    finally:
        handle.close()
    return result
def write_file_lines( path, content ):
    """Overwrite `path` with the given lines, returning writelines()'s result.

    `content` is written verbatim -- no newlines are added.
    """
    handle = open( path, 'w' )
    try:
        result = handle.writelines( content )
    finally:
        handle.close()
    return result
def run_test_cases( test_cases ):
# Drive every regression test case: replay each case's recorded bjam logs
# through process_jam_log and compare the produced XML with the stored
# "expected/results.xml" baseline.
# NOTE: Python 2 code (print statements); the string raise on the error
# path below is a runtime TypeError on modern interpreters.
process_jam_log_executable = find_process_jam_log()
print 'Found process_jam_log: %s' % process_jam_log_executable
initial_dir = os.getcwd()
for test_case in test_cases:
os.chdir( initial_dir )
print 'Running test case "%s"' % test_case
os.chdir( test_case )
if os.path.exists( "expected" ):
# the recorded log says which locate-root the logs were produced with
locate_root = determine_locate_root( 'bjam.log' )
print 'locate_root: %s' % locate_root
actual_results_dir = os.path.join( test_case, "actual" )
clean_dir( "actual" )
os.chdir( "actual" )
root = os.getcwd()
# replay bjam.log, bjam.log.1, bjam.log.2, ... until one is missing
i = 0
while 1:
if i == 0:
bjam_log_file = 'bjam.log'
else:
bjam_log_file = 'bjam.log.%0d' % i
i += 1
print 'Looking for %s' % bjam_log_file
if not os.path.exists( os.path.join( '..', bjam_log_file ) ):
print '   does not exists'
break
print '   found'
# rewrite the recorded locate-root to the local "actual" directory
write_file_lines(bjam_log_file.replace( 'bjam', 'bjam_' ),
[ x.replace( locate_root, root ) for x in read_file_lines( os.path.join( '..', bjam_log_file ) ) ] )
process_jam_log( executable = process_jam_log_executable
, results_dir = "."
, locate_root = root
, file=bjam_log_file.replace( 'bjam', 'bjam_' ) )
# concatenate all produced .xml files, neutralizing timestamps, and
# compare against the recorded baseline
actual_content = list_recursively( "." )
actual_content.sort()
result_xml = []
for test_log in [ x for x in actual_content if os.path.splitext( x )[1] == '.xml' ]:
print 'reading %s' % test_log
result = [ re.sub( r'timestamp="(.*)"', 'timestamp="xxx"', x ) for x in read_file_lines( test_log ) ]
result_xml.extend( result )
write_file_lines( 'results.xml', result_xml )
os.chdir( '..' )
assert read_file( 'expected/results.xml' ) == read_file( 'actual/results.xml' )
os.chdir( '..' )
else:
raise ' Test case "%s" doesn\'t contain the expected results directory ("expected" )' % ( test_case )
run_test_cases( scan_for_test_cases() )
# print find_process_jam_log()

View file

@ -1,27 +0,0 @@
This folder contains the scripts that produce the Boost regression test tables.
The entry point is the boost_wide_report.py script. In the simplest
case, it should be run as:
python boost_wide_report.py
--locate-root=XXX
--results-dir=YYY
--tag trunk
--expected-results=XXX
--failures-markup=XXX
The 'trunk' is the tag of things that are tested, and should match the
directory name on the server keeping uploaded individual results.
'results-dir' is a directory where individual results (zip files) will
be downloaded, and then processed. expected-results and failures-markup
should be paths to corresponding files in 'status' subdir of boost tree.
locate-root should point at the boost root; it's unclear whether it is of
any use now.
This will download and process *all* test results, but it will not
upload them, so good for local testing. It's possible to run
this command, interrupt it while it processes results, leave just
a few .zip files in result dir, and then re-run with --dont-collect-logs
option, to use downloaded zips only.

View file

@ -1,835 +0,0 @@
# Copyright (c) MetaCommunications, Inc. 2003-2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import shutil
import codecs
import xml.sax.handler
import xml.sax.saxutils
import glob
import re
import os.path
import os
import string
import time
import sys
import ftplib
import utils
report_types = [ 'us', 'ds', 'ud', 'dd', 'l', 'p', 'i', 'n', 'ddr', 'dsr', 'udr', 'usr' ]
if __name__ == '__main__':
run_dir = os.path.abspath( os.path.dirname( sys.argv[ 0 ] ) )
else:
run_dir = os.path.abspath( os.path.dirname( sys.modules[ __name__ ].__file__ ) )
def map_path( path ):
    # Resolve `path` relative to the directory this script lives in (run_dir).
    return os.path.join( run_dir, path )
def xsl_path( xsl_file_name ):
    # XSL stylesheets live in the xsl/v2 subdirectory next to this script.
    return map_path( os.path.join( 'xsl/v2', xsl_file_name ) )
class file_info:
    """A (name, size, modification-date) record for one result archive,
    used to compare remote FTP listings with the local mirror."""

    def __init__( self, file_name, file_size, file_date ):
        self.name, self.size, self.date = file_name, file_size, file_date

    def __repr__( self ):
        return "name: %s, size: %s, date %s" % ( self.name, self.size, self.date )
#
# Find the mod time from unix format directory listing line
#
def get_date( words ):
    """Parse the date fields of a unix `ls -l` style listing line.

    `words` is the whitespace-split line; fields 5..-2 hold month name, day,
    and either an HH:MM time (meaning the current year) or an explicit year.
    Returns a 9-tuple suitable for time.mktime (seconds and DST fields zero).
    """
    date = words[ 5: -1 ]
    month_names = [ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" ]
    year = time.localtime()[0] # if the year is not specified, assume the current year
    month = month_names.index( date[0] ) + 1
    day = int( date[1] )
    hours = 0
    minutes = 0
    if date[2].find( ":" ) != -1:
        ( hours, minutes ) = [ int(x) for x in date[2].split( ":" ) ]
    else:
        # listings show an explicit year instead of a time for older files;
        # hours/minutes cannot be recovered in that case
        year = int( date[2] )
    return ( year, month, day, hours, minutes, 0, 0, 0, 0 )
def list_ftp( f ):
# Return a list of file_info records (name, no size, parsed mod date) for
# every non-directory entry in the current directory of ftp connection `f`.
# f is an ftp object
utils.log( "listing source content" )
lines = []
# 1. get all lines
f.dir( lambda x: lines.append( x ) )
# 2. split lines into words
word_lines = [ x.split( None, 8 ) for x in lines ]
# we don't need directories
result = [ file_info( l[-1], None, get_date( l ) ) for l in word_lines if l[0][0] != "d" ]
# NOTE(review): the loop variable below shadows the ftp object parameter `f`
for f in result:
utils.log( "    %s" % f )
return result
def list_dir( dir ):
# Return a list of file_info records for every *.zip file directly in
# `dir`, with modification dates normalized to whole minutes (seconds and
# weekday/yearday fields zeroed) so they compare cleanly against FTP dates.
utils.log( "listing destination content %s" % dir )
result = []
for file_path in glob.glob( os.path.join( dir, "*.zip" ) ):
if os.path.isfile( file_path ):
mod_time = time.localtime( os.path.getmtime( file_path ) )
mod_time = ( mod_time[0], mod_time[1], mod_time[2], mod_time[3], mod_time[4], mod_time[5], 0, 0, mod_time[8] )
# no size (for now)
result.append( file_info( os.path.basename( file_path ), None, mod_time ) )
for fi in result:
utils.log( "    %s" % fi )
return result
def find_by_name( d, name ):
    """Return the first item in `d` whose .name equals `name`, else None."""
    # idiomatic replacement for the original manual loop
    return next( ( item for item in d if item.name == name ), None )
def diff( source_dir_content, destination_dir_content ):
# Compare two file_info listings. Returns a pair of name lists:
# result[0] = files new or modified on the source side (by mtime),
# result[1] = files present only at the destination (obsolete).
utils.log( "Finding updated files" )
result = ( [], [] ) # ( changed_files, obsolete_files )
for source_file in source_dir_content:
found = find_by_name( destination_dir_content, source_file.name )
if found is None: result[0].append( source_file.name )
elif time.mktime( found.date ) != time.mktime( source_file.date ): result[0].append( source_file.name )
else:
pass
for destination_file in destination_dir_content:
found = find_by_name( source_dir_content, destination_file.name )
if found is None: result[1].append( destination_file.name )
utils.log( "   Updated files:" )
for f in result[0]:
utils.log( "    %s" % f )
utils.log( "   Obsolete files:" )
for f in result[1]:
utils.log( "    %s" % f )
return result
def _modtime_timestamp( file ):
return os.stat( file ).st_mtime
root_paths = []
def shorten( file_path ):
# Make `file_path` relative to the longest matching entry in the module
# global root_paths (case-insensitive prefix match) and normalize
# backslashes to '/'. Sorting by descending length makes the longest
# prefix win.
# NOTE: Python 2 only — list.sort with a cmp function and the cmp builtin
# were removed in Python 3.
root_paths.sort( lambda x, y: cmp( len(y ), len( x ) ) )
for root in root_paths:
if file_path.lower().startswith( root.lower() ):
return file_path[ len( root ): ].replace( "\\", "/" )
return file_path.replace( "\\", "/" )
class action:
# Base class for make-style rebuild steps: a target file (file_path_) is
# rebuilt via the subclass's update() when any dependency is newer than
# the target, and removed via clean() when a dependency disappeared.
def __init__( self, file_path ):
self.file_path_ = file_path
self.relevant_paths_ = [ self.file_path_ ]
self.boost_paths_ = []
self.dependencies_ = []
self.other_results_ = []
def run( self ):
# Decide whether the target needs cleaning, building, or rebuilding.
# Subclasses must provide update().
utils.log( "%s: run" % shorten( self.file_path_ ) )
__log__ = 2
for dependency in self.dependencies_:
if not os.path.exists( dependency ):
utils.log( "%s doesn't exists, removing target" % shorten( dependency ) )
self.clean()
return
if not os.path.exists( self.file_path_ ):
utils.log( "target doesn't exists, building" )
self.update()
return
dst_timestamp = _modtime_timestamp( self.file_path_ )
utils.log( "    target: %s [%s]" % ( shorten( self.file_path_ ),  dst_timestamp ) )
needs_updating = 0
utils.log( "    dependencies:" )
for dependency in  self.dependencies_:
dm = _modtime_timestamp( dependency )
update_mark = ""
if dm > dst_timestamp:
needs_updating = 1
utils.log( '        %s [%s] %s' % ( shorten( dependency ),  dm, update_mark ) )
if needs_updating:
utils.log( "target needs updating, rebuilding" )
self.update()
return
else:
utils.log( "target is up-to-date" )
def clean( self ):
# Remove the target and any secondary outputs listed in other_results_.
to_unlink = self.other_results_ + [ self.file_path_ ]
for result in to_unlink:
utils.log( '  Deleting obsolete "%s"' % shorten( result ) )
if os.path.exists( result ):
os.unlink( result )
class merge_xml_action( action ):
# Merges one runner's uploaded XML with the expected-results and
# failures-markup files (via add_expected_results.xsl), after trimming
# oversized text nodes. NOTE: Python 2 code ("except Exception, msg").
def __init__( self, source, destination, expected_results_file, failures_markup_file, tag ):
action.__init__( self, destination )
self.source_ = source
self.destination_ = destination
self.tag_ = tag
self.expected_results_file_ = expected_results_file
self.failures_markup_file_ = failures_markup_file
self.dependencies_.extend( [
self.source_
, self.expected_results_file_
, self.failures_markup_file_
]
)
self.relevant_paths_.extend( [ self.source_ ] )
self.boost_paths_.extend( [ self.expected_results_file_, self.failures_markup_file_ ] )
def update( self ):
def filter_xml( src, dest ):
# Copy src to dest while truncating any text run above 64K, so huge
# compiler outputs do not blow up the downstream XSL transforms.
class xmlgen( xml.sax.saxutils.XMLGenerator ):
def __init__( self, writer ):
xml.sax.saxutils.XMLGenerator.__init__( self, writer )
self.trimmed = 0
self.character_content = ""
def startElement( self, name, attrs):
self.flush()
xml.sax.saxutils.XMLGenerator.startElement( self, name, attrs )
def endElement( self, name ):
self.flush()
xml.sax.saxutils.XMLGenerator.endElement( self, name )
def flush( self ):
content = self.character_content
self.character_content = ""
self.trimmed = 0
xml.sax.saxutils.XMLGenerator.characters( self, content )
def characters( self, content ):
if not self.trimmed:
max_size = pow( 2, 16 )
self.character_content += content
if len( self.character_content ) > max_size:
self.character_content = self.character_content[ : max_size ] + "...\n\n[The content has been trimmed by the report system because it exceeds %d bytes]" % max_size
self.trimmed = 1
o = open( dest, "w" )
try:
gen = xmlgen( o )
xml.sax.parse( src, gen )
finally:
o.close()
return dest
utils.log( 'Merging "%s" with expected results...' % shorten( self.source_ ) )
try:
trimmed_source = filter_xml( self.source_, '%s-trimmed.xml' % os.path.splitext( self.source_ )[0] )
utils.libxslt(
utils.log
, trimmed_source
, xsl_path( 'add_expected_results.xsl' )
, self.file_path_
, {
"expected_results_file" : self.expected_results_file_
, "failures_markup_file": self.failures_markup_file_
, "source" : self.tag_
}
)
os.unlink( trimmed_source )
except Exception, msg:
# on any failure the target is removed so a later run rebuilds it
utils.log( '  Skipping "%s" due to errors (%s)' % ( self.source_, msg ) )
if os.path.exists( self.file_path_ ):
os.unlink( self.file_path_ )
def _xml_timestamp( xml_path ):
# Extract the timestamp attribute of the first <test-run> element in
# `xml_path`. Uses the Python 2 idiom of raising the handler instance
# itself to abort SAX parsing early; the bare string raise only triggers
# when no <test-run> element was found at all.
class timestamp_reader( xml.sax.handler.ContentHandler ):
def startElement( self, name, attrs ):
if name == 'test-run':
self.timestamp = attrs.getValue( 'timestamp' )
raise self
try:
xml.sax.parse( xml_path, timestamp_reader() )
raise 'Cannot extract timestamp from "%s". Invalid XML file format?' % xml_path
except timestamp_reader, x:
return x.timestamp
class make_links_action( action ):
# Produces the per-test links/output HTML pages for one merged XML file
# (via links_page.xsl). The target file itself is only an empty marker
# touched after the transform, so dependency checking sees the work done.
# NOTE: Python 2 code ("except Exception, msg").
def __init__( self, source, destination, output_dir, tag, run_date, comment_file, failures_markup_file ):
action.__init__( self, destination )
self.dependencies_.append( source )
self.source_ = source
self.output_dir_ = output_dir
self.tag_ = tag
self.run_date_ = run_date
self.comment_file_ = comment_file
self.failures_markup_file_ = failures_markup_file
self.links_file_path_ = os.path.join( output_dir, 'links.html' )
def update( self ):
utils.makedirs( os.path.join( os.path.dirname( self.links_file_path_ ), "output" ) )
utils.makedirs( os.path.join( os.path.dirname( self.links_file_path_ ), "developer", "output" ) )
utils.makedirs( os.path.join( os.path.dirname( self.links_file_path_ ), "user", "output" ) )
utils.log( '  Making test output files...' )
try:
utils.libxslt(
utils.log
, self.source_
, xsl_path( 'links_page.xsl' )
, self.links_file_path_
, {
'source':                 self.tag_
, 'run_date':               self.run_date_
, 'comment_file':           self.comment_file_
, 'explicit_markup_file':   self.failures_markup_file_
}
)
except Exception, msg:
utils.log( '  Skipping "%s" due to errors (%s)' % ( self.source_, msg ) )
# touch the marker target
open( self.file_path_, "w" ).close()
class unzip_action( action ):
# Unpacks one uploaded .zip archive into the directory of its target file
# using the injected unzip_func. NOTE: Python 2 "except Exception, msg".
def __init__( self, source, destination, unzip_func ):
action.__init__( self, destination )
self.dependencies_.append( source )
self.source_     = source
self.unzip_func_ = unzip_func
def update( self ):
try:
utils.log( '  Unzipping "%s" ... into "%s"' % ( shorten( self.source_ ), os.path.dirname( self.file_path_ ) ) )
self.unzip_func_( self.source_, os.path.dirname( self.file_path_ ) )
except Exception, msg:
utils.log( '  Skipping "%s" due to errors (%s)' % ( self.source_, msg ) )
def ftp_task( site, site_path , destination ):
# Mirror runner-uploaded result archives from the ftp server into
# `destination`: download new/changed zips, delete local zips gone
# upstream, and stamp downloads with the remote modification time.
__log__ = 1
utils.log( '' )
utils.log( 'ftp_task: "ftp://%s/%s" -> %s' % ( site, site_path, destination ) )
utils.log( '    logging on ftp site %s' % site )
f = ftplib.FTP( site )
f.login()
utils.log( '    cwd to "%s"' % site_path )
f.cwd( site_path )
source_content = list_ftp( f )
# keep only result archives: *.zip but not *.log.zip, and not the docs
source_content = [ x for x in source_content if re.match( r'.+[.](?<!log[.])zip', x.name ) and x.name.lower() != 'boostbook.zip' ]
destination_content = list_dir( destination )
d = diff( source_content, destination_content )
def synchronize():
for source in d[0]:
utils.log( 'Copying "%s"' % source )
result = open( os.path.join( destination, source ), 'wb' )
f.retrbinary( 'RETR %s' % source, result.write )
result.close()
mod_date = find_by_name( source_content, source ).date
m = time.mktime( mod_date )
os.utime( os.path.join( destination, source ), ( m, m ) )
for obsolete in d[1]:
utils.log( 'Deleting "%s"' % obsolete )
os.unlink( os.path.join( destination, obsolete ) )
utils.log( "    Synchronizing..." )
__log__ = 2
synchronize()
f.quit()
def unzip_archives_task( source_dir, processed_dir, unzip_func ):
    """Unpack every updated results archive from source_dir into processed_dir."""
    utils.log( '' )
    utils.log( 'unzip_archives_task: unpacking updated archives in "%s" into "%s"...' % ( source_dir, processed_dir ) )
    __log__ = 1
    # Targets are the .xml files corresponding to every .zip in source_dir,
    # plus any .xml already in processed_dir (so stale targets get cleaned).
    zips = glob.glob( os.path.join( source_dir, "*.zip" ) )
    target_files = [ os.path.join( processed_dir, os.path.basename( z.replace( ".zip", ".xml" ) ) ) for z in zips ]
    target_files += glob.glob( os.path.join( processed_dir, "*.xml" ) )
    for target in target_files:
        archive = os.path.join( source_dir, os.path.basename( target.replace( ".xml", ".zip" ) ) )
        unzip_action( archive, target, unzip_func ).run()
def merge_xmls_task( source_dir, processed_dir, merged_dir, expected_results_file, failures_markup_file, tag ):
    """Merge every processed XML with expected-results/failures markup into merged_dir."""
    utils.log( '' )
    utils.log( 'merge_xmls_task: merging updated XMLs in "%s"...' % source_dir )
    __log__ = 1
    utils.makedirs( merged_dir )
    # Targets mirror processed_dir's XMLs, plus any XML already merged
    # (so stale targets get cleaned when their source disappears).
    target_files = [ os.path.join( merged_dir, os.path.basename( x ) ) for x in glob.glob( os.path.join( processed_dir, "*.xml" ) ) ]
    target_files += glob.glob( os.path.join( merged_dir, "*.xml" ) )
    for target in target_files:
        source = os.path.join( processed_dir, os.path.basename( target ) )
        merge_xml_action( source, target, expected_results_file, failures_markup_file, tag ).run()
def make_links_task( input_dir, output_dir, tag, run_date, comment_file, extended_test_results, failures_markup_file ):
    """Produce the per-test output/links pages for every merged XML in input_dir."""
    utils.log( '' )
    utils.log( 'make_links_task: make output files for test results in "%s"...' % input_dir )
    __log__ = 1
    # One ".links" marker per merged XML, plus pre-existing markers so that
    # stale ones get cleaned.
    target_files = [ x + ".links" for x in glob.glob( os.path.join( input_dir, "*.xml" ) ) ]
    target_files += glob.glob( os.path.join( input_dir, "*.links" ) )
    # NOTE(review): extended_test_results is accepted but unused here.
    for target in target_files:
        make_links_action( target.replace( ".links", "" )
                         , target
                         , output_dir
                         , tag
                         , run_date
                         , comment_file
                         , failures_markup_file
                         ).run()
class xmlgen( xml.sax.saxutils.XMLGenerator ):
    """XMLGenerator that emits the XML declaration only once, so several
    parsed documents can be concatenated into one output stream."""
    document_started = 0

    def startDocument( self ):
        if self.document_started:
            # a declaration was already written; swallow later ones
            return
        self.document_started = 1
        xml.sax.saxutils.XMLGenerator.startDocument( self )
def merge_processed_test_runs( test_runs_dir, tag, writer ):
# Concatenate every per-runner XML in test_runs_dir into one
# <all-test-runs> document written through `writer`. A run that fails to
# parse is rolled back by seeking/truncating the writer to the position
# recorded before that run started. NOTE: Python 2 "except Exception, msg".
utils.log( '' )
utils.log( 'merge_processed_test_runs: merging processed test runs from %s into a single XML...' % test_runs_dir )
__log__ = 1
all_runs_xml = xmlgen( writer, encoding='utf-8' )
all_runs_xml.startDocument()
all_runs_xml.startElement( 'all-test-runs', {} )
files = glob.glob( os.path.join( test_runs_dir, '*.xml' ) )
for test_run in files:
#file_pos = writer.stream.tell()
file_pos = writer.tell()
try:
utils.log( '  Writing "%s" into the resulting XML...' % test_run )
xml.sax.parse( test_run, all_runs_xml  )
except Exception, msg:
utils.log( '  Skipping "%s" due to errors (%s)' % ( test_run, msg ) )
#writer.stream.seek( file_pos )
#writer.stream.truncate()
writer.seek( file_pos )
writer.truncate()
all_runs_xml.endElement( 'all-test-runs' )
all_runs_xml.endDocument()
def execute_tasks(
tag
, user
, run_date
, comment_file
, results_dir
, output_dir
, reports
, warnings
, extended_test_results
, dont_collect_logs
, expected_results_file
, failures_markup_file
):
# Top-level pipeline: download uploaded archives (unless
# dont_collect_logs), unzip them, merge each with the expected results,
# produce the per-test links pages, concatenate everything into
# extended_test_results.xml, then render the HTML reports.
incoming_dir = os.path.join( results_dir, 'incoming', tag )
processed_dir = os.path.join( incoming_dir, 'processed' )
merged_dir = os.path.join( processed_dir, 'merged' )
if not os.path.exists( incoming_dir ):
os.makedirs( incoming_dir )
if not os.path.exists( processed_dir ):
os.makedirs( processed_dir )
if not os.path.exists( merged_dir ):
os.makedirs( merged_dir )
if not dont_collect_logs:
ftp_site = 'boost.cowic.de'
site_path = '/boost/do-not-publish-this-url/results/%s' % tag
ftp_task( ftp_site, site_path, incoming_dir )
unzip_archives_task( incoming_dir, processed_dir, utils.unzip )
merge_xmls_task( incoming_dir, processed_dir, merged_dir, expected_results_file, failures_markup_file, tag )
make_links_task( merged_dir
, output_dir
, tag
, run_date
, comment_file
, extended_test_results
, failures_markup_file )
results_xml_path = os.path.join( output_dir, 'extended_test_results.xml' )
#writer = codecs.open( results_xml_path, 'w', 'utf-8' )
writer = open( results_xml_path, 'w' )
merge_processed_test_runs( merged_dir, tag, writer )
writer.close()
make_result_pages(
extended_test_results
, expected_results_file
, failures_markup_file
, tag
, run_date
, comment_file
, output_dir
, reports
, warnings
)
def make_result_pages(
extended_test_results
, expected_results_file
, failures_markup_file
, tag
, run_date
, comment_file
, output_dir
, reports
, warnings
):
# Render the requested HTML report pages from the merged results XML by
# running the corresponding XSL transforms. `reports` selects which pages
# to build: 'i' issues, '<d|u>d' detailed, '<d|u>s' summary, '<d|u>dr'/'<d|u>sr'
# release variants, 'e' expected results, 'n' runner comment files.
utils.log( 'Producing the reports...' )
__log__ = 1
warnings_text = '+'.join( warnings )
if comment_file != '':
comment_file = os.path.abspath( comment_file )
links = os.path.join( output_dir, 'links.html' )
utils.makedirs( os.path.join( output_dir, 'output' ) )
for mode in ( 'developer', 'user' ):
utils.makedirs( os.path.join( output_dir, mode , 'output' ) )
issues = os.path.join( output_dir, 'developer', 'issues.html'  )
if 'i' in reports:
utils.log( '  Making issues list...' )
utils.libxslt(
utils.log
, extended_test_results
, xsl_path( 'issues_page.xsl' )
, issues
, {
'source':                 tag
, 'run_date':               run_date
, 'warnings':               warnings_text
, 'comment_file':           comment_file
, 'expected_results_file':  expected_results_file
, 'explicit_markup_file':   failures_markup_file
, 'release':                "yes"
}
)
# detailed per-test pages for each audience
for mode in ( 'developer', 'user' ):
if mode[0] + 'd' in reports:
utils.log( '  Making detailed %s  report...' % mode )
utils.libxslt(
utils.log
, extended_test_results
, xsl_path( 'result_page.xsl' )
, os.path.join( output_dir, mode, 'index.html' )
, {
'links_file':             'links.html'
, 'mode':                   mode
, 'source':                 tag
, 'run_date':               run_date
, 'warnings':               warnings_text
, 'comment_file':           comment_file
, 'expected_results_file':  expected_results_file
, 'explicit_markup_file' :  failures_markup_file
}
)
# summary pages for each audience
for mode in ( 'developer', 'user' ):
if mode[0] + 's' in reports:
utils.log( '  Making summary %s  report...' % mode )
utils.libxslt(
utils.log
, extended_test_results
, xsl_path( 'summary_page.xsl' )
, os.path.join( output_dir, mode, 'summary.html' )
, {
'mode' :                  mode
, 'source':                 tag
, 'run_date':               run_date
, 'warnings':               warnings_text
, 'comment_file':           comment_file
, 'explicit_markup_file' :  failures_markup_file
}
)
# release-only detailed pages
for mode in ( 'developer', 'user' ):
if mode[0] + 'dr' in reports:
utils.log( '  Making detailed %s release report...' % mode )
utils.libxslt(
utils.log
, extended_test_results
, xsl_path( 'result_page.xsl' )
, os.path.join( output_dir, mode, 'index_release.html' )
, {
'links_file':             'links.html'
, 'mode':                   mode
, 'source':                 tag
, 'run_date':               run_date
, 'warnings':               warnings_text
, 'comment_file':           comment_file
, 'expected_results_file':  expected_results_file
, 'explicit_markup_file' :  failures_markup_file
, 'release':                "yes"
}
)
# release-only summary pages
for mode in ( 'developer', 'user' ):
if mode[0] + 'sr' in reports:
utils.log( '  Making summary %s release report...' % mode )
utils.libxslt(
utils.log
, extended_test_results
, xsl_path( 'summary_page.xsl' )
, os.path.join( output_dir, mode, 'summary_release.html' )
, {
'mode' :                  mode
, 'source':                 tag
, 'run_date':               run_date
, 'warnings':               warnings_text
, 'comment_file':           comment_file
, 'explicit_markup_file' :  failures_markup_file
, 'release':                'yes'
}
)
if 'e' in reports:
utils.log( '  Generating expected_results ...' )
utils.libxslt(
utils.log
, extended_test_results
, xsl_path( 'produce_expected_results.xsl' )
, os.path.join( output_dir, 'expected_results.xml' )
)
if  'n' in reports:
utils.log( '  Making runner comment files...' )
utils.libxslt(
utils.log
, extended_test_results
, xsl_path( 'runners.xsl' )
, os.path.join( output_dir, 'runners.html' )
)
shutil.copyfile(
xsl_path( 'html/master.css' )
, os.path.join( output_dir, 'master.css' )
)
fix_file_names( output_dir )
def fix_file_names( dir ):
    """
    The current version of xslproc doesn't correctly handle
    spaces. We have to manually go through the
    result set and decode encoded spaces (%20).
    """
    utils.log( 'Fixing encoded file names...' )
    for root, dirs, files in os.walk( dir ):
        for name in files:
            if name.find( "%20" ) == -1:
                continue
            # rename "a%20b.html" -> "a b.html" in place
            utils.rename(
                utils.log
                , os.path.join( root, name )
                , os.path.join( root, name.replace( "%20", " " ) )
                )
def build_xsl_reports(
locate_root_dir
, tag
, expected_results_file
, failures_markup_file
, comment_file
, results_dir
, result_file_prefix
, dont_collect_logs = 0
, reports = report_types
, warnings = []
, user = None
, upload = False
):
# Public entry point: resolve paths, run the whole report pipeline via
# execute_tasks, then optionally tar and upload the output to SourceForge.
# NOTE(review): the mutable defaults (reports, warnings) are shared across
# calls; safe only as long as callees never mutate them — verify.
( run_date ) = time.strftime( '%Y-%m-%dT%H:%M:%SZ', time.gmtime() )
root_paths.append( locate_root_dir )
root_paths.append( results_dir )
bin_boost_dir = os.path.join( locate_root_dir, 'bin', 'boost' )
output_dir = os.path.join( results_dir, result_file_prefix )
utils.makedirs( output_dir )
if expected_results_file != '':
expected_results_file = os.path.abspath( expected_results_file )
else:
expected_results_file = os.path.abspath( map_path( 'empty_expected_results.xml' ) )
extended_test_results = os.path.join( output_dir, 'extended_test_results.xml' )
execute_tasks(
tag
, user
, run_date
, comment_file
, results_dir
, output_dir
, reports
, warnings
, extended_test_results
, dont_collect_logs
, expected_results_file
, failures_markup_file
)
if upload:
upload_dir = 'regression-logs/'
utils.log( 'Uploading  results into "%s" [connecting as %s]...' % ( upload_dir, user ) )
archive_name = '%s.tar.gz' % result_file_prefix
utils.tar(
os.path.join( results_dir, result_file_prefix )
, archive_name
)
utils.sourceforge.upload( os.path.join( results_dir, archive_name ), upload_dir, user )
utils.sourceforge.untar( os.path.join( upload_dir, archive_name ), user, background = True )
def accept_args( args ):
# Parse the command line into the positional-argument tuple consumed by
# build_xsl_reports. NOTE: dict.has_key below is Python 2 only.
args_spec = [
'locate-root='
, 'tag='
, 'expected-results='
, 'failures-markup='
, 'comment='
, 'results-dir='
, 'results-prefix='
, 'dont-collect-logs'
, 'reports='
, 'user='
, 'upload'
, 'help'
]
# defaults for optional arguments
# NOTE(review): the 'upload' default key has no leading dashes, unlike the
# others — looks inconsistent with the '--upload' lookup below; verify.
options = {
'--comment': ''
, '--expected-results': ''
, '--failures-markup': ''
, '--reports': string.join( report_types, ',' )
, '--tag': None
, '--user': None
, 'upload': False
}
utils.accept_args( args_spec, args, options, usage )
if not options.has_key( '--results-dir' ):
options[ '--results-dir' ] = options[ '--locate-root' ]
if not options.has_key( '--results-prefix' ):
options[ '--results-prefix' ] = 'all'
return (
options[ '--locate-root' ]
, options[ '--tag' ]
, options[ '--expected-results' ]
, options[ '--failures-markup' ]
, options[ '--comment' ]
, options[ '--results-dir' ]
, options[ '--results-prefix' ]
, options.has_key( '--dont-collect-logs' )
, options[ '--reports' ].split( ',' )
, options[ '--user' ]
, options.has_key( '--upload' )
)
def usage():
# Print command-line help to stdout (Python 2 print statements).
print 'Usage: %s [options]' % os.path.basename( sys.argv[0] )
print    '''
\t--locate-root       the same as --locate-root in compiler_status
\t--tag               the tag for the results (i.e. 'trunk')
\t--expected-results  the file with the results to be compared with
\t                    the current run
\t--failures-markup   the file with the failures markup
\t--comment           an html comment file (will be inserted in the reports)
\t--results-dir       the directory containing -links.html, -fail.html
\t                    files produced by compiler_status (by default the
\t                    same as specified in --locate-root)
\t--results-prefix    the prefix of -links.html, -fail.html
\t                    files produced by compiler_status
\t--user              SourceForge user name for a shell account
\t--upload            upload reports to SourceForge
The following options are useful in debugging:
\t--dont-collect-logs dont collect the test logs
\t--reports           produce only the specified reports
\t                        us - user summary
\t                        ds - developer summary
\t                        ud - user detailed
\t                        dd - developer detailed
\t                        l  - links
\t                        p  - patches
\t                        x  - extended results file
\t                        i  - issues
\t                        n  - runner comment files
'''
def main():
# Entry point: parse command-line arguments and build all reports.
build_xsl_reports( *accept_args( sys.argv[ 1 : ] ) )
if __name__ == '__main__':
main()

View file

@ -1,179 +0,0 @@
import ftplib
import optparse
import os
import time
import urlparse
import utils
import shutil
import sys
import zipfile
import xml.sax.saxutils
import utils.libxslt
def get_date( words ):
    """Parse the date fields of a unix `ls -l` style listing line.

    `words` is the whitespace-split line; fields 5..-2 hold month name, day,
    and either an HH:MM time (meaning the current year) or an explicit year.
    Returns a 9-tuple suitable for time.mktime (seconds and DST fields zero).
    """
    date = words[ 5: -1 ]
    month_names = [ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" ]
    year = time.localtime()[0] # if the year is not specified, assume the current year
    month = month_names.index( date[0] ) + 1
    day = int( date[1] )
    hours = 0
    minutes = 0
    if date[2].find( ":" ) != -1:
        ( hours, minutes ) = [ int(x) for x in date[2].split( ":" ) ]
    else:
        # listings show an explicit year instead of a time for older files;
        # hours/minutes cannot be recovered in that case
        year = int( date[2] )
    return ( year, month, day, hours, minutes, 0, 0, 0, 0 )
#def check_for_new_upload( target_dir, boostbook_info ):
def accept_args( args ):
# Parse command-line options for the BoostBook report tool; -d is required.
# NOTE(review): parser.parse_args() is called without the `args` parameter,
# so it actually parses sys.argv rather than the list passed in — verify.
parser = optparse.OptionParser()
parser.add_option( '-t', '--tag', dest='tag', help="the tag for the results  (i.e. 'RC_1_34_0')" )
parser.add_option( '-d', '--destination', dest='destination', help='destination directory' )
if len(args) == 0:
parser.print_help()
sys.exit( 1 )
(options, args) = parser.parse_args()
if not options.destination:
# Python 2 print statement
print '-d is required'
parser.print_help()
sys.exit( 1 )
return options
def unzip( archive_path, result_dir ):
    """Extract every member of the zip archive at `archive_path` under
    `result_dir`, creating intermediate directories as needed.
    """
    utils.log( 'Unpacking %s into %s' % ( archive_path, result_dir ) )
    # context managers guarantee the archive and each output file are closed
    # even when an extraction step raises (the original leaked handles)
    with zipfile.ZipFile( archive_path, 'r', zipfile.ZIP_DEFLATED ) as z:
        for f in z.infolist():
            dir = os.path.join( result_dir, os.path.dirname( f.filename ) )
            if not os.path.exists( dir ):
                os.makedirs( dir )
            with open( os.path.join( result_dir, f.filename ), 'wb' ) as result:
                result.write( z.read( f.filename ) )
def boostbook_report( options ):
# Check the ftp server for an updated BoostBook.zip for options.tag; when
# it is newer than the local copy, download it, unpack it, re-tar the docs,
# and render boostbook.log into an HTML build report.
# The "if 1:" blocks look like manually-toggled stages for debugging.
site = 'fx.meta-comm.com'
site_path = '/boost-regression/%s' % options.tag
utils.log( 'Opening %s ...' % site )
f = ftplib.FTP( site )
f.login()
utils.log( '  cd %s ...' % site_path )
f.cwd( site_path )
utils.log( '  dir' )
lines = []
f.dir( lambda x: lines.append( x ) )
word_lines = [ x.split( None, 8 ) for x in lines ]
boostbook_info = [ ( l[-1], get_date( l ) ) for l in word_lines if l[-1] == "BoostBook.zip" ]
if len( boostbook_info ) > 0:
boostbook_info = boostbook_info[0]
utils.log( 'BoostBook found! (%s)' % ( boostbook_info, ) )
local_copy = os.path.join( options.destination,'BoostBook-%s.zip' % options.tag )
if 1:
if os.path.exists( local_copy ):
utils.log( 'Local copy exists. Checking if it is older than uploaded one...' )
uploaded_mtime = time.mktime( boostbook_info[1] )
local_mtime = os.path.getmtime( local_copy )
utils.log( '    uploaded: %s %s, local: %s %s' %
( uploaded_mtime
, boostbook_info[1]
, local_mtime
, time.localtime( local_mtime )) )
# NOTE(review): `modtime` below is computed but never used
modtime = time.localtime( os.path.getmtime( local_copy ) )
if uploaded_mtime <= local_mtime:
utils.log( 'Local copy is newer: exiting' )
sys.exit()
if 1:
# download to a temp name, then atomically swap it in and stamp it
# with the remote modification time
temp = os.path.join( options.destination,'BoostBook.zip' )
result = open( temp, 'wb' )
f.retrbinary( 'RETR %s' % boostbook_info[0], result.write )
result.close()
if os.path.exists( local_copy ):
os.unlink( local_copy )
os.rename( temp, local_copy )
m = time.mktime( boostbook_info[1] )
os.utime( local_copy, ( m, m ) )
docs_name = os.path.splitext( os.path.basename( local_copy ) )[0]
if 1:
unpacked_docs_dir = os.path.join( options.destination, docs_name )
utils.log( 'Dir %s ' % unpacked_docs_dir )
if os.path.exists( unpacked_docs_dir ):
utils.log( 'Cleaning up...' )
shutil.rmtree( unpacked_docs_dir )
os.makedirs( unpacked_docs_dir )
unzip( local_copy, unpacked_docs_dir )
utils.system( [ 'cd %s' % unpacked_docs_dir
, 'tar -c -f ../%s.tar.gz -z --exclude=tarball *' % docs_name ] )
process_boostbook_build_log( os.path.join( unpacked_docs_dir, 'boostbook.log' ), read_timestamp( unpacked_docs_dir ) )
utils.libxslt( log
, os.path.abspath( os.path.join( unpacked_docs_dir, 'boostbook.log.xml' ) )
, os.path.abspath( os.path.join( os.path.dirname( __file__ ), 'xsl', 'v2', 'boostbook_log.xsl' ) )
, os.path.abspath( os.path.join( unpacked_docs_dir, 'boostbook.log.html' ) ) )
def log( msg ):
# Minimal logger: echo the message to stdout (Python 2 print statement).
print msg
def process_boostbook_build_log( path, timestamp ):
    """Convert the plain-text BoostBook build log at `path` into `path`.xml.

    Each line becomes a <line type="..."> element ('output', 'failure' or
    'skipped'); the overall <build result="..."> is 'failure' only when a
    "...failed" line is present ("runtime error:" lines are flagged but do
    not fail the build, matching the original behavior).
    """
    # read and classify the log lines first (the original never closed
    # either file handle)
    with open( path ) as log_file:
        lines = log_file.read().splitlines()
    output_lines = []
    result = 'success'
    for line in lines:
        type = 'output'
        if line.startswith( '...failed' ):
            type = 'failure'
            result = 'failure'
        if line.startswith( 'runtime error:' ):
            type = 'failure'
        if line.startswith( '...skipped' ):
            type = 'skipped'
        output_lines.append( ( type, line ) )
    # emit the XML document
    with open( path + '.xml', 'w' ) as f:
        g = xml.sax.saxutils.XMLGenerator( f )
        g.startDocument()
        g.startElement( 'build', { 'result': result, 'timestamp': timestamp } )
        for line in output_lines:
            g.startElement( 'line', { 'type': line[0]} )
            g.characters( line[1] )
            g.endElement( 'line' )
        g.endElement( 'build' )
        g.endDocument()
def read_timestamp( docs_directory ):
    """Return the first line of the 'timestamp' file in `docs_directory`."""
    # `with` replaces the original try/finally handle management
    with open( os.path.join( docs_directory, 'timestamp' ) ) as f:
        return f.readline()
def main():
options = accept_args( sys.argv[1:])
boostbook_report( options )
if __name__ == '__main__':
main()

View file

@ -1,146 +0,0 @@
#!/bin/sh
#~ Copyright Redshift Software, Inc. 2007-2008
#~ Distributed under the Boost Software License, Version 1.0.
#~ (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
set -e
# Full pipeline for one branch directory: refresh the tools checkout,
# build the reports, then upload them.
# $1 = branch/tag directory (trunk, release, ...); $2 passed through.
build_all()
{
update_tools ${1} ${2}
build_results ${1} ${2}
upload_results ${1} ${2}
}
# Refresh the boost checkout via svn; restores the caller's cwd afterwards.
# NOTE(review): ${1} and ${2} are accepted by the caller but unused here.
update_tools()
{
cwd=`pwd`
cd boost
svn up
cd "${cwd}"
}
# Write comment.html: an HTML table with machine/environment details
# (uname, uptime, vmstat, tool versions, previous/current run times) that
# gets embedded into the generated reports. Also records the current time
# in previous.txt for the next run.
report_info()
{
cat - > comment.html <<HTML
<table style="border-spacing: 0.5em;">
    <tr>
        <td style="vertical-align: top;"><tt>uname</tt></td>
        <td>
            <pre style="border: 1px solid #666; overflow: auto;">
`uname -a`
            </pre>
        </td>
    </tr>
    <tr>
        <td style="vertical-align: top;"><tt>uptime</tt></td>
        <td>
            <pre style="border: 1px solid #666; overflow: auto;">
`uptime`
            </pre>
        </td>
    </tr>
    <tr>
        <td style="vertical-align: top;"><tt>vmstat</tt></td>
        <td>
            <pre style="border: 1px solid #666; overflow: auto;">
`vmstat`
            </pre>
        </td>
    </tr>
    <tr>
        <td style="vertical-align: top;"><tt>xsltproc</tt></td>
        <td>
            <pre style="border: 1px solid #666; overflow: auto;">
`xsltproc --version`
            </pre>
        </td>
    </tr>
    <tr>
        <td style="vertical-align: top;"><tt>python</tt></td>
        <td>
            <pre style="border: 1px solid #666; overflow: auto;">
`python --version 2>&1`
            </pre>
        </td>
    </tr>
    <tr>
        <td style="vertical-align: top;">previous run</td>
        <td>
            <pre style="border: 1px solid #666; overflow: auto;">
`cat previous.txt`
            </pre>
        </td>
    </tr>
    <tr>
        <td style="vertical-align: top;">current run</td>
        <td>
            <pre style="border: 1px solid #666; overflow: auto;">
`date -u`
            </pre>
        </td>
    </tr>
</table>
HTML
# remember when this run happened, for the "previous run" row next time
date -u > previous.txt
}
# Run boost_wide_report.py for the given branch directory (${1}); the case
# below maps the directory name to the svn tag and the set of reports.
build_results()
{
cwd=`pwd`
cd ${1}
root=`pwd`
boost=${cwd}/boost
case ${1} in
trunk)
tag=trunk
reports="dd,ds,i,n"
;;
release)
tag=branches/release
reports="dd,ds,i,n"
;;
release-1_35_0)
tag=tags/release/Boost_1_35_0
reports="dd,ud,ds,us,ddr,udr,dsr,usr,i,n,e"
;;
release-1_36_0)
tag=tags/release/Boost_1_36_0
reports="dd,ud,ds,us,ddr,udr,dsr,usr,i,n,e"
;;
esac
report_info
python "${boost}/tools/regression/xsl_reports/boost_wide_report.py" \
--locate-root="${root}" \
--tag=${tag} \
--expected-results="${boost}/status/expected_results.xml" \
--failures-markup="${boost}/status/explicit-failures-markup.xml" \
--comment="comment.html" \
--user="" \
--reports=${reports}
cd "${cwd}"
}
# Zip the generated report for ${1} (excluding raw XML), upload it with a
# ".uploading" suffix, then publish it server-side by copying into the live
# directory — so the live file is never seen half-written.
upload_results()
{
cwd=`pwd`
upload_dir=/home/grafik/www.boost.org/testing
cd ${1}/all
rm -f ../../${1}.zip*
#~ zip -q -r -9 ../../${1} * -x '*.xml'
7za a -tzip -mx=9 ../../${1}.zip * '-x!*.xml'
cd "${cwd}"
mv ${1}.zip ${1}.zip.uploading
rsync -vuz --rsh=ssh --stats \
${1}.zip.uploading grafik@beta.boost.org:/${upload_dir}/incoming/
ssh grafik@beta.boost.org \
cp ${upload_dir}/incoming/${1}.zip.uploading ${upload_dir}/live/${1}.zip
mv ${1}.zip.uploading ${1}.zip
}
build_all ${1} ${2}

View file

@ -1,840 +0,0 @@
#
# Copyright (C) 2005, 2007 The Trustees of Indiana University
# Author: Douglas Gregor
#
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
#
import re
import smtplib
import os
import time
import string
import datetime
import sys
# Identity the generated reports claim to be from, and the two mailing
# lists that receive the summary messages (used as sendmail targets below).
report_author = "Douglas Gregor <dgregor@osl.iu.edu>"
boost_dev_list = "Boost Developer List <boost@lists.boost.org>"
boost_testing_list = "Boost Testing List <boost-testing@lists.boost.org>"
def sorted_keys( dict ):
    """Return the keys of `dict` as a new list in ascending order.

    The argument is left unmodified.  Uses the sorted() builtin
    (available since Python 2.4) instead of list.sort(): the original
    in-place sort breaks when dict.keys() returns a view object
    (Python 3) rather than a list.
    """
    return sorted( dict )
class Platform:
    """A single test platform and every failure recorded against it."""
    def __init__(self, name):
        # Display name of the platform, plus the Failure objects seen on
        # it and the people responsible for keeping it healthy.
        self.name = name
        self.failures = []
        self.maintainers = []
    def addFailure(self, failure):
        """Record one more test failure observed on this platform."""
        self.failures.append(failure)
    def isBroken(self):
        """A platform carrying more than 300 failures is treated as
        broken, i.e. the failures are blamed on the platform/harness
        rather than on individual libraries."""
        return len(self.failures) > 300
    def addMaintainer(self, maintainer):
        """Register another maintainer for this platform."""
        self.maintainers.append(maintainer)
class Failure:
    """One failing (test, platform) pair from the regression report."""
    def __init__(self, test, platform):
        # The Test this failure belongs to and the Platform it occurred on.
        self.test = test
        self.platform = platform
class Test:
    """Collects the failures of one named test case within a library."""
    def __init__(self, library, name):
        self.library = library
        self.name = name
        self.failures = []
    def addFailure(self, failure):
        """Attach another Failure of this test."""
        self.failures.append(failure)
    def numFailures(self):
        """Total failure count, broken platforms included."""
        return len(self.failures)
    def numReportableFailures(self):
        """Count only failures on non-broken platforms; these are the
        ones worth reporting to the library's maintainers."""
        reportable = [f for f in self.failures
                      if not f.platform.isBroken()]
        return len(reportable)
class Library:
    """Aggregates the failing tests of a single Boost library."""
    def __init__(self, name):
        self.name = name
        self.maintainers = []
        self.tests = []
    def addTest(self, test):
        """Attach one more failing Test to this library."""
        self.tests.append(test)
    def addMaintainer(self, maintainer):
        """Register another maintainer of this library."""
        self.maintainers.append(maintainer)
    def numFailures(self):
        """Sum of all failures across this library's tests."""
        return sum([t.numFailures() for t in self.tests])
    def numReportableFailures(self):
        """Sum of failures occurring on non-broken platforms only."""
        return sum([t.numReportableFailures() for t in self.tests])
class Maintainer:
    """
    Information about the maintainer of a library
    """
    def __init__(self, name, email):
        # Display name, contact address, and the Library objects this
        # person is responsible for.
        self.name = name
        self.email = email
        self.libraries = list()
        return
    def addLibrary(self, library):
        # Attach one more Library to this maintainer.
        self.libraries.append(library)
        return
    def composeEmail(self, report):
        """
        Composes an e-mail to this maintainer with information about
        the failures in his or her libraries, omitting those that come
        from "broken" platforms. Returns the e-mail text if a message
        needs to be sent, or None otherwise.
        """
        # Determine if we need to send a message to this developer.
        requires_message = False
        for library in self.libraries:
            if library.numReportableFailures() > 0:
                requires_message = True
                break
        if not requires_message:
            return None
        # Build the message header.  Text inside the triple-quoted
        # strings sits at column zero so it comes out flush-left.
        message = """From: Douglas Gregor <dgregor@osl.iu.edu>
To: """
        message += self.name + ' <' + self.email + '>'
        message += """
Reply-To: boost@lists.boost.org
Subject: Failures in your Boost libraries as of """
        message += str(datetime.date.today()) + " [" + report.branch + "]"
        message += """
You are receiving this report because one or more of the libraries you
maintain has regression test failures that are not accounted for.
A full version of the report is sent to the Boost developer's mailing
list.
Detailed report:
"""
        message += ' ' + report.url + """
There are failures in these libraries you maintain:
"""
        # List the libraries this maintainer is responsible for and
        # the number of reportable failures in that library.
        for library in self.libraries:
            num_failures = library.numReportableFailures()
            if num_failures > 0:
                message += ' ' + library.name + ' (' + str(num_failures) + ')\n'
                pass
            pass
        # Provide the details for the failures in each library.
        for library in self.libraries:
            if library.numReportableFailures() > 0:
                message += '\n|' + library.name + '|\n'
                for test in library.tests:
                    if test.numReportableFailures() > 0:
                        message += ' ' + test.name + ':'
                        for failure in test.failures:
                            if not failure.platform.isBroken():
                                message += ' ' + failure.platform.name
                                pass
                            pass
                        message += '\n'
                        pass
                    pass
                pass
            pass
        return message
class PlatformMaintainer:
    """
    Information about the platform maintainer of a library
    """
    def __init__(self, name, email):
        # Display name, contact address, and the Platform objects this
        # person is responsible for.
        self.name = name
        self.email = email
        self.platforms = list()
        return
    def addPlatform(self, runner, platform):
        # NOTE(review): `runner` is accepted but never stored or used
        # here - confirm it is intentionally ignored.
        self.platforms.append(platform)
        return
    def composeEmail(self, report):
        """
        Composes an e-mail to this platform maintainer if one or more of
        the platforms s/he maintains has a large number of failures.
        Returns the e-mail text if a message needs to be sent, or None
        otherwise.
        """
        # Determine if we need to send a message to this developer.
        requires_message = False
        for platform in self.platforms:
            if platform.isBroken():
                requires_message = True
                break
        if not requires_message:
            return None
        # Build the message header
        message = """From: Douglas Gregor <dgregor@osl.iu.edu>
To: """
        message += self.name + ' <' + self.email + '>'
        message += """
Reply-To: boost@lists.boost.org
Subject: Large number of Boost failures on a platform you maintain as of """
        message += str(datetime.date.today()) + " [" + report.branch + "]"
        message += """
You are receiving this report because one or more of the testing
platforms that you maintain has a large number of Boost failures that
are not accounted for. A full version of the report is sent to the
Boost developer's mailing list.
Detailed report:
"""
        message += ' ' + report.url + """
The following platforms have a large number of failures:
"""
        for platform in self.platforms:
            if platform.isBroken():
                message += (' ' + platform.name + ' ('
                            + str(len(platform.failures)) + ' failures)\n')
        return message
class Report:
    """
    The complete report of all failing test cases.

    Aggregates the Library/Test/Failure/Platform objects parsed from
    the downloaded "issues-email.txt" summary, together with the
    maintainer contact data parsed from the libs/*maintainers.txt
    files, and composes the various notification e-mails from them.
    """
    def __init__(self, branch = 'trunk'):
        self.branch = branch
        # Report timestamp and detailed-report URL; both are filled in
        # by parseIssuesEmail().
        self.date = None
        self.url = None
        # Name-keyed maps populated while parsing.
        self.libraries = dict()
        self.platforms = dict()
        self.maintainers = dict()
        self.platform_maintainers = dict()
        return
    def getPlatform(self, name):
        """
        Retrieve the platform with the given name.
        """
        # Created on first use.
        if self.platforms.has_key(name):
            return self.platforms[name]
        else:
            self.platforms[name] = Platform(name)
            return self.platforms[name]
    def getMaintainer(self, name, email):
        """
        Retrieve the maintainer with the given name and e-mail address.
        """
        # Created on first use; keyed by name only.
        if self.maintainers.has_key(name):
            return self.maintainers[name]
        else:
            self.maintainers[name] = Maintainer(name, email)
            return self.maintainers[name]
    def getPlatformMaintainer(self, name, email):
        """
        Retrieve the platform maintainer with the given name and
        e-mail address.
        """
        if self.platform_maintainers.has_key(name):
            return self.platform_maintainers[name]
        else:
            self.platform_maintainers[name] = PlatformMaintainer(name, email)
            return self.platform_maintainers[name]
    def parseIssuesEmail(self):
        """
        Try to parse the issues e-mail file. Returns True if everything was
        successful, false otherwise.
        """
        # See if we actually got the file
        if not os.path.isfile('issues-email.txt'):
            return False
        # Determine the set of libraries that have unresolved failures
        date_regex = re.compile('Report time: (.*)')
        url_regex = re.compile(' (http://.*)')
        library_regex = re.compile('\|(.*)\|')
        failure_regex = re.compile(' ([^:]*): (.*)')
        current_library = None
        for line in file('issues-email.txt', 'r'):
            # Check for the report time line
            m = date_regex.match(line)
            if m:
                self.date = m.group(1)
                continue
            # Check for the detailed report URL
            m = url_regex.match(line)
            if m:
                self.url = m.group(1)
                continue
            # Check for a library header
            m = library_regex.match(line)
            if m:
                current_library = Library(m.group(1))
                self.libraries[m.group(1)] = current_library
                continue
            # Check for a library test and its failures
            m = failure_regex.match(line)
            if m:
                # Line format: "<test-name>: <platform> <platform> ..."
                test = Test(current_library, m.group(1))
                for platform_name in re.split('\s*', m.group(2)):
                    if platform_name != '':
                        # Cross-link the Failure into both the test and
                        # the platform so counts work from either side.
                        platform = self.getPlatform(platform_name)
                        failure = Failure(test, platform)
                        test.addFailure(failure)
                        platform.addFailure(failure)
                        pass
                current_library.addTest(test)
                continue
            pass
        return True
    def getIssuesEmail(self):
        """
        Retrieve the issues email from beta.boost.org, trying a few
        times in case something wonky is happening. If we can retrieve
        the file, calls parseIssuesEmail and return True; otherwise,
        return False.
        """
        base_url = "http://beta.boost.org/development/tests/"
        base_url += self.branch
        base_url += "/developer/";
        # NOTE(review): got_issues is assigned but never read.
        got_issues = False
        # Ping the server by looking for an HTML file
        print "Pinging the server to initiate extraction..."
        ping_url = base_url + "issues.html"
        os.system('curl -O ' + ping_url)
        os.system('rm -f issues.html')
        # Up to 30 fetch attempts, 30 seconds apart.
        for x in range(30):
            # Update issues-email.txt
            url = base_url + "issues-email.txt"
            print 'Retrieving issues email from ' + url
            os.system('rm -f issues-email.txt')
            os.system('curl -O ' + url)
            if self.parseIssuesEmail():
                return True
            print 'Failed to fetch issues email. '
            time.sleep (30)
        return False
    # Parses the file $BOOST_ROOT/libs/maintainers.txt to create a hash
    # mapping from the library name to the list of maintainers.
    def parseLibraryMaintainersFile(self):
        """
        Parse the maintainers file in ../../../libs/maintainers.txt to
        collect information about the maintainers of broken libraries.
        """
        # Line format: "<library> Name <addr>, Name <addr>, ...";
        # " - at - " obfuscation in addresses is undone by at_regex.
        lib_maintainer_regex = re.compile('(\S+)\s*(.*)')
        name_email_regex = re.compile('\s*(\w*(\s*\w+)+)\s*<\s*(\S*(\s*\S+)+)\S*>')
        at_regex = re.compile('\s*-\s*at\s*-\s*')
        for line in file('../../../libs/maintainers.txt', 'r'):
            if line.startswith('#'):
                continue
            m = lib_maintainer_regex.match (line)
            if m:
                libname = m.group(1)
                # Only libraries that actually have failures are linked.
                if self.libraries.has_key(m.group(1)):
                    library = self.libraries[m.group(1)]
                    for person in re.split('\s*,\s*', m.group(2)):
                        nmm = name_email_regex.match(person)
                        if nmm:
                            name = nmm.group(1)
                            email = nmm.group(3)
                            email = at_regex.sub('@', email)
                            maintainer = self.getMaintainer(name, email)
                            maintainer.addLibrary(library)
                            library.addMaintainer(maintainer)
                            pass
                        pass
                    pass
                pass
            pass
        pass
    # Parses the file $BOOST_ROOT/libs/platform_maintainers.txt to
    # create a hash mapping from the platform name to the list of
    # maintainers.
    def parsePlatformMaintainersFile(self):
        """
        Parse the platform maintainers file in
        ../../../libs/platform_maintainers.txt to collect information
        about the maintainers of the various platforms.
        """
        # Line format: "<runner-or-quoted-name> <platform> Name <addr>, ...".
        platform_maintainer_regex = re.compile('([A-Za-z0-9_.-]*|"[^"]*")\s+(\S+)\s+(.*)')
        name_email_regex = re.compile('\s*(\w*(\s*\w+)+)\s*<\s*(\S*(\s*\S+)+)\S*>')
        at_regex = re.compile('\s*-\s*at\s*-\s*')
        for line in file('../../../libs/platform_maintainers.txt', 'r'):
            if line.startswith('#'):
                continue
            m = platform_maintainer_regex.match (line)
            if m:
                platformname = m.group(2)
                if self.platforms.has_key(platformname):
                    platform = self.platforms[platformname]
                    for person in re.split('\s*,\s*', m.group(3)):
                        nmm = name_email_regex.match(person)
                        if nmm:
                            name = nmm.group(1)
                            email = nmm.group(3)
                            email = at_regex.sub('@', email)
                            maintainer = self.getPlatformMaintainer(name, email)
                            maintainer.addPlatform(m.group(1), platform)
                            platform.addMaintainer(maintainer)
                            pass
                        pass
                    pass
                pass
            pass
    def numFailures(self):
        # Total failures across all libraries, broken platforms included.
        count = 0
        for library in self.libraries:
            count += self.libraries[library].numFailures()
            pass
        return count
    def numReportableFailures(self):
        # Failures on non-broken platforms only.
        count = 0
        for library in self.libraries:
            count += self.libraries[library].numReportableFailures()
            pass
        return count
    def composeSummaryEmail(self):
        """
        Compose a message to send to the Boost developer's
        list. Return the message and return it.
        """
        # NOTE(review): this method reads the module-global `branch`
        # rather than self.branch - confirm that is intended.
        message = """From: Douglas Gregor <dgregor@osl.iu.edu>
To: boost@lists.boost.org
Reply-To: boost@lists.boost.org
Subject: [Report] """
        message += str(self.numFailures()) + " failures on " + branch
        if branch != 'trunk':
            message += ' branch'
        message += " (" + str(datetime.date.today()) + ")"
        message += """
Boost regression test failures
"""
        message += "Report time: " + self.date + """
This report lists all regression test failures on high-priority platforms.
Detailed report:
"""
        message += ' ' + self.url + '\n\n'
        if self.numFailures() == 0:
            message += "No failures! Yay!\n"
            return message
        # List the platforms that are broken
        any_broken_platforms = self.numReportableFailures() < self.numFailures()
        if any_broken_platforms:
            message += """The following platforms have a large number of failures:
"""
            for platform in sorted_keys( self.platforms ):
                if self.platforms[platform].isBroken():
                    message += (' ' + platform + ' ('
                                + str(len(self.platforms[platform].failures))
                                + ' failures)\n')
            message += """
Failures on these "broken" platforms will be omitted from the results below.
Please see the full report for information about these failures.
"""
        # Display the number of failures
        message += (str(self.numReportableFailures()) + ' failures in ' +
                    str(len(self.libraries)) + ' libraries')
        if any_broken_platforms:
            message += (' (plus ' + str(self.numFailures() - self.numReportableFailures())
                        + ' from broken platforms)')
        message += '\n'
        # Display the number of failures per library
        for k in sorted_keys( self.libraries ):
            library = self.libraries[k]
            num_failures = library.numFailures()
            message += ' ' + library.name + ' ('
            if library.numReportableFailures() > 0:
                message += (str(library.numReportableFailures())
                            + " failures")
            if library.numReportableFailures() < num_failures:
                if library.numReportableFailures() > 0:
                    message += ', plus '
                message += (str(num_failures-library.numReportableFailures())
                            + ' failures on broken platforms')
            message += ')\n'
            pass
        message += '\n'
        # Provide the details for the failures in each library.
        for k in sorted_keys( self.libraries ):
            library = self.libraries[k]
            if library.numReportableFailures() > 0:
                message += '\n|' + library.name + '|\n'
                for test in library.tests:
                    if test.numReportableFailures() > 0:
                        message += ' ' + test.name + ':'
                        for failure in test.failures:
                            platform = failure.platform
                            if not platform.isBroken():
                                message += ' ' + platform.name
                        message += '\n'
        return message
    def composeTestingSummaryEmail(self):
        """
        Compose a message to send to the Boost Testing list. Returns
        the message text if a message is needed, returns None
        otherwise.
        """
        # NOTE(review): like composeSummaryEmail, this reads the
        # module-global `branch` rather than self.branch.
        brokenPlatforms = 0
        for platform in sorted_keys( self.platforms ):
            if self.platforms[platform].isBroken():
                brokenPlatforms = brokenPlatforms + 1
        if brokenPlatforms == 0:
            return None;
        message = """From: Douglas Gregor <dgregor@osl.iu.edu>
To: boost-testing@lists.boost.org
Reply-To: boost-testing@lists.boost.org
Subject: [Report] """
        message += str(brokenPlatforms) + " potentially broken platforms on " + branch
        if branch != 'trunk':
            message += ' branch'
        message += " (" + str(datetime.date.today()) + ")"
        message += """
Potentially broken platforms for Boost regression testing
"""
        message += "Report time: " + self.date + """
This report lists the high-priority platforms that are exhibiting a
large number of regression test failures, which might indicate a problem
with the test machines or testing harness.
Detailed report:
"""
        message += ' ' + self.url + '\n'
        message += """
Platforms with a large number of failures:
"""
        for platform in sorted_keys( self.platforms ):
            if self.platforms[platform].isBroken():
                message += (' ' + platform + ' ('
                            + str(len(self.platforms[platform].failures))
                            + ' failures)\n')
        return message
# Send a message to "person" (a maintainer of a library that is
# failing).
# maintainers is the result of get_library_maintainers()
def send_individualized_message (branch, person, maintainers):
# There are several states we could be in:
# 0 Initial state. Eat everything up to the "NNN failures in MMM
# libraries" line
# 1 Suppress output within this library
# 2 Forward output within this library
state = 0
failures_in_lib_regex = re.compile('\d+ failur.*\d+ librar')
lib_failures_regex = re.compile(' (\S+) \((\d+)\)')
lib_start_regex = re.compile('\|(\S+)\|')
general_pass_regex = re.compile(' http://')
for line in file('issues-email.txt', 'r'):
if state == 0:
lfm = lib_failures_regex.match(line)
if lfm:
# Pass the line through if the current person is a
# maintainer of this library
if lfm.group(1) in maintainers and person in maintainers[lfm.group(1)]:
message += line
print line,
elif failures_in_lib_regex.match(line):
message += "\nThere are failures in these libraries you maintain:\n"
elif general_pass_regex.match(line):
message += line
lib_start = lib_start_regex.match(line)
if lib_start:
if state == 0:
message += '\n'
if lib_start.group(1) in maintainers and person in maintainers[lib_start.group(1)]:
message += line
state = 2
else:
state = 1
else:
if state == 1:
pass
elif state == 2:
message += line
if '--debug' in sys.argv:
print '-----------------Message text----------------'
print message
else:
print
if '--send' in sys.argv:
print "Sending..."
smtp = smtplib.SMTP('milliways.osl.iu.edu')
smtp.sendmail(from_addr = 'Douglas Gregor <dgregor@osl.iu.edu>',
to_addrs = person[1],
msg = message)
print "Done."
# Send a message to the developer's list
def send_boost_developers_message(branch, maintainers, failing_libraries):
    """Forward issues-email.txt to the Boost developer list, inserting a
    warning block for failing libraries with no maintainer on file.
    Only sends mail when --send is on the command line; --debug prints
    the composed message."""
    to_line = 'boost@lists.boost.org'
    from_line = 'Douglas Gregor <dgregor@osl.iu.edu>'
    message = """From: Douglas Gregor <dgregor@osl.iu.edu>
To: boost@lists.boost.org
Reply-To: boost@lists.boost.org
Subject: Boost regression testing notification ("""
    message += str(datetime.date.today()) + " [" + branch + "]"
    message += ")"
    message += """
"""
    for line in file('issues-email.txt', 'r'):
        # Right before the detailed report, put out a warning message if
        # any libraries with failures to not have maintainers listed.
        if line.startswith('Detailed report:'):
            missing_maintainers = False
            for lib in failing_libraries:
                if not(lib in maintainers) or maintainers[lib] == list():
                    missing_maintainers = True
            if missing_maintainers:
                message += """WARNING: The following libraries have failing regression tests but do
not have a maintainer on file. Once a maintainer is found, add an
entry to libs/maintainers.txt to eliminate this message.
"""
                for lib in failing_libraries:
                    if not(lib in maintainers) or maintainers[lib] == list():
                        message += ' ' + lib + '\n'
                message += '\n'
        message += line
    if '--send' in sys.argv:
        print 'Sending notification email...'
        smtp = smtplib.SMTP('milliways.osl.iu.edu')
        smtp.sendmail(from_addr = from_line, to_addrs = to_line, msg = message)
        print 'Done.'
    if '--debug' in sys.argv:
        print "----------Boost developer's message text----------"
        print message
###############################################################################
# Main program #
###############################################################################
# Parse command-line options
# Recognized flags: --branch=<name>, --no-get (reuse an existing local
# issues-email.txt), --send (actually send mail), --debug (print the
# composed messages).
branch = "trunk"
for arg in sys.argv:
    if arg.startswith("--branch="):
        branch = arg[len("--branch="):]
report = Report(branch)
# Try to parse the issues e-mail
if '--no-get' in sys.argv:
    okay = report.parseIssuesEmail()
else:
    okay = report.getIssuesEmail()
if not okay:
    # Could not obtain or parse the report: notify the report author
    # (when sending is enabled) and bail out.
    print 'Aborting.'
    if '--send' in sys.argv:
        message = """From: Douglas Gregor <dgregor@osl.iu.edu>
To: Douglas Gregor <dgregor@osl.iu.edu>
Reply-To: boost@lists.boost.org
Subject: Regression status script failed on """
        message += str(datetime.date.today()) + " [" + branch + "]"
        smtp = smtplib.SMTP('milliways.osl.iu.edu')
        smtp.sendmail(from_addr = 'Douglas Gregor <dgregor@osl.iu.edu>',
                      to_addrs = 'dgregor@osl.iu.edu',
                      msg = message)
    sys.exit(1)
# Try to parse maintainers information
report.parseLibraryMaintainersFile()
report.parsePlatformMaintainersFile()
# Generate individualized e-mail for library maintainers
for maintainer_name in report.maintainers:
    maintainer = report.maintainers[maintainer_name]
    email = maintainer.composeEmail(report)
    if email:
        if '--send' in sys.argv:
            print ('Sending notification email to ' + maintainer.name + '...')
            smtp = smtplib.SMTP('milliways.osl.iu.edu')
            smtp.sendmail(from_addr = report_author,
                          to_addrs = maintainer.email,
                          msg = email)
            print 'done.\n'
        else:
            print 'Would send a notification e-mail to',maintainer.name
        if '--debug' in sys.argv:
            print ('Message text for ' + maintainer.name + ':\n')
            print email
# Generate individualized e-mail for platform maintainers
for maintainer_name in report.platform_maintainers:
    maintainer = report.platform_maintainers[maintainer_name]
    email = maintainer.composeEmail(report)
    if email:
        if '--send' in sys.argv:
            print ('Sending notification email to ' + maintainer.name + '...')
            smtp = smtplib.SMTP('milliways.osl.iu.edu')
            smtp.sendmail(from_addr = report_author,
                          to_addrs = maintainer.email,
                          msg = email)
            print 'done.\n'
        else:
            print 'Would send a notification e-mail to',maintainer.name
        if '--debug' in sys.argv:
            print ('Message text for ' + maintainer.name + ':\n')
            print email
# Overall failure summary for the developer list.
email = report.composeSummaryEmail()
if '--send' in sys.argv:
    print 'Sending summary email to Boost developer list...'
    smtp = smtplib.SMTP('milliways.osl.iu.edu')
    smtp.sendmail(from_addr = report_author,
                  to_addrs = boost_dev_list,
                  msg = email)
    print 'done.\n'
if '--debug' in sys.argv:
    print 'Message text for summary:\n'
    print email
# Broken-platform summary for the testing list (None when no platform
# is broken, in which case nothing is sent).
email = report.composeTestingSummaryEmail()
if email:
    if '--send' in sys.argv:
        print 'Sending summary email to Boost testing list...'
        smtp = smtplib.SMTP('milliways.osl.iu.edu')
        smtp.sendmail(from_addr = report_author,
                      to_addrs = boost_testing_list,
                      msg = email)
        print 'done.\n'
    if '--debug' in sys.argv:
        print 'Message text for testing summary:\n'
        print email
if not ('--send' in sys.argv):
    print 'Chickening out and not sending any e-mail.'
    print 'Use --send to actually send e-mail, --debug to see e-mails.'

View file

@ -1,5 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<root>
<expected-failures>
</expected-failures>
</root>

View file

@ -1,174 +0,0 @@
# Copyright (c) MetaCommunications, Inc. 2003-2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import tarfile
import shutil
import time
import os.path
import string
import sys
import traceback
def retry( f, args, max_attempts=5, sleep_secs=10 ):
    """Call f(*args), retrying on any exception.

    Makes max_attempts + 1 calls in total (the range counts
    max_attempts down to 0), sleeping sleep_secs between attempts and
    re-raising the exception when the final attempt also fails.
    """
    for attempts in range( max_attempts, -1, -1 ):
        try:
            return f( *args )
        except Exception, msg:
            utils.log( '%s failed with message "%s"' % ( f.__name__, msg ) )
            if attempts == 0:
                utils.log( 'Giving up.' )
                raise
            utils.log( 'Retrying (%d more attempts).' % attempts )
            time.sleep( sleep_secs )
def rmtree( path ):
    """Forcibly delete the directory tree at `path`; a missing path is
    silently ignored.

    On win32 a `del /f /s /q` pass clears the files first (output
    discarded), then shutil.rmtree drops the tree; elsewhere a plain
    `rm -f -r` does the whole job.
    """
    if not os.path.exists( path ):
        return
    if sys.platform == 'win32':
        os.system( 'del /f /s /q "%s" >nul 2>&1' % path )
        shutil.rmtree( path )
    else:
        os.system( 'rm -f -r "%s"' % path )
def svn_command( command ):
    """Log `command`, run it through os.system, and raise Exception on
    a non-zero exit status."""
    utils.log( 'Executing SVN command "%s"' % command )
    status = os.system( command )
    if status == 0:
        return
    raise Exception( 'SVN command "%s" failed with code %d' % ( command, status ) )
def svn_export( sources_dir, user, tag ):
    """Export the given Boost SVN tag into sources_dir, retrying the
    command via retry()/svn_command on failure.

    Anonymous exports go over plain http; a named user exports over
    https with --non-interactive.
    """
    if user is None or user == 'anonymous':
        command = 'svn export --force http://svn.boost.org/svn/boost/%s %s' % ( tag, sources_dir )
    else:
        command = 'svn export --force --non-interactive --username=%s https://svn.boost.org/svn/boost/%s %s' \
            % ( user, tag, sources_dir )
    # NOTE(review): this chdirs into basename(sources_dir) *before*
    # running the export, even though the command already carries the
    # full sources_dir path - confirm the chdir target is intended.
    os.chdir( os.path.basename( sources_dir ) )
    retry(
        svn_command
        , ( command, )
        )
def make_tarball(
        working_dir
        , tag
        , user
        , site_dir
        ):
    """Export the SVN `tag` and package it as boost-<tag>.tar.bz2.

    Produces, in working_dir: the .tar.bz2 archive, a .timestamp file
    holding the creation time, and a .md5 checksum file.  When
    site_dir is given, all three are moved there (the tarball via a
    temp subdirectory so it appears atomically).  The exported source
    tree is removed afterwards.
    """
    timestamp = time.time()
    timestamp_suffix = time.strftime( '%y-%m-%d-%H%M', time.gmtime( timestamp ) )
    # Last path component of the tag names the snapshot.
    tag_suffix = tag.split( '/' )[-1]
    sources_dir = os.path.join(
        working_dir
        , 'boost-%s-%s' % ( tag_suffix, timestamp_suffix )
        )
    if os.path.exists( sources_dir ):
        utils.log( 'Directory "%s" already exists, cleaning it up...' % sources_dir )
        rmtree( sources_dir )
    try:
        os.mkdir( sources_dir )
        utils.log( 'Exporting files from SVN...' )
        svn_export( sources_dir, user, tag )
    except:
        # Don't leave a half-exported tree behind.
        utils.log( 'Cleaning up...' )
        rmtree( sources_dir )
        raise
    tarball_name = 'boost-%s.tar.bz2' % tag_suffix
    tarball_path = os.path.join( working_dir, tarball_name )
    utils.log( 'Archiving "%s" to "%s"...' % ( sources_dir, tarball_path ) )
    tar = tarfile.open( tarball_path, 'w|bz2' )
    tar.posix = False # see http://tinyurl.com/4ebd8
    tar.add( sources_dir, os.path.basename( sources_dir ) )
    tar.close()
    tarball_timestamp_path = os.path.join( working_dir, 'boost-%s.timestamp' % tag_suffix )
    utils.log( 'Writing timestamp into "%s"...' % tarball_timestamp_path )
    timestamp_file = open( tarball_timestamp_path, 'w' )
    timestamp_file.write( '%f' % timestamp )
    timestamp_file.close()
    md5sum_path = os.path.join( working_dir, 'boost-%s.md5' % tag_suffix )
    utils.log( 'Writing md5 checksum into "%s"...' % md5sum_path )
    # md5sum is run from the tarball's directory so the checksum file
    # references the bare file name.
    old_dir = os.getcwd()
    os.chdir( os.path.dirname( tarball_path ) )
    os.system( 'md5sum -b "%s" >"%s"' % ( os.path.basename( tarball_path ), md5sum_path ) )
    os.chdir( old_dir )
    if site_dir is not None:
        utils.log( 'Moving "%s" to the site location "%s"...' % ( tarball_name, site_dir ) )
        temp_site_dir = os.path.join( site_dir, 'temp' )
        if not os.path.exists( temp_site_dir ):
            os.mkdir( temp_site_dir )
        shutil.move( tarball_path, temp_site_dir )
        shutil.move( os.path.join( temp_site_dir, tarball_name ), site_dir )
        shutil.move( tarball_timestamp_path, site_dir )
        shutil.move( md5sum_path, site_dir )
    utils.log( 'Removing "%s"...' % sources_dir )
    rmtree( sources_dir )
def accept_args( args ):
    """Parse the command line via utils.accept_args and return the
    (working_dir, tag, user, site_dir) tuple for make_tarball.
    Defaults: tag 'trunk', user None, site-dir None; see usage()."""
    args_spec = [
        'working-dir='
        , 'tag='
        , 'user='
        , 'site-dir='
        , 'mail='
        , 'help'
        ]
    options = {
        '--tag': 'trunk'
        , '--user': None
        , '--site-dir': None
        }
    utils.accept_args( args_spec, args, options, usage )
    return (
        options[ '--working-dir' ]
        , options[ '--tag' ]
        , options[ '--user' ]
        , options[ '--site-dir' ]
        )
def usage():
    # Print the command-line help; passed to utils.accept_args as the
    # --help handler.
    print 'Usage: %s [options]' % os.path.basename( sys.argv[0] )
    print '''
\t--working-dir working directory
\t--tag snapshot tag (i.e. 'trunk')
\t--user Boost SVN user ID (optional)
\t--site-dir site directory to copy the snapshot to (optional)
'''
def main():
    # Parse sys.argv and build/publish the snapshot tarball.
    make_tarball( *accept_args( sys.argv[ 1: ] ) )
# When imported as a module just pull in utils; when run as a script,
# locate the enclosing xsl_reports directory, add it to sys.path, and
# run main().
if __name__ != '__main__': import utils
else:
    # in absence of relative import...
    xsl_path = os.path.abspath( os.path.dirname( sys.argv[ 0 ] ) )
    while os.path.basename( xsl_path ) != 'xsl_reports': xsl_path = os.path.dirname( xsl_path )
    sys.path.append( xsl_path )
    import utils
    main()

View file

@ -1,371 +0,0 @@
# Copyright (c) MetaCommunications, Inc. 2003-2004
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import shutil
import os.path
import os
import string
import time
import sys
import utils
import runner
# Report-type codes understood by make_result_pages (detailed/summary
# developer/user pages, links, issues, expected results, etc.).
report_types = [ 'us', 'ds', 'ud', 'dd', 'l', 'p', 'x', 'i', 'n', 'ddr', 'dsr' ]
# Directory this module lives in, used to resolve the bundled xsl/
# stylesheets: argv[0] when run as a script, __file__ when imported.
if __name__ == '__main__':
    run_dir = os.path.abspath( os.path.dirname( sys.argv[ 0 ] ) )
else:
    run_dir = os.path.abspath( os.path.dirname( sys.modules[ __name__ ].__file__ ) )
def map_path( path ):
    # Resolve `path` relative to the directory containing this module.
    return os.path.join( run_dir, path )
def xsl_path( xsl_file_name, v2 = 0 ):
    """Absolute path of a bundled stylesheet: xsl/v2/<name> when the v2
    flag is truthy, xsl/<name> otherwise."""
    subdir = 'xsl'
    if v2:
        subdir = 'xsl/v2'
    return map_path( os.path.join( subdir, xsl_file_name ) )
def make_result_pages(
        test_results_file
        , expected_results_file
        , failures_markup_file
        , tag
        , run_date
        , comment_file
        , results_dir
        , result_prefix
        , reports
        , v2
        ):
    """Drive the XSL transforms that turn the merged results XML into
    the HTML report pages under results_dir/result_prefix.

    `reports` is a list of codes selecting what to produce: 'x' merge
    expected results, 'l' links pages, 'i' issues page, 'dd'/'ud'
    detailed developer/user pages, 'ds'/'us' summary pages,
    'ddr'/'dsr' release variants (v2 only), 'e' expected-results XML,
    'n' runner comment pages.  `v2` selects the xsl/v2 stylesheets.
    """
    utils.log( 'Producing the reports...' )
    __log__ = 1
    output_dir = os.path.join( results_dir, result_prefix )
    utils.makedirs( output_dir )
    # Absolutize the optional inputs; fall back to the bundled empty
    # expected-results file when none is supplied.
    if comment_file != '':
        comment_file = os.path.abspath( comment_file )
    if expected_results_file != '':
        expected_results_file = os.path.abspath( expected_results_file )
    else:
        expected_results_file = os.path.abspath( map_path( 'empty_expected_results.xml' ) )
    extended_test_results = os.path.join( output_dir, 'extended_test_results.xml' )
    if 'x' in reports:
        utils.log( ' Merging with expected results...' )
        utils.libxslt(
            utils.log
            , test_results_file
            , xsl_path( 'add_expected_results.xsl', v2 )
            , extended_test_results
            , { 'expected_results_file': expected_results_file
            , 'failures_markup_file' : failures_markup_file
            , 'source' : tag }
            )
    links = os.path.join( output_dir, 'links.html' )
    utils.makedirs( os.path.join( output_dir, 'output' ) )
    for mode in ( 'developer', 'user' ):
        utils.makedirs( os.path.join( output_dir, mode , 'output' ) )
    if 'l' in reports:
        utils.log( ' Making test output files...' )
        utils.libxslt(
            utils.log
            , extended_test_results
            , xsl_path( 'links_page.xsl', v2 )
            , links
            , {
            'source': tag
            , 'run_date': run_date
            , 'comment_file': comment_file
            , 'explicit_markup_file': failures_markup_file
            }
            )
    issues = os.path.join( output_dir, 'developer', 'issues.html' )
    if 'i' in reports:
        utils.log( ' Making issues list...' )
        utils.libxslt(
            utils.log
            , extended_test_results
            , xsl_path( 'issues_page.xsl', v2 )
            , issues
            , {
            'source': tag
            , 'run_date': run_date
            , 'comment_file': comment_file
            , 'explicit_markup_file': failures_markup_file
            }
            )
    # 'dd' / 'ud' -> detailed developer / user pages.
    for mode in ( 'developer', 'user' ):
        if mode[0] + 'd' in reports:
            utils.log( ' Making detailed %s report...' % mode )
            utils.libxslt(
                utils.log
                , extended_test_results
                , xsl_path( 'result_page.xsl', v2 )
                , os.path.join( output_dir, mode, 'index.html' )
                , {
                'links_file': 'links.html'
                , 'mode': mode
                , 'source': tag
                , 'run_date': run_date
                , 'comment_file': comment_file
                , 'expected_results_file': expected_results_file
                , 'explicit_markup_file' : failures_markup_file
                }
                )
    # 'ds' / 'us' -> summary developer / user pages.
    for mode in ( 'developer', 'user' ):
        if mode[0] + 's' in reports:
            utils.log( ' Making summary %s report...' % mode )
            utils.libxslt(
                utils.log
                , extended_test_results
                , xsl_path( 'summary_page.xsl', v2 )
                , os.path.join( output_dir, mode, 'summary.html' )
                , {
                'mode' : mode
                , 'source': tag
                , 'run_date': run_date
                , 'comment_file': comment_file
                , 'explicit_markup_file' : failures_markup_file
                }
                )
    if v2 and "ddr" in reports:
        # NOTE(review): the %s below reuses `mode` left over from the
        # loop above, although the page produced is always the
        # developer one - confirm the log text is intended.
        utils.log( ' Making detailed %s release report...' % mode )
        utils.libxslt(
            utils.log
            , extended_test_results
            , xsl_path( 'result_page.xsl', v2 )
            , os.path.join( output_dir, "developer", 'index_release.html' )
            , {
            'links_file': 'links.html'
            , 'mode': "developer"
            , 'source': tag
            , 'run_date': run_date
            , 'comment_file': comment_file
            , 'expected_results_file': expected_results_file
            , 'explicit_markup_file' : failures_markup_file
            , 'release': "yes"
            }
            )
    if v2 and "dsr" in reports:
        # NOTE(review): same stale-`mode` log text as above.
        utils.log( ' Making summary %s release report...' % mode )
        utils.libxslt(
            utils.log
            , extended_test_results
            , xsl_path( 'summary_page.xsl', v2 )
            , os.path.join( output_dir, "developer", 'summary_release.html' )
            , {
            'mode' : "developer"
            , 'source': tag
            , 'run_date': run_date
            , 'comment_file': comment_file
            , 'explicit_markup_file' : failures_markup_file
            , 'release': 'yes'
            }
            )
    if 'e' in reports:
        utils.log( ' Generating expected_results ...' )
        utils.libxslt(
            utils.log
            , extended_test_results
            , xsl_path( 'produce_expected_results.xsl', v2 )
            , os.path.join( output_dir, 'expected_results.xml' )
            )
    if v2 and 'n' in reports:
        utils.log( ' Making runner comment files...' )
        utils.libxslt(
            utils.log
            , extended_test_results
            , xsl_path( 'runners.xsl', v2 )
            , os.path.join( output_dir, 'runners.html' )
            )
    # Stylesheet shared by all generated pages.
    shutil.copyfile(
        xsl_path( 'html/master.css', v2 )
        , os.path.join( output_dir, 'master.css' )
        )
def build_xsl_reports(
        locate_root_dir
        , tag
        , expected_results_file
        , failures_markup_file
        , comment_file
        , results_dir
        , result_file_prefix
        , dont_collect_logs = 0
        , reports = report_types
        , v2 = 0
        , user = None
        , upload = False
        ):
    """Collect (or merge, for v2) the test logs into test_results.xml,
    generate the report pages via make_result_pages, and optionally
    (v2 + upload) tar and push the results to SourceForge."""
    ( run_date ) = time.strftime( '%Y-%m-%dT%H:%M:%SZ', time.gmtime() )
    test_results_file = os.path.join( results_dir, 'test_results.xml' )
    bin_boost_dir = os.path.join( locate_root_dir, 'bin', 'boost' )
    if v2:
        # v2: merge the per-runner logs already present in results_dir.
        import merger
        merger.merge_logs(
            tag
            , user
            , results_dir
            , test_results_file
            , dont_collect_logs
            )
    else:
        # v1: gather the local logs from bin/boost unless told not to.
        utils.log( ' dont_collect_logs: %s' % dont_collect_logs )
        if not dont_collect_logs:
            f = open( test_results_file, 'w+' )
            f.write( '<tests>\n' )
            runner.collect_test_logs( [ bin_boost_dir ], f )
            f.write( '</tests>\n' )
            f.close()
    make_result_pages(
        test_results_file
        , expected_results_file
        , failures_markup_file
        , tag
        , run_date
        , comment_file
        , results_dir
        , result_file_prefix
        , reports
        , v2
        )
    if v2 and upload:
        upload_dir = 'regression-logs/'
        utils.log( 'Uploading v2 results into "%s" [connecting as %s]...' % ( upload_dir, user ) )
        archive_name = '%s.tar.gz' % result_file_prefix
        utils.tar(
            os.path.join( results_dir, result_file_prefix )
            , archive_name
            )
        utils.sourceforge.upload( os.path.join( results_dir, archive_name ), upload_dir, user )
        # Remote untar runs in the background on the server side.
        utils.sourceforge.untar( os.path.join( upload_dir, archive_name ), user, background = True )
def accept_args( args ):
    """Parse the command line into the positional-argument tuple expected by
    build_xsl_reports(); exits via usage() on --help or an empty command line.
    """
    args_spec = [
          'locate-root='
        , 'tag='
        , 'expected-results='
        , 'failures-markup='
        , 'comment='
        , 'results-dir='
        , 'results-prefix='
        , 'dont-collect-logs'
        , 'reports='
        , 'v2'
        , 'user='
        , 'upload'
        , 'help'
        ]

    options = {
          '--comment': ''
        , '--expected-results': ''
        , '--failures-markup': ''
        , '--reports': ','.join( report_types )  # str.join works on Python 2 and 3, unlike string.join()
        , '--tag': None
        , '--user': None
        # NOTE(review): this key lacks the leading dashes, so it is never read
        # back ('--upload' is what is looked up below). It is kept as-is
        # because utils.accept_args() counts the number of defaults to decide
        # whether any real option was passed -- removing it would change that.
        , 'upload': False
        }

    utils.accept_args( args_spec, args, options, usage )

    # Fill in derived defaults ('in' instead of the Python-2-only has_key()).
    if '--results-dir' not in options:
        options[ '--results-dir' ] = options[ '--locate-root' ]

    if '--results-prefix' not in options:
        if '--v2' in options:
            options[ '--results-prefix' ] = 'all'
        else:
            options[ '--results-prefix' ] = ''

    return (
          options[ '--locate-root' ]
        , options[ '--tag' ]
        , options[ '--expected-results' ]
        , options[ '--failures-markup' ]
        , options[ '--comment' ]
        , options[ '--results-dir' ]
        , options[ '--results-prefix' ]
        , '--dont-collect-logs' in options
        , options[ '--reports' ].split( ',' )
        , '--v2' in options
        , options[ '--user' ]
        , '--upload' in options
        )
def usage():
    # Print command-line help for the report-building driver to stdout.
    # (Python 2 print statements; the help text itself is runtime output and
    # is reproduced verbatim.)
    print 'Usage: %s [options]' % os.path.basename( sys.argv[0] )
    print '''
\t--locate-root the same as --locate-root in compiler_status
\t--tag the tag for the results (i.e. 'CVS-HEAD')
\t--expected-results the file with the results to be compared with
\t the current run
\t--failures-markup the file with the failures markup
\t--comment an html comment file (will be inserted in the reports)
\t--results-dir the directory containing -links.html, -fail.html
\t files produced by compiler_status (by default the
\t same as specified in --locate-root)
\t--results-prefix the prefix of -links.html, -fail.html
\t files produced by compiler_status
\t--v2 v2 reports (combine multiple runners results into a
\t single set of reports)
The following options are valid only for v2 reports:
\t--user SourceForge user name for a shell account
\t--upload upload v2 reports to SourceForge
The following options are useful in debugging:
\t--dont-collect-logs dont collect the test logs
\t--reports produce only the specified reports
\t us - user summary
\t ds - developer summary
\t ud - user detailed
\t dd - developer detailed
\t l - links
\t p - patches
\t x - extended results file
\t i - issues
'''
def main():
    # Entry point: parse argv (sans program name) and build the reports.
    build_xsl_reports( *accept_args( sys.argv[ 1 : ] ) )

if __name__ == '__main__':
    main()

View file

@ -1,165 +0,0 @@
import xml.sax.saxutils
import time
def make_test_name( library_idx, test_idx ):
    """Return the synthetic test name "test_LL_TT" for the given indices."""
    return "test_{0:02d}_{1:02d}".format( library_idx, test_idx )
def make_library_name( library_idx ):
    """Return the synthetic library name for library_idx.

    Indices whose remainder mod 4 is 0 or 1 model sub-libraries and get a
    two-part "library_NN/MM" name; all other indices get a flat
    "library_NN" name.
    """
    remainder = library_idx % 4
    if remainder not in ( 0, 1 ):
        return "library_%02d" % library_idx
    group_base = int( library_idx / 4 ) * 4
    return "library_%02d/%02d" % ( group_base, remainder )
def make_toolset_name( toolset_idx ):
    """Return the synthetic toolset name "toolset_NN"."""
    return "toolset_{0:02d}".format( toolset_idx )
def make_library_target_directory( library_idx, toolset_idx, variant = None ):
    """Return the library build directory "lib/<library>/<toolset>",
    with "/<variant>" appended when a variant is given."""
    parts = [ "lib"
              , make_library_name( library_idx )
              , make_toolset_name( toolset_idx ) ]
    if variant is not None:
        parts.append( variant )
    return "/".join( parts )
def make_test_target_directory( library_idx, toolset_idx, test_name, variant ):
base = "%s/%s/%s" % ( make_library_name( library_idx )
, make_toolset_name( toolset_idx )
, test_name )
if variant is not None:
return "%s/%s" % ( base, variant )
else:
return base
def format_timestamp( timestamp ):
    """Format a time.struct_time as an ISO-8601 UTC stamp ("...Z")."""
    iso_utc_format = "%Y-%m-%dT%H:%M:%SZ"
    return time.strftime( iso_utc_format, timestamp )
def make_test_log( xml_generator
                   , library_idx
                   , toolset_idx
                   , test_name
                   , test_type
                   , test_result
                   , show_run_output
                   , variant ):
    # Emit one synthetic <test-log> element on xml_generator.
    #
    # Which child elements (<compile>/<lib>/<link>/<run>) appear, and which
    # carry test_result rather than a hard-coded "success", is varied by
    # toolset_idx so the generated data exercises different result
    # combinations in the report code.
    library = make_library_name( library_idx )
    toolset_name = make_toolset_name( toolset_idx )
    target_directory = ""
    if test_type != "lib":
        target_directory = make_test_target_directory( library_idx, toolset_idx, test_name, variant )
    else:
        target_directory = make_library_target_directory( library_idx, toolset_idx, variant )
    xml_generator.startElement( "test-log", { "library": library
                                              , "test-name": test_name
                                              , "toolset": toolset_name
                                              , "test-type": test_type
                                              , "test-program": "some_program"
                                              , "target-directory": target_directory
                                              , "show-run-output": show_run_output
                                              } )
    if test_type != "lib":
        # Compile step: result hard-coded to "success" (only emitted for
        # three out of four toolsets, and only when the test succeeded).
        if test_result == "success" and ( toolset_idx + 1 ) % 4:
            xml_generator.startElement( "compile", { "result": "success" } );
            xml_generator.characters( "Compiling in %s" % target_directory )
            xml_generator.endElement( "compile" )
        # Dependent-library and link steps for link*/run* tests.
        # NOTE(review): by operator precedence this condition reads
        # link* OR ( run* AND toolset_idx % 4 ) -- confirm that is intended.
        if test_type.find( "link" ) == 0 or test_type.find( "run" ) == 0 and toolset_idx % 4:
            xml_generator.startElement( "lib", { "result": test_result } );
            xml_generator.characters( make_library_target_directory( library_idx, toolset_idx ) )
            xml_generator.endElement( "lib" )
            xml_generator.startElement( "link", { "result": "success" } );
            xml_generator.characters( "Linking in %s" % target_directory )
            xml_generator.endElement( "link" )
        # Run step for run* tests, skipped when (toolset_idx + 2) % 4 == 0.
        if test_type.find( "run" ) == 0 and ( toolset_idx + 2 ) % 4:
            xml_generator.startElement( "run", { "result": test_result } );
            xml_generator.characters( "Running in %s" % target_directory )
            xml_generator.endElement( "run" )
    else:
        # Library builds get a single <compile> carrying the real result.
        xml_generator.startElement( "compile", { "result": test_result } );
        xml_generator.characters( "Compiling in %s" % make_library_target_directory( library_idx, toolset_idx ) )
        xml_generator.endElement( "compile" )
    xml_generator.endElement( "test-log" )
def make_expicit_failure_markup( num_of_libs, num_of_toolsets, num_of_tests ):
    # Write a synthetic "explicit-failures-markup.xml" matching the
    # generated results: the first two toolsets are marked required, every
    # fourth library is marked unusable on odd toolsets, and selected tests
    # get categories and explicit failure markup.
    # (Function name keeps the original "expicit" spelling -- callers use it.)
    g = xml.sax.saxutils.XMLGenerator( open( "explicit-failures-markup.xml", "w" ), "utf-8" )
    g.startDocument()
    g.startElement( "explicit-failures-markup", {} );
    # required toolsets
    for i_toolset in range( 0, num_of_toolsets ):
        if i_toolset < 2:
            g.startElement( "mark-toolset", { "name": "toolset_%02d" % i_toolset, "status":"required"} )
            g.endElement( "mark-toolset" )
    for i_library in range( 0, num_of_libs ):
        g.startElement( "library", { "name": make_library_name( i_library ) } )
        if i_library % 4 == 0:
            # Mark this library unusable on every odd-indexed toolset, with
            # an author note (non-ASCII author name exercises encoding).
            g.startElement( "mark-unusable", {} )
            for i_toolset in range( 0, num_of_toolsets ):
                if i_toolset % 2 == 1:
                    g.startElement( "toolset", { "name": make_toolset_name( i_toolset ) } )
                    g.endElement( "toolset" )
            g.startElement( "note", { "author": u"T. T\xe8st" } )
            g.characters( "Test note" )
            g.endElement( "note" )
            g.endElement( "mark-unusable" )
        for i_test in range( 0, num_of_tests ):
            category = 0
            explicitly_marked_failure = 0
            unresearched = 0
            if i_test % 2 == 0:
                category = i_test % 3
            if i_test % 3 == 0:
                explicitly_marked_failure = 1
                if i_test % 2 == 0:
                    unresearched = 1
            if category or explicitly_marked_failure:
                test_attrs = { "name": make_test_name( i_library, i_test ) }
                if category:
                    test_attrs[ "category" ] = "Category %s" % category
                g.startElement( "test", test_attrs )
                if explicitly_marked_failure:
                    # Mark the failure on toolsets 1, 0, 2 (in that order).
                    failure_attrs = {}
                    if unresearched: failure_attrs[ "reason" ] = "not-researched"
                    g.startElement( "mark-failure", failure_attrs )
                    g.startElement( "toolset", { "name": make_toolset_name( 1 ) } )
                    g.endElement( "toolset" )
                    g.startElement( "toolset", { "name": make_toolset_name( 0 ) } )
                    g.endElement( "toolset" )
                    g.startElement( "toolset", { "name": make_toolset_name( 2 ) } )
                    g.endElement( "toolset" )
                    g.startElement( "note", { "author": u"V. Ann\xf3tated" } )
                    g.characters( "Some thoughtful note" )
                    g.endElement( "note" )
                    g.endElement( "mark-failure" )
                g.endElement( "test" );
        g.endElement( "library" )
    g.endElement( "explicit-failures-markup" )
    g.endDocument()
def make_expected_results( num_of_libs, num_of_toolsets, num_of_tests ):
    # Placeholder: generating an expected-results file is not implemented.
    pass

View file

@ -1,3 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<expected-failures>
</expected-failures>

View file

@ -1,160 +0,0 @@
#
# Generates test test results for testing of boost_wide_report.py
#
import common
import xml.sax.saxutils
import os
import time
# Size of the synthetic data set: runners x libraries x toolsets x tests.
num_of_libs = 5
num_of_runners = 5
num_of_toolsets = 3
num_of_tests = 10
# Where the generated per-runner XML result files are written.
results_directory = "results/incoming/CVS-HEAD/processed"
# Generated results follow the rules:
#
# * odd runners are testing on Win32, even runners are testing on Unix
# * the third toolset has 2 variants
#
# Generated expected markup:
#
# * First two toolset are required
# * Every fourth library is unusable on even toolsets
# * Last two tests are corner-case tests
# * Every 4th test is explicitly marked up as expected-failure
def library_build_failed( library_idx ):
    """Model odd-indexed libraries as failing their library build (1/0)."""
    return library_idx & 1
def test_run_source( runner_idx ):
if runner_idx % 2: return "tarball"
else: return "cvs head"
def test_run_type( runner_idx ):
if runner_idx % 2: return "incremental"
else: return "full"
def test_type( i ):
types = [ "compile", "compile_fail", "link", "link_fail", "run", "run_fail", "run_pyd" ]
return types[ i % len( types) ]
def make_test_results():
    # Write one XML results file per simulated runner under
    # results_directory, covering libraries x toolsets x tests with
    # systematically varied outcomes (see the module comment above).
    if not os.path.exists( results_directory ):
        os.makedirs( results_directory )
    for i_runner in range( 0, num_of_runners ):
        runner_id = "runner %02d" % i_runner
        g = xml.sax.saxutils.XMLGenerator( open( os.path.join( results_directory, runner_id + ".xml" ), "w" ), "utf-8" )
        g.startDocument()
        # Odd runners report Win32, even runners Unix.
        if i_runner % 2:
            platform = "Win32"
        else:
            platform = "Unix"
        # Each runner's timestamp is pushed back one day per index so the
        # merged report sees runs of different ages.
        g.startElement( "test-run", { "platform": platform
                                      , "runner": runner_id
                                      , "timestamp": common.format_timestamp(
                                            time.gmtime( time.time() - i_runner * 24*60*60 )
                                            )
                                      , "revision": '%d' % ( 7000 + i_runner )
                                      , "source": test_run_source( i_runner )
                                      , "run-type": test_run_type( i_runner )
                                      } )
        g.startElement( "comment", {} )
        g.characters( "<b>Runner</b> is who <i>running</i> does." )
        g.endElement( "comment" )
        # Library-build logs first ...
        for i_lib in range( 0, num_of_libs ):
            for i_toolset in range( num_of_toolsets ):
                if library_build_failed( i_lib ): test_result = "fail"
                else: test_result = "success"
                common.make_test_log( xml_generator = g
                                      , library_idx = i_lib
                                      , toolset_idx = i_toolset
                                      , test_name = ""
                                      , test_type = "lib"
                                      , test_result = test_result
                                      , show_run_output = "false"
                                      , variant = None )
        # ... then the individual test logs.
        for i_lib in range( 0, num_of_libs ):
            library_name = "library_%02d" % i_lib  # (unused)
            # The last runner skips odd libraries/toolsets/tests to
            # simulate an incomplete run.
            if num_of_runners - 1 == i_runner and i_lib % 2:
                continue
            for i_toolset in range( num_of_toolsets ):
                toolset_name = "toolset %02d" % ( i_toolset )  # (unused)
                if num_of_runners - 1 == i_runner and i_toolset % 2:
                    continue
                for i_test in range( num_of_tests ):
                    test_name = "test_%02d_%02d" % ( i_lib, i_test )
                    test_result = ""
                    show_run_output = "false"
                    if num_of_runners - 1 == i_runner and i_test % 2:
                        continue
                    # Odd runners succeed, even runners fail, with one
                    # extra failure pattern injected for runner 1.
                    if i_runner % 2: test_result = "success"
                    else: test_result = "fail"
                    if i_runner == 1 and i_toolset == 2 and i_test % 6 == 0:
                        test_result = "fail"
                    if test_result == "success" and ( 0 == i_test % 5 ):
                        show_run_output = "true"
                    if i_toolset == 2:
                        # The third toolset is tested in two build variants.
                        variants = [ "static-lib", "shared-lib" ]
                    else:
                        variants = [ None ]
                    for variant in variants:
                        common.make_test_log( xml_generator = g
                                              , library_idx = i_lib
                                              , toolset_idx = i_toolset
                                              , test_name = test_name
                                              , test_type = test_type( i_test )
                                              , test_result = test_result
                                              , show_run_output = show_run_output
                                              , variant = variant )
        g.endElement( "test-run" )
        g.endDocument()
## <test-log library="algorithm" test-name="container" test-type="run" test-program="libs/algorithm/string/test/container_test.cpp" target-directory="bin/boost/libs/algorithm/string/test/container.test/borland-5.6.4/debug" toolset="borland-5.6.4" show-run-output="false">
## <compile result="fail" timestamp="2004-06-29 17:02:27 UTC">
## "C:\Progra~1\Borland\CBuilder6\bin\bcc32" -j5 -g255 -q -c -P -w -Ve -Vx -a8 -b- -v -Od -vi- -tWC -tWR -tWC -WM- -DBOOST_ALL_NO_LIB=1 -w-8001 -I"C:\Users\Administrator\boost\main\results\bin\boost\libs\algorithm\string\test" -I"C:\Users\Administrator\boost\main\boost" -I"C:\Progra~1\Borland\CBuilder6\include" -o"C:\Users\Administrator\boost\main\results\bin\boost\libs\algorithm\string\test\container.test\borland-5.6.4\debug\container_test.obj" "..\libs\algorithm\string\test\container_test.cpp"
## ..\libs\algorithm\string\test\container_test.cpp:
## Warning W8091 C:\Users\Administrator\boost\main\boost\libs/test/src/unit_test_result.cpp 323: template argument _InputIter passed to 'for_each' is a output iterator: input iterator required in function unit_test_result::~unit_test_result()
## Warning W8091 C:\Users\Administrator\boost\main\boost\libs/test/src/unit_test_suite.cpp 63: template argument _InputIter passed to 'find_if' is a output iterator: input iterator required in function test_case::Impl::check_dependencies()
## Warning W8091 C:\Users\Administrator\boost\main\boost\libs/test/src/unit_test_suite.cpp 204: template argument _InputIter passed to 'for_each' is a output iterator: input iterator required in function test_suite::~test_suite()
## Error E2401 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 45: Invalid template argument list
## Error E2040 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 46: Declaration terminated incorrectly
## Error E2090 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 277: Qualifier 'algorithm' is not a class or namespace name
## Error E2272 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 277: Identifier expected
## Error E2090 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 278: Qualifier 'algorithm' is not a class or namespace name
## Error E2228 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 278: Too many error or warning messages
## *** 6 errors in Compile ***
## </compile>
## </test-log>
# Generate the per-runner results and the matching failure markup.
make_test_results()
common.make_expicit_failure_markup( num_of_libs, num_of_toolsets, num_of_tests )

View file

@ -1,85 +0,0 @@
import xml.sax.saxutils
import common
import os
import time
# Size of the synthetic data set.
num_of_libs = 2
num_of_toolsets = 3
num_of_tests = 10
# Release tag; also used as the output directory name below.
tag = "1_30_0"
def library_build_failed( library_idx ):
    """Model odd-indexed libraries as failing their library build (1/0)."""
    return library_idx & 1
def make_test_results():
    # Write a single <test-results> file ("<tag>/test.xml") covering
    # libraries x toolsets x tests with alternating outcomes.
    if not os.path.exists( tag ):
        os.makedirs( tag )
    g = xml.sax.saxutils.XMLGenerator( open( os.path.join( tag, "test.xml" ), "w" ) )
    platform = "Win32"  # (unused)
    g.startElement( "test-results", {} )
    # Library-build logs first ...
    for i_lib in range( 0, num_of_libs ):
        for i_toolset in range( num_of_toolsets ):
            if library_build_failed( i_lib ): test_result = "fail"
            else: test_result = "success"
            # NOTE(review): the common.make_test_log() shipped with the v2
            # generator requires a 'variant' argument that is not passed
            # here -- confirm this script matches the version of the common
            # module it was written against.
            common.make_test_log( xml_generator = g
                                  , library_idx = i_lib
                                  , toolset_idx = i_toolset
                                  , test_name = ""
                                  , test_type = "lib"
                                  , test_result = test_result
                                  , show_run_output = "false" )
    # ... then one "run" log per test.
    for i_lib in range( 0, num_of_libs ):
        library_name = "library_%02d" % i_lib  # (unused)
        for i_toolset in range( num_of_toolsets ):
            toolset_name = "toolset_%02d" % ( i_toolset )  # (unused)
            for i_test in range( num_of_tests ):
                test_name = "test_%02d_%02d" % ( i_lib, i_test )
                test_result = ""
                test_type = "run"
                show_run_output = "false"
                # Odd libraries succeed, even libraries fail.
                if i_lib % 2: test_result = "success"
                else: test_result = "fail"
                if test_result == "success" and ( 0 == i_test % 5 ):
                    show_run_output = "true"
                common.make_test_log( g, i_lib, i_toolset, test_name, test_type, test_result, show_run_output )
    g.endElement( "test-results" )
## <test-log library="algorithm" test-name="container" test-type="run" test-program="libs/algorithm/string/test/container_test.cpp" target-directory="bin/boost/libs/algorithm/string/test/container.test/borland-5.6.4/debug" toolset="borland-5.6.4" show-run-output="false">
## <compile result="fail" timestamp="2004-06-29 17:02:27 UTC">
## "C:\Progra~1\Borland\CBuilder6\bin\bcc32" -j5 -g255 -q -c -P -w -Ve -Vx -a8 -b- -v -Od -vi- -tWC -tWR -tWC -WM- -DBOOST_ALL_NO_LIB=1 -w-8001 -I"C:\Users\Administrator\boost\main\results\bin\boost\libs\algorithm\string\test" -I"C:\Users\Administrator\boost\main\boost" -I"C:\Progra~1\Borland\CBuilder6\include" -o"C:\Users\Administrator\boost\main\results\bin\boost\libs\algorithm\string\test\container.test\borland-5.6.4\debug\container_test.obj" "..\libs\algorithm\string\test\container_test.cpp"
## ..\libs\algorithm\string\test\container_test.cpp:
## Warning W8091 C:\Users\Administrator\boost\main\boost\libs/test/src/unit_test_result.cpp 323: template argument _InputIter passed to 'for_each' is a output iterator: input iterator required in function unit_test_result::~unit_test_result()
## Warning W8091 C:\Users\Administrator\boost\main\boost\libs/test/src/unit_test_suite.cpp 63: template argument _InputIter passed to 'find_if' is a output iterator: input iterator required in function test_case::Impl::check_dependencies()
## Warning W8091 C:\Users\Administrator\boost\main\boost\libs/test/src/unit_test_suite.cpp 204: template argument _InputIter passed to 'for_each' is a output iterator: input iterator required in function test_suite::~test_suite()
## Error E2401 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 45: Invalid template argument list
## Error E2040 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 46: Declaration terminated incorrectly
## Error E2090 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 277: Qualifier 'algorithm' is not a class or namespace name
## Error E2272 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 277: Identifier expected
## Error E2090 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 278: Qualifier 'algorithm' is not a class or namespace name
## Error E2228 C:\Users\Administrator\boost\main\boost\boost/algorithm/string/finder.hpp 278: Too many error or warning messages
## *** 6 errors in Compile ***
## </compile>
## </test-log>
# Generate the results file and the matching failure markup.
make_test_results( )
common.make_expicit_failure_markup( num_of_libs, num_of_toolsets, num_of_tests )

View file

@ -1,36 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Filter stylesheet: identity-copies the input document while keeping
     only the test-log elements whose library attribute equals the
     $library parameter. -->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
    version="1.0">

    <xsl:output method="xml" encoding="ascii"/>
    <xsl:param name="library"/>

    <!-- Log the selected library name, then process the document. -->
    <xsl:template match="/">
        <xsl:message>
            <xsl:value-of select="$library"/>
        </xsl:message>
        <xsl:apply-templates/>
    </xsl:template>

    <!-- Identity copy for every other element. -->
    <xsl:template match="*">
        <xsl:copy>
            <xsl:apply-templates select="@*"/>
            <xsl:apply-templates />
        </xsl:copy>
    </xsl:template>

    <!-- Keep a test-log only when it belongs to the requested library. -->
    <xsl:template match="test-log">
        <xsl:if test="@library=$library">
            <xsl:copy>
                <xsl:apply-templates select="@*"/>
                <xsl:apply-templates/>
            </xsl:copy>
        </xsl:if>
    </xsl:template>

    <!-- Attributes are copied through unchanged. -->
    <xsl:template match="@*">
        <xsl:copy-of select="."/>
    </xsl:template>

</xsl:stylesheet>

View file

@ -1,32 +0,0 @@
import sys
sys.path.append( '..' )
import os
import report
import merger
import utils
# Manual test driver: regenerate the v2 "dd" (developer detailed) report
# from a previously merged results file (a.xml).
tag = "1_32_0"

# utils.makedirs( "results" )
all_xml_file = "a.xml"

report.make_result_pages(
      test_results_file = os.path.abspath( all_xml_file )
    , expected_results_file = ""
    , failures_markup_file = os.path.abspath( "../../../../status/explicit-failures-markup.xml" )
    , tag = tag
    , run_date = "Today date"
    , comment_file = os.path.abspath( "comment.html" )
    , results_dir = "results"
    # NOTE(review): other call sites spell this result_file_prefix --
    # confirm the keyword matches report.make_result_pages' signature.
    , result_prefix = ""
    , reports = [ "dd" ]
    , v2 = 1
    )

View file

@ -1,35 +0,0 @@
import sys
sys.path.append( '..' )
import os
import report
import merger
import utils
# Manual test driver: merge the v1 per-runner logs for tag 1_30_0 and
# build the links + developer-detailed reports from them.
tag = "1_30_0"

utils.makedirs( "results" )
all_xml_file = "results/all.xml"

all_xml_writer = open( all_xml_file, "w" )
merger.merge_test_runs( ".", tag, all_xml_writer )
all_xml_writer.close()

report.make_result_pages(
      test_results_file = os.path.abspath( all_xml_file )
    , expected_results_file = ""
    , failures_markup_file = os.path.abspath( "explicit-failures-markup.xml" )
    # NOTE(review): the sibling driver script passes this value as 'tag=' --
    # confirm which keyword the current make_result_pages signature uses.
    , source = tag
    , run_date = "Today date"
    , comment_file = os.path.abspath( "comment.html" )
    , results_dir = os.path.abspath( "results" )
    , result_prefix = ""
    , reports = [ "l", "dd" ]
    , v2 = 0
    )

View file

@ -1,34 +0,0 @@
import sys
sys.path.append( '..' )
import os
import boost_wide_report
import common
import utils
import shutil
import time
# Manual test driver for boost_wide_report: run the full v2 reporting
# pipeline against local canned results, with the network stages stubbed.
tag = "CVS-HEAD"

# Start from a clean merge directory.
if os.path.exists( "results/incoming/CVS-HEAD/processed/merged" ):
    shutil.rmtree( "results/incoming/CVS-HEAD/processed/merged" )

# Stub the FTP download and archive-unpacking tasks so the run is local.
boost_wide_report.ftp_task = lambda ftp_site, site_path, incoming_dir: 1
boost_wide_report.unzip_archives_task = lambda incoming_dir, processed_dir, unzip: 1

boost_wide_report.execute_tasks(
      tag = tag
    , user = None
    , run_date = common.format_timestamp( time.gmtime() )
    , comment_file = os.path.abspath( "comment.html" )
    , results_dir = os.path.abspath( "results" )
    , output_dir = os.path.abspath( "output" )
    , reports = [ "i", "x", "ds", "dd", "dsr", "ddr", "us", "ud", "usr", "udr" ]
    , warnings = [ 'Warning text 1', 'Warning text 2' ]
    , extended_test_results = os.path.abspath( "output/extended_test_results.xml" )
    , dont_collect_logs = 1
    , expected_results_file = os.path.abspath( "expected_results.xml" )
    , failures_markup_file = os.path.abspath( "explicit-failures-markup.xml" )
    )

View file

@ -1,36 +0,0 @@
import unittest
import sys
import time
sys.path.append( ".." )
import boost_wide_report
class test_boost_wide_report(unittest.TestCase):
    """Unit tests for boost_wide_report.diff()."""

    def test_diff( self ):
        # Each case: (source dir listing, destination dir listing,
        # expected (names-to-copy, names-to-remove) pair).
        test_cases = [
              # both sides empty -> nothing to do
              ( []
              , []
              , ( [], [] ) )
              # file only present in source -> it is copied
            , ( [ boost_wide_report.file_info( "a", 1, time.localtime( 0 ) ) ]
              , []
              , ( [ "a" ], [] ) )
              # file only present in destination -> it is removed
            , ( []
              , [ boost_wide_report.file_info( "a", 1, time.localtime( 0 ) ) ]
              , ( [], [ "a" ] ) )
              # NOTE(review): here the destination copy is newer, yet the
              # expectation is that "a" is still copied -- confirm diff()
              # intentionally treats any timestamp difference as a copy.
            , ( [ boost_wide_report.file_info( "a", 1, time.localtime( 0 ) ) ]
              , [ boost_wide_report.file_info( "a", 1, time.localtime( 1 ) ) ]
              , ( [ "a" ], [] ) )
            ]
        for test_case in test_cases:
            source_dir_content = test_case[0]
            destination_dir_content = test_case[1]
            expected_result = test_case[2]
            d = boost_wide_report.diff( source_dir_content, destination_dir_content )
            # assertEqual instead of the long-deprecated failUnlessEqual
            # alias (removed in modern Python 3 unittest).
            self.assertEqual( d, expected_result )
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()

View file

@ -1,107 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified">
<!--
The following online services can be used to validate collected test results:
- http://apps.gotdotnet.com/xmltools/xsdvalidator/
- http://tools.decisionsoft.com/schemaValidate.html
-->
<xs:simpleType name="test_result">
<xs:restriction base="xs:NMTOKEN">
<xs:enumeration value="fail"/>
<xs:enumeration value="succeed"/>
</xs:restriction>
</xs:simpleType>
<xs:simpleType name="run_test_result">
<xs:restriction base="xs:NMTOKEN">
<xs:enumeration value="fail"/>
<xs:enumeration value="succeed"/>
<xs:enumeration value="note"/>
</xs:restriction>
</xs:simpleType>
<xs:simpleType name="test_type">
<xs:restriction base="xs:NMTOKEN">
<xs:enumeration value="compile"/>
<xs:enumeration value="compile_fail"/>
<xs:enumeration value="lib"/>
<xs:enumeration value="pyd"/>
<xs:enumeration value="run"/>
<xs:enumeration value="run_fail"/>
<xs:enumeration value="run_pyd"/>
</xs:restriction>
</xs:simpleType>
<xs:element name="compile">
<xs:complexType>
<xs:simpleContent>
<xs:extension base="xs:string">
<xs:attribute name="result" type="test_result" use="required"/>
<xs:attribute name="timestamp" type="xs:string" use="required"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
</xs:element>
<xs:element name="link">
<xs:complexType>
<xs:simpleContent>
<xs:extension base="xs:string">
<xs:attribute name="result" type="test_result" use="required"/>
<xs:attribute name="timestamp" type="xs:string" use="required"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
</xs:element>
<xs:element name="lib">
<xs:complexType>
<xs:simpleContent>
<xs:extension base="xs:string">
<xs:attribute name="result" type="test_result" use="required"/>
<xs:attribute name="timestamp" type="xs:string" use="required"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
</xs:element>
<xs:element name="run">
<xs:complexType>
<xs:simpleContent>
<xs:extension base="xs:string">
<xs:attribute name="result" type="run_test_result" use="required"/>
<xs:attribute name="timestamp" type="xs:string" use="required"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
</xs:element>
<xs:element name="test-log">
<xs:complexType>
<xs:sequence>
<xs:element ref="compile" minOccurs="0"/>
<xs:element ref="link" minOccurs="0"/>
<xs:element ref="run" minOccurs="0"/>
<xs:element ref="lib" minOccurs="0"/>
</xs:sequence>
<xs:attribute name="library" type="xs:string" use="required"/>
<xs:attribute name="test-name" type="xs:string" use="required"/>
<xs:attribute name="test-type" type="test_type" use="required"/>
<xs:attribute name="test-program" type="xs:string" use="required"/>
<xs:attribute name="target-directory" type="xs:string" use="required"/>
<xs:attribute name="toolset" type="xs:string" use="required"/>
<xs:attribute name="show-run-output" type="xs:boolean" use="required"/>
</xs:complexType>
</xs:element>
<xs:element name="tests">
<xs:complexType>
<xs:sequence>
<xs:element ref="test-log" maxOccurs="unbounded"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>

View file

@ -1,13 +0,0 @@
from accept_args import *
from char_translation_table import *
from check_existance import *
from checked_system import *
from libxslt import *
from log import *
from makedirs import *
from rename import *
from tar import *
from zip import *
import sourceforge

View file

@ -1,30 +0,0 @@
import getopt
import re
import sys
def accept_args( args_spec, args, options, usage ):
    """Parse command-line *args* against the getopt long-option spec
    *args_spec* into the *options* dict (mutated in place on top of its
    preset defaults).

    Calls *usage* and exits when --help is given or when no option beyond
    the defaults was supplied.  A first positional argument of the form
    '@file' loads additional 'name=value' option lines from that response
    file; '#' comment lines and blank lines in it are ignored.

    Returns the remaining positional arguments.
    """
    defaults_num = len( options )
    option_pairs, rest_args = getopt.getopt( args, '', args_spec )
    # Plain loop instead of map(lambda ...: options.__setitem__(...)):
    # map() is lazy in Python 3, so the original side-effect form would
    # silently do nothing there.
    for name, value in option_pairs:
        options[ name ] = value
    # 'in' instead of dict.has_key(), which Python 3 removed.
    if '--help' in options or len( options ) == defaults_num:
        usage()
        sys.exit( 1 )
    if len( rest_args ) > 0 and rest_args[0][0] == '@':
        f = open( rest_args[0][1:], 'r' )
        config_lines = f.read().splitlines()
        f.close()
        for line in config_lines:
            if re.search( r'^\s*#', line ):   # comment line
                continue
            if re.search( r'^\s*$', line ):   # blank line
                continue
            m = re.match( r'^(?P<name>.*?)=(?P<value>.*)', line )
            if not m:
                # Raising a bare string is a TypeError since Python 2.6;
                # raise a real exception object instead.
                raise Exception( 'Invalid format of config line "%s"' % line )
            options[ '--%s' % m.group( 'name' ) ] = m.group( 'value' )
    return rest_args

View file

@ -1,13 +0,0 @@
import string
def chr_or_question_mark( c ):
    """Map code point c to its character when it is printable 7-bit text;
    everything else -- including tab, VT and FF -- becomes '?'."""
    ch = chr( c )
    printable_ascii = ch in string.printable and c < 128
    if printable_ascii and c not in ( 0x09, 0x0b, 0x0c ):
        return ch
    return '?'
# Table mapping every byte to itself when printable 7-bit text, and to '?'
# otherwise (for scrubbing tool output before embedding it in reports).
# NOTE(review): string.maketrans() exists only in Python 2; under Python 3
# this module-level statement fails at import time.
char_translation_table = string.maketrans(
    ''.join( map( chr, range(0, 256) ) )
    , ''.join( map( chr_or_question_mark, range(0, 256) ) )
    )

View file

@ -1,9 +0,0 @@
import os
def check_existance( name ):
    """Probe for tool *name* by running "<name> --version"; raise when it
    cannot be run.

    os.popen().close() returns None on success and a non-None status
    otherwise, so any non-None result means the tool is missing or broken.
    (Function name keeps the original "existance" spelling -- callers use it.)
    """
    pipe = os.popen( '%s --version' % name )
    pipe.read()  # drain the output so close() reports the final status
    exit_status = pipe.close()
    if exit_status is not None:
        raise Exception( '"%s" is required' % name )

View file

@ -1,22 +0,0 @@
import os
import string
import sys
def system( commands ):
    """Run a sequence of shell commands and return the exit status.

    On Windows the commands are written to a batch file (tmp.cmd, left
    behind in the current directory -- presumably to sidestep command-line
    length/quoting limits; TODO confirm) and run as one script.  Elsewhere
    they are chained with '&&' so the first failure stops the sequence.
    """
    if sys.platform == 'win32':
        # str.join instead of string.join(), which Python 3 removed.
        f = open( 'tmp.cmd', 'w' )
        f.write( '\n'.join( commands ) )
        f.close()
        return os.system( 'tmp.cmd' )
    else:
        return os.system( '&&'.join( commands ) )
def checked_system( commands, valid_return_codes = [ 0 ] ):
    """Run *commands* via system() and return the exit status; raise unless
    the status is 0 or listed in *valid_return_codes*."""
    rc = system( commands )
    acceptable = [ 0 ] + valid_return_codes
    if rc in acceptable:
        return rc
    raise Exception( 'Command sequence "%s" failed with return code %d' % ( commands, rc ) )

View file

@ -1,49 +0,0 @@
# Copyright (c) MetaCommunications, Inc. 2003-2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import utils.makedirs
import utils.rename
import os.path
import os
import sys
def xslt_param( path, replace_spaces = 1 ):
    """Normalise a filesystem path for use as an XSLT parameter value.

    Backslashes become forward slashes on every platform; on Windows,
    spaces are additionally %20-escaped unless replace_spaces is false.
    """
    normalised = path.replace( '\\', '/' )
    if replace_spaces and sys.platform == 'win32':
        normalised = normalised.replace( ' ', '%20' )
    return normalised
def libxslt( log, xml_file, xsl_file, output_file, parameters = None ):
    # Apply xsl_file to xml_file with the external xsltproc tool, writing
    # output_file; raises on a non-zero xsltproc exit status.
    #
    # Side effects: creates output_file's directory; on Windows it also
    # chdir()s into the stylesheet's directory, and the escaped parameter
    # values are written back into the caller's *parameters* dict in place.
    utils.makedirs( os.path.dirname( output_file ) )
    if sys.platform == 'win32':
        os.chdir( os.path.dirname( xsl_file ) )
    transform_command = 'xsltproc'
    transform_command = transform_command + ' -o ' + '"%s"' % xslt_param( output_file )
    if parameters is not None:
        for i in parameters:
            if parameters[i]:
                # NOTE(review): mutates the caller's dict -- the escaped
                # value is stored back before being put on the command line.
                parameters[i] = xslt_param( parameters[i] )
                transform_command = transform_command + ' --param %s "\'%s\'" ' % ( i, parameters[ i ] )
    transform_command = transform_command + ' "%s" ' % xslt_param( xsl_file )
    transform_command = transform_command + ' "%s" ' % xslt_param( xml_file )
    log( transform_command )
    rc = os.system( transform_command )
    if rc != 0:
        raise Exception( '"%s" failed with return code %d' % ( transform_command, rc ) )
    # The %20 escaping may make the path xsltproc actually wrote differ
    # from the requested one; rename it back when that happened.
    output_file = xslt_param( output_file, 0 )
    xlst_output_file = xslt_param( output_file )
    if output_file != xlst_output_file and os.path.exists( xlst_output_file ):
        utils.rename( log, xlst_output_file, output_file )

View file

@ -1,18 +0,0 @@
import inspect
import sys
def log_level():
    """Compute the indentation level for log output.

    Walks the call stack -- skipping the three innermost frames, presumably
    this helper plus its logging callers (TODO confirm) -- and sums every
    '__log__' local it finds, so a caller can raise the nesting depth of
    everything it logs by defining a __log__ local.
    """
    frames = inspect.stack()
    level = 0
    for frame_record in frames[ 3: ]:
        frame_locals = frame_record[0].f_locals
        # 'in' instead of dict.has_key(), which Python 3 removed.
        if '__log__' in frame_locals:
            level = level + frame_locals[ '__log__' ]
    return level
def stdlog( message ):
    """Write one '#'-prefixed line to stderr, indented by log_level()."""
    indent = ' ' * log_level()
    sys.stderr.write( '# %s%s\n' % ( indent, message ) )
    sys.stderr.flush()

# Default log sink used throughout the regression tools.
log = stdlog

View file

@ -1,7 +0,0 @@
import os.path
import os
def makedirs( path ):
    """Create *path* and any missing parents; a no-op when it already exists."""
    if os.path.exists( path ):
        return
    os.makedirs( path )

View file

@ -1,17 +0,0 @@
# Copyright (c) MetaCommunications, Inc. 2003-2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os.path
import os
def rename( log, src, dst ):
    """Move *src* to *dst*, logging the move and overwriting any existing
    destination first (plain os.rename refuses to overwrite on Windows)."""
    log( 'Renaming %s to %s' % ( src, dst ) )
    destination_exists = os.path.exists( dst )
    if destination_exists:
        os.unlink( dst )
    os.rename( src, dst )

View file

@ -1,13 +0,0 @@
import smtplib
def send_mail( mail, subject, msg = '' ):
    """Send a self-addressed notification through the 'mail.<domain>' SMTP
    host derived from the given address."""
    domain = mail.split( '@' )[-1]
    server = smtplib.SMTP( 'mail.%s' % domain )
    body = 'Subject: %s\nTo: %s\n\n%s' % ( subject, mail, msg )
    server.sendmail( mail, [ mail ], body )

View file

@ -1,48 +0,0 @@
import utils.checked_system
import os
import sys
site_dir = '/home/groups/b/bo/boost/htdocs/'
def download( source, destination, user ):
    # Pull *source* from the SourceForge web area (site_dir) into the local
    # *destination* via rsync over ssh.  On Windows the local path is first
    # converted with cygpath so rsync (a cygwin tool) understands it.
    if sys.platform == 'win32':
        destination = os.popen( 'cygpath "%s"' % destination ).read().splitlines()[0]
    utils.checked_system( [
        'rsync -v -r -z --progress %(user)s@shell.sourceforge.net:%(site_dir)s%(source)s %(dest)s'
            % { 'user': user, 'site_dir': site_dir, 'source': source, 'dest': destination }
        ] )
def upload( source, destination, user ):
    # Push local *source* to *destination* under the SourceForge web area
    # (site_dir) via rsync over ssh; cygpath converts the local path on
    # Windows, as in download() above.
    if sys.platform == 'win32':
        source = os.popen( 'cygpath "%s"' % source ).read().splitlines()[0]
    utils.checked_system( [
        'rsync -v -r -z --progress %(source)s %(user)s@shell.sourceforge.net:%(site_dir)s%(dest)s'
            % { 'user': user, 'site_dir': site_dir, 'source': source, 'dest': destination }
        ] )
def checked_system( commands, user, background = False ):
    # Run *commands*, joined with '&&', on the SourceForge shell server via
    # ssh; background = True adds -f so ssh detaches immediately.
    if not background:
        cmd = 'ssh -l %s shell.sourceforge.net "%s"'
    else:
        cmd = 'ssh -f -l %s shell.sourceforge.net "%s"'
    utils.checked_system(
        [ cmd % ( user, '&&'.join( commands ) ) ]
        )
def untar( archive, user, background ):
    # Unpack *archive* in place on the server (inside site_dir), then
    # delete the archive file.
    # NOTE(review): the rm uses the original *archive* path even though the
    # shell has cd'd into its directory -- confirm that path still resolves
    # relative to the server-side working directory.
    checked_system(
        [
            'cd %s' % os.path.join( site_dir, os.path.dirname( archive ) )
            , 'tar -x -z --overwrite --mode=+w -f %s' % os.path.basename( archive )
            , 'rm -f %s' % archive
            ]
        , user = user
        , background = background
        )

View file

@ -1,16 +0,0 @@
import utils.checked_system
import os.path
def tar( source_dir, archive_name ):
    # Create ../archive_name as a gzip-compressed (-z) tar of source_dir's
    # contents; paths in the archive are relative to source_dir.
    utils.checked_system( [
        'cd %s' % source_dir
        , 'tar -c -f ../%s -z *' % archive_name
        ] )
def untar( archive_path ):
    # Unpack archive_path in the directory that contains it.
    # NOTE(review): extraction uses -j (bzip2) while tar() above compresses
    # with -z (gzip) -- confirm the two formats are really meant to differ.
    #utils.checked_system( [ 'tar -xjf "%s"' % archive_path ] )
    utils.checked_system( [
        'cd %s' % os.path.dirname( archive_path )
        , 'tar -xjf "%s"' % os.path.basename( archive_path )
        ] )

View file

@ -1,12 +0,0 @@
import zipfile
import os.path
def unzip( archive_path, result_dir ):
    """Extract every entry of the zip archive at *archive_path* into
    *result_dir*.

    Unlike a plain per-entry open/write loop, this creates missing parent
    directories and skips directory entries, so archives containing
    sub-directories extract correctly instead of failing on open().
    """
    z = zipfile.ZipFile( archive_path, 'r', zipfile.ZIP_DEFLATED )
    try:
        for entry in z.infolist():
            target = os.path.join( result_dir, entry.filename )
            if entry.filename.endswith( '/' ):
                # Directory entry: just make sure the directory exists.
                if not os.path.isdir( target ):
                    os.makedirs( target )
                continue
            parent = os.path.dirname( target )
            if parent and not os.path.isdir( parent ):
                os.makedirs( parent )
            result = open( target, 'wb' )
            try:
                result.write( z.read( entry.filename ) )
            finally:
                result.close()
    finally:
        z.close()

View file

@ -1,144 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
Copyright MetaCommunications, Inc. 2003-2004.
Distributed under the Boost Software License, Version 1.0. (See
accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
-->
<xsl:stylesheet
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:meta="http://www.meta-comm.com"
exclude-result-prefixes="meta"
version="1.0">
<xsl:import href="common.xsl"/>
<xsl:output method="xml" encoding="utf-8"/>
<xsl:param name="expected_results_file"/>
<xsl:param name="failures_markup_file"/>
<xsl:variable name="expected_results" select="document( $expected_results_file )" />
<xsl:variable name="failures_markup" select="document( $failures_markup_file )" />
<!-- Entry point: pass the whole document through the test-log / identity
     templates below. -->
<xsl:template match="/">
<xsl:apply-templates/>
</xsl:template>
<!-- Annotates each test-log element with result/expected-result/status/is-new
     attributes and an inline <notes> element, by comparing the actual run
     outcome against the expected-results document and the explicit failure
     markup document (both loaded as global variables above). -->
<xsl:template match="test-log">
<xsl:variable name="library" select="@library"/>
<xsl:variable name="test-name" select="@test-name"/>
<xsl:variable name="toolset" select="@toolset"/>
<xsl:element name="{local-name()}">
<xsl:apply-templates select="@*"/>
<!-- "fail" if any step (compile/link/run/...) failed, else "success". -->
<xsl:variable name="actual_result">
<xsl:choose>
<!-- Hack: needs to be researched (and removed). See M.Wille's incident. -->
<xsl:when test="run/@result='succeed' and lib/@result='fail'">
<xsl:text>success</xsl:text>
</xsl:when>
<xsl:when test="./*/@result = 'fail'" >
<xsl:text>fail</xsl:text>
</xsl:when>
<xsl:otherwise>
<xsl:text>success</xsl:text>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<!-- Expected-result entry for this exact library/test/toolset (test name
     '*' acts as a wildcard row). -->
<xsl:variable name="expected_results_test_case" select="$expected_results//*/test-result[ @library=$library and ( @test-name=$test-name or @test-name='*' ) and @toolset = $toolset]"/>
<xsl:variable name="new_failures_markup" select="$failures_markup//library[@name=$library]/mark-expected-failures[ meta:re_match( test/@name, $test-name ) and meta:re_match( toolset/@name, $toolset ) ]"/>
<!-- NOTE(review): this local $failures_markup deliberately SHADOWS the
     global document variable of the same name from here on; keep the
     declaration order if editing. -->
<xsl:variable name="failures_markup" select="$failures_markup//library[@name=$library]/test[ meta:re_match( @name, $test-name ) ]/mark-failure[ meta:re_match( toolset/@name, $toolset ) ]"/>
<!-- A test is "new" when no expected-results row exists for it. -->
<xsl:variable name="is_new">
<xsl:choose>
<xsl:when test="$expected_results_test_case">
<xsl:text>no</xsl:text>
</xsl:when>
<xsl:otherwise>yes</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<!-- Explicit failure markup overrides the expected-results table. -->
<xsl:variable name="expected_result">
<xsl:choose>
<xsl:when test='count( $failures_markup ) &gt; 0 or count( $new_failures_markup ) &gt; 0'>
<xsl:text>fail</xsl:text>
</xsl:when>
<xsl:otherwise>
<xsl:choose>
<xsl:when test="$expected_results_test_case and $expected_results_test_case/@result = 'fail'">
<xsl:text>fail</xsl:text>
</xsl:when>
<xsl:otherwise>success</xsl:otherwise>
</xsl:choose>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<!-- NOTE(review): both branches of this choose compute the same comparison;
     the split looks like a leftover from an earlier, richer status scheme. -->
<xsl:variable name="status">
<xsl:choose>
<xsl:when test="count( $failures_markup ) &gt; 0 or count( $new_failures_markup ) &gt; 0">
<xsl:choose>
<xsl:when test="$expected_result = $actual_result">expected</xsl:when>
<xsl:otherwise>unexpected</xsl:otherwise>
</xsl:choose>
</xsl:when>
<xsl:otherwise>
<xsl:choose>
<xsl:when test="$expected_result = $actual_result">expected</xsl:when>
<xsl:otherwise>unexpected</xsl:otherwise>
</xsl:choose>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<!-- Collect author notes attached to whichever markup matched. -->
<xsl:variable name="notes">
<xsl:choose>
<xsl:when test='count( $failures_markup ) &gt; 0'>
<xsl:for-each select="$failures_markup/note">
<xsl:copy-of select="."/>
</xsl:for-each>
</xsl:when>
<xsl:when test='count( $new_failures_markup ) &gt; 0'>
<xsl:for-each select="$new_failures_markup/note">
<xsl:copy-of select="."/>
</xsl:for-each>
</xsl:when>
</xsl:choose>
</xsl:variable>
<xsl:attribute name="result"><xsl:value-of select="$actual_result"/></xsl:attribute>
<xsl:attribute name="expected-result"><xsl:value-of select="$expected_result"/></xsl:attribute>
<xsl:attribute name="status"><xsl:value-of select="$status"/></xsl:attribute>
<xsl:attribute name="is-new"><xsl:value-of select="$is_new"/></xsl:attribute>
<!--<a><xsl:value-of select="count( $failures_markup )"/></a>-->
<xsl:element name="notes"><xsl:copy-of select="$notes"/></xsl:element>
<xsl:apply-templates select="node()" />
</xsl:element>
</xsl:template>
<!-- Identity transform for every other element: rebuild it (namespace-free
     via local-name) and recurse into attributes and children. -->
<xsl:template match="*">
<xsl:element name="{local-name()}">
<xsl:apply-templates select="@*"/>
<xsl:apply-templates select="node()" />
</xsl:element>
</xsl:template>
<!-- Attributes are copied through unchanged. -->
<xsl:template match="@*">
<xsl:copy-of select="." />
</xsl:template>
</xsl:stylesheet>

View file

@ -1,182 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
Copyright MetaCommunications, Inc. 2003-2004.
Distributed under the Boost Software License, Version 1.0. (See
accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
-->
<xsl:stylesheet
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:exsl="http://exslt.org/common"
xmlns:func="http://exslt.org/functions"
xmlns:str="http://exslt.org/strings"
xmlns:meta="http://www.meta-comm.com"
extension-element-prefixes="func"
version="1.0">
<xsl:variable name="output_directory" select="'output'"/>
<!-- Emits one <toolset toolset="..." required="yes|no" sort="a|z"/> element
     per entry in $toolsets, sorted so that required toolsets come first
     (sort key 'a' before 'z'). -->
<xsl:template name="get_toolsets">
<xsl:param name="toolsets"/>
<xsl:param name="required-toolsets"/>
<xsl:variable name="toolset_output">
<xsl:for-each select="$toolsets">
<xsl:variable name="toolset" select="."/>
<xsl:element name="toolset">
<xsl:attribute name="toolset"><xsl:value-of select="$toolset"/></xsl:attribute>
<xsl:choose>
<!-- BUG FIX: previously tested $required_toolsets (underscore), which is
     not a declared variable -- the parameter is named "required-toolsets".
     As written, no toolset could ever be marked required. -->
<xsl:when test="$required-toolsets[ $toolset = @name ]">
<xsl:attribute name="required">yes</xsl:attribute>
<xsl:attribute name="sort">a</xsl:attribute>
</xsl:when>
<xsl:otherwise>
<xsl:attribute name="required">no</xsl:attribute>
<xsl:attribute name="sort">z</xsl:attribute>
</xsl:otherwise>
</xsl:choose>
</xsl:element>
</xsl:for-each>
</xsl:variable>
<xsl:for-each select="exsl:node-set( $toolset_output )/toolset">
<xsl:sort select="concat( @sort, ' ', @toolset)" order="ascending"/>
<xsl:copy-of select="."/>
</xsl:for-each>
</xsl:template>
<!-- True when a test log's output should be shown in the report: any
     non-success that is not on a platform marked unusable for the library,
     or any test that explicitly sets show-run-output='true'. -->
<func:function name="meta:show_output">
<xsl:param name="explicit_markup"/>
<xsl:param name="test_log"/>
<func:result select="$test_log/@result != 'success' and not( meta:is_unusable( $explicit_markup, $test_log/@library, $test_log/@toolset )) or $test_log/@show-run-output = 'true'"/>
</func:function>
<!-- True for test types that represent an actual test case (compile,
     compile_fail, run, run_pyd), as opposed to auxiliary build targets. -->
<func:function name="meta:is_test_log_a_test_case">
<xsl:param name="test_log"/>
<func:result select="$test_log/@test-type='compile' or $test_log/@test-type='compile_fail' or $test_log/@test-type='run' or $test_log/@test-type='run_pyd'"/>
</func:function>
<!-- True when the library author marked this library unusable on the given
     toolset (or on all toolsets via name='*') in the explicit markup. -->
<func:function name="meta:is_unusable">
<xsl:param name="explicit_markup"/>
<xsl:param name="library"/>
<xsl:param name="toolset"/>
<func:result select="$explicit_markup//library[ @name = $library ]/mark-unusable[ toolset/@name = $toolset or toolset/@name='*' ]"/>
</func:function>
<!-- Glob-style match of $text against $pattern.  Supported forms: exact
     string, '*', '*tail', 'head*', and '*middle*'.  Patterns with an
     interior '*' (e.g. 'a*b') fall through every branch and yield an empty
     (false) result, as before. -->
<func:function name="meta:re_match">
<xsl:param name="pattern"/>
<xsl:param name="text"/>
<xsl:choose>
<xsl:when test="not( contains( $pattern, '*' ) )">
<func:result select="$text = $pattern"/>
</xsl:when>
<xsl:when test="$pattern = '*'">
<func:result select="1 = 1"/>
</xsl:when>
<xsl:when test="substring( $pattern, 1, 1 ) = '*' and substring( $pattern, string-length($pattern), 1 ) = '*' ">
<func:result select="contains( $text, substring( $pattern, 2, string-length($pattern) - 2 ) ) "/>
</xsl:when>
<xsl:when test="substring( $pattern, 1, 1 ) = '*'">
<xsl:variable name="pattern_tail" select="substring( $pattern, 2, string-length($pattern) - 1 )"/>
<func:result select="substring( $text, string-length($text) - string-length($pattern_tail) + 1, string-length($pattern_tail) ) = $pattern_tail"/>
</xsl:when>
<xsl:when test="substring( $pattern, string-length($pattern), 1 ) = '*' ">
<!-- BUG FIX: the head length was previously string-length($pattern) - 2,
     which drops one character too many (e.g. 'abc*' matched any text
     starting with 'ab').  Only the trailing '*' must be removed. -->
<xsl:variable name="pattern_head" select="substring( $pattern, 1, string-length($pattern) - 1 )"/>
<func:result select="substring( $text, 1, string-length($pattern_head) ) = $pattern_head "/>
</xsl:when>
</xsl:choose>
</func:function>
<!-- Flattens a filesystem-style path into a single token usable as a file
     name by mapping '/' and '.' to '-'. -->
<func:function name="meta:encode_path">
<xsl:param name="path"/>
<func:result select="translate( translate( $path, '/', '-' ), './', '-' )"/>
</func:function>
<!-- Identity mapping of toolset names; kept as an extension point for
     display-name rewriting. -->
<func:function name="meta:toolset_name">
<xsl:param name="name"/>
<func:result select="$name"/>
</func:function>
<!-- Builds the per-test HTML output path: $output_directory/<encoded>.html -->
<func:function name="meta:output_file_path">
<xsl:param name="path"/>
<func:result select="concat( $output_directory, '/', meta:encode_path( $path ), '.html' )"/>
</func:function>
<!-- Renders a block of notes; each note may reference a shared note in the
     explicit markup document by @refid, which show_note then merges in. -->
<xsl:template name="show_notes">
<xsl:param name="explicit_markup"/>
<xsl:param name="notes"/>
<div class="notes">
<xsl:for-each select="$notes">
<div>
<xsl:variable name="refid" select="@refid"/>
<xsl:call-template name="show_note">
<xsl:with-param name="note" select="."/>
<xsl:with-param name="reference" select="$explicit_markup//note[ $refid = @id ]"/>
</xsl:call-template>
</div>
</xsl:for-each>
</div>
</xsl:template>
<!-- Renders a single note: an [author date] header (each part taken from the
     note itself, falling back to the referenced shared note), then the body
     of the referenced note followed by the note's own body. -->
<xsl:template name="show_note">
<xsl:param name="note"/>
<xsl:param name="reference"/>
<div class="note">
<xsl:variable name="author">
<xsl:choose>
<xsl:when test="$note/@author">
<xsl:value-of select="$note/@author"/>
</xsl:when>
<xsl:when test="$reference">
<xsl:value-of select="$reference/@author"/>
</xsl:when>
<xsl:otherwise>
<xsl:text/>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<xsl:variable name="date">
<xsl:choose>
<xsl:when test="$note/@date">
<xsl:value-of select="$note/@date"/>
</xsl:when>
<xsl:when test="$reference">
<xsl:value-of select="$reference/@date"/>
</xsl:when>
<xsl:otherwise>
<xsl:text/>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<span class="note-header">
<xsl:choose>
<xsl:when test="$author != '' and $date != ''">
[&#160;<xsl:value-of select="$author"/>&#160;<xsl:value-of select="$date"/>&#160;]
</xsl:when>
<xsl:when test="$author != ''">
[&#160;<xsl:value-of select="$author"/>&#160;]
</xsl:when>
<xsl:when test="$date != ''">
[&#160;<xsl:value-of select="$date"/>&#160;]
</xsl:when>
</xsl:choose>
</span>
<xsl:if test="$reference">
<xsl:copy-of select="$reference/node()"/>
</xsl:if>
<xsl:copy-of select="$note/node()"/>
</div>
</xsl:template>
</xsl:stylesheet>

View file

@ -1,36 +0,0 @@
<!--
Copyright MetaCommunications, Inc. 2003-2004.
Distributed under the Boost Software License, Version 1.0. (See
accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
-->
<div class="legend">
<table border="0" summary="report description">
<tr>
<td>
<table border="0" summary="legend">
<tr>
<td>
<table width="100%" summary="unexpected new fail legend">
<tr class="library-row-single"><td class="library-fail-unexpected-new">&lt;toolset&gt;</td></tr>
</table>
</td>
<td class="legend-item">Failure on a newly added test/compiler.</td>
</tr>
<tr>
<td>
<table width="100%" summary="unexpected fail legend">
<tr class="library-row-single"><td class="library-fail-unexpected">&lt;toolset&gt;</td></tr>
</table>
</td>
<td class="legend-item">Unexpected failure.</td>
</tr>
</table>
</td>
</tr>
</table>
</div>

View file

@ -1,72 +0,0 @@
<!--
Copyright MetaCommunications, Inc. 2003-2004.
Distributed under the Boost Software License, Version 1.0. (See
accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
-->
<div class="legend">
<table border="0" summary="report description">
<tr>
<td>
<table border="0" summary="legend">
<tr>
<td>
<table width="100%" summary="success legend">
<tr class="library-row-single"><td class="library-success-expected">pass</td></tr>
</table>
</td>
<td class="legend-item">Success.</td>
</tr>
<tr>
<td>
<table width="100%" summary="unexpected pass legend">
<tr class="library-row-single"><td class="library-success-unexpected">pass</td></tr>
</table>
</td>
<td class="legend-item">Unexpected success.</td>
</tr>
<tr>
<td>
<table width="100%" summary="expected fail legend">
<tr class="library-row-single"><td class="library-fail-expected">fail</td></tr>
</table>
</td>
<td class="legend-item">Expected failure.</td>
</tr>
</table>
</td>
<td>
<table border="0" summary="legend">
<tr>
<td>
<table width="100%" summary="unexpected new fail legend">
<tr class="library-row-single"><td class="library-fail-unexpected-new">fail</td></tr>
</table>
</td>
<td class="legend-item">Failure on a newly added test/compiler.</td>
</tr>
<tr>
<td>
<table width="100%" summary="unexpected fail legend">
<tr class="library-row-single"><td class="library-fail-unexpected">fail</td></tr>
</table>
</td>
<td class="legend-item">Unexpected failure.</td>
</tr>
<tr>
<td>
<table width="100%" summary="unusable legend">
<tr class="library-row-single"><td class="library-unusable">n/a</td></tr>
</table>
</td>
<td class="legend-item">The library author marked it as unusable on particular platform/toolset.</td>
</tr>
</table>
</td>
</tr>
</table>
</div>

View file

@ -1,65 +0,0 @@
<!--
Copyright MetaCommunications, Inc. 2003-2004.
Distributed under the Boost Software License, Version 1.0. (See
accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
-->
<div class="legend">
<table border="0" summary="report description">
<tr>
<td>
<table border="0" summary="legend">
<tr>
<td>
<table width="100%" summary="success legend">
<tr class="library-row-single"><td class="library-user-success">pass</td></tr>
</table>
</td>
<td class="legend-item">
The test passes.
</td>
</tr>
<tr>
<td>
<table width="100%" summary="fail legend">
<tr class="library-row-single"><td class="library-user-fail-expected">fail</td></tr>
</table>
</td>
<td class="legend-item">
A known test failure; click on the link to see the log.
</td>
</tr>
</table>
</td>
<td>
<table border="0" summary="legend">
<tr>
<td>
<table width="100%" summary="unexpected fail legend">
<tr class="library-row-single"><td class="library-user-fail-unexpected">unexp.</td></tr>
</table>
</td>
<td class="legend-item">
The test is known to pass, but is currently failing;
click on the link to see the log.
</td>
</tr>
<tr>
<td>
<table width="100%" summary="unusable legend">
<tr class="library-row-single"><td class="library-unusable">n/a</td></tr>
</table>
</td>
<td class="legend-item">
The library author marked it as unusable on particular platform/toolset.
</td>
</tr>
</table>
</td>
</tr>
</table>
</div>

View file

@ -1,24 +0,0 @@
<!--
Copyright MetaCommunications, Inc. 2003-2004.
Distributed under the Boost Software License, Version 1.0. (See
accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
-->
<div class="tinyurl">
<script type="text/javascript">
<!--
function make_tinyurl()
{
window.open( 'http://tinyurl.com/create.php?url=' + parent.location.href );
}
//-->
</script>
<a href="javascript:make_tinyurl()">TinyUrl</a>
</div>

View file

@ -1,525 +0,0 @@
/*
Copyright MetaCommunications, Inc. 2003-2004.
Distributed under the Boost Software License, Version 1.0. (See
accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
/* All reports */
body
{
background-color: white;
}
body.user-toc
{
background-color: #f0f0f0;
}
body.developer-toc
{
background-color: #f0f5ff;
}
span.super
{
vertical-align: super;
font-size: 80%;
margin-left: 3pt;
}
h1.page-title
{
text-align: left;
text-transform: capitalize;
margin-top: 10pt;
margin-bottom: 10pt;
}
img
{
display: inline;
}
a.hover-link:link
,a.hover-link:visited
,a.hover-link:active
{
color: black;
text-decoration: none;
}
a.hover-link:hover
{
color: black;
text-decoration: underline;
}
div.legend
{
width: 80%;
background-color: #f5f5f5;
margin-top: 10pt;
}
div.comment
{
width: 80%;
background-color: #f5f5f5;
padding-left: 10pt;
padding-right: 10pt;
padding-bottom: 10pt;
}
div.tinyurl
{
margin-top: 10pt;
}
table.header-table
{
margin-left: 10pt;
margin-top: 20pt;
margin-bottom: 10pt;
width: 80%;
}
td.header-item
{
text-align: left;
vertical-align: top;
font-weight: bold;
}
td.header-item-content
{
padding-left: 20pt;
padding-bottom: 10pt;
}
td.legend-item
{
padding-left: 5pt;
/* padding-top: 2pt;*/
}
div.acknowledgement
{
text-align: left;
margin-top: 10pt;
margin-left: 5pt;
margin-bottom: 10pt;
}
div.report-info
{
text-align: left;
margin-bottom: 10pt;
width: 80%;
}
div.purpose
{
text-align: left;
margin-left: 5pt;
margin-top: 10pt;
}
div.library-name
{
margin-top: 20pt;
margin-bottom: 10pt;
text-align: left;
font-size: 125%;
font-weight: bold;
}
table.summary-table
,table.library-table
{
border-collapse: collapse;
border: 2px solid black;
margin: 5px;
}
table.summary-table td
,table.library-table td
{
text-align: center;
border-left: 1px solid black;
border-right: 1px solid black;
}
a.log-link:link
,a.log-link:visited
{
color: black;
/* text-decoration: none; */
}
a.log-link:active
,a.log-link:hover
,a.legend-link:link
,a.legend-link:visited
,a.legend-link:active
,a.legend-link:hover
{
color: black;
text-decoration: underline;
}
td.toolset-name
,td.required-toolset-name
{
vertical-align: middle;
padding-left: 3pt;
padding-right: 3pt;
word-spacing: -3pt;
}
td.required-toolset-name
{
font-weight: bold;
}
td.library-corner-case-header
{
}
tr.summary-row-first td
, tr.library-row-first td
{
border-top: 1px solid gray;
border-bottom: 0px;
}
tr.summary-row-last td
, tr.library-row-last td
{
border-top: 0px;
border-bottom: 1px solid gray;
}
tr.summary-row-single td
, tr.library-row-single td
{
border-top: 1px solid gray;
border-bottom: 1px solid gray;
}
tr.summary-row td
, tr.library-row td
{
border-bottom: 0px;
border-top: 0px;
}
td.library-success-expected
,td.library-fail-expected
,td.library-user-fail-expected
,td.library-user-success
,td.summary-expected
,td.summary-user-fail-expected
,td.summary-user-success
,td.summary-unknown-status
{
width: 60pt;
text-align: center;
background-color: lightgreen;
border-left: 1px solid black;
border-right: 1px solid black;
padding-left: 2pt;
padding-right: 2pt;
}
td.summary-unknown-status
{
background-color: white;
}
td.library-success-unexpected
,td.summary-success-unexpected
{
width: 60pt;
text-align: center;
background-color: green;
font-weight: bold;
color: white;
border: 0px;
padding-left: 2pt;
padding-right: 2pt;
}
tr.summary-row td.summary-fail-unexpected
,tr.summary-row-first td.summary-fail-unexpected
,tr.summary-row-last td.summary-fail-unexpected
,tr.summary-row-single td.summary-fail-unexpected
,tr.summary-row td.summary-user-fail-unexpected
,tr.summary-row-first td.summary-user-fail-unexpected
,tr.summary-row-last td.summary-user-fail-unexpected
,tr.summary-row-single td.summary-user-fail-unexpected
,tr.library-row td.library-user-fail-unexpected
,tr.library-row-first td.library-user-fail-unexpected
,tr.library-row-last td.library-user-fail-unexpected
,tr.library-row-single td.library-user-fail-unexpected
{
width: 60pt;
text-align: center;
background-color: red;
color: black;
border: 2px solid black;
padding-left: 2pt;
padding-right: 2pt;
}
tr.summary-row td.summary-missing
, tr.summary-row-first td.summary-missing
, tr.summary-row-last td.summary-missing
, tr.summary-row-single td.summary-missing
, tr.library-row td.library-missing
, tr.library-row-first td.library-missing
, tr.library-row-last td.library-missing
, tr.library-row-single td.library-missing
{
width: 60pt;
text-align: center;
background-color: white;
color: black;
border: 2px solid black;
padding-left: 2pt;
padding-right: 2pt;
}
tr.summary-row td.summary-unusable
, tr.summary-row-first td.summary-unusable
, tr.summary-row-last td.summary-unusable
, tr.summary-row-single td.summary-unusable
, tr.library-row td.library-unusable
, tr.library-row-first td.library-unusable
, tr.library-row-last td.library-unusable
, tr.library-row-single td.library-unusable
{
width: 60pt;
text-align: center;
background-color: silver;
color: black;
border-top: 2px solid black;
border-bottom: 2px solid black;
border-left: 2px solid black;
border-right: 2px solid black;
padding-left: 2pt;
padding-right: 2pt;
}
/* Summary */
table.summary-table td.library-name
{
width: 100pt;
padding: 0pt;
border-top: 1px solid gray;
border-bottom: 1px solid gray;
}
tr.summary-row td.summary-user-fail-unexpected
, tr.summary-row-first td.summary-user-fail-unexpected
, tr.summary-row-last td.summary-user-fail-unexpected
, tr.summary-row-single td.summary-user-fail-unexpected
{
width: 60pt;
text-align: center;
background-color: yellow;
color: black;
border: 2px solid black;
padding-left: 2pt;
padding-right: 2pt;
}
tr.summary-row td.summary-fail-unexpected-new
, tr.summary-row-first td.summary-fail-unexpected-new
, tr.summary-row-last td.summary-fail-unexpected-new
, tr.summary-row-single td.summary-fail-unexpected-new
, tr.library-row td.library-fail-unexpected-new
, tr.library-row-first td.library-fail-unexpected-new
, tr.library-row-last td.library-fail-unexpected-new
, tr.library-row-single td.library-fail-unexpected-new
{
width: 60pt;
text-align: center;
background-color: yellow;
color: black;
border: 2px solid black;
}
/* Detailed */
.library-conf-problem
{
font-size: 70%;
font-weight: normal;
}
div.library-toc
{
margin: 5pt;
}
li.library-toc-entry
{
margin-left: 5pt;
list-style-type: square;
}
div.library-footer
{
margin: 5px;
}
table.library-table td.test-name
{
width: 150pt;
padding-left: 6pt;
padding-right: 6pt;
border-right: 0;
border-top: 1px solid gray;
border-bottom: 1px solid gray;
}
table.library-table td.test-type
{
padding-right: 5px;
border-left: 0;
border-right: 0;
border-top: 1px solid gray;
border-bottom: 1px solid gray;
text-align: right;
}
tr.library-row td.library-fail-unexpected
, tr.library-row-first td.library-fail-unexpected
, tr.library-row-last td.library-fail-unexpected
, tr.library-row-single td.library-fail-unexpected
{
width: 60pt;
text-align: center;
background-color: red;
font-weight: bold;
color: black;
border: 2px solid black;
}
/* Known (expected) user-test failures render yellow, per the user legend.
   BUG FIX: the selector was previously misspelled "expectected", so this
   rule never matched and such cells kept the earlier lightgreen styling. */
td.library-user-fail-expected
{
width: 60pt;
text-align: center;
background-color: yellow;
color: black;
border: 0px solid black;
}
table.library-library-notes
{
background-color: LemonChiffon;
width: 640px;
margin-left: 5px;
margin-right: 5px;
}
tr.library-library-note
{
}
div.note
{
padding: 3pt;
}
span.note-header
{
font-weight: bold;
}
/* Log */
div.log-test-title
{
font-size: 1.5em;
font-weight: bold;
border-bottom: 1px solid black;
}
div.notes-title
{
font-weight: bold;
background-color: #ffffcc;
}
div.notes
{
padding: 3pt;
background-color: #ffffcc;
}
div.notes-title
{
font-weight: bold;
}
div.log-compiler-output-title
{
font-weight: bold;
}
div.log-linker-output-title
{
font-weight: bold;
}
div.log-run-output-title
{
font-weight: bold;
}
/* Issues page */
table.library-issues-table
{
border-collapse: collapse;
border: 2px solid black;
}
table.library-issues-table td
{
border: 1px solid #c0c0c0;
text-align: center;
margin-right: 5px;
}
table.library-issues-table td.failures-row
{
text-align: left;
padding: 0px;
}
table.issue-box tr.library-row-single td.library-fail-unexpected-new
,table.issue-box tr.library-row-single td.library-fail-unexpected
{
border: 0px;
font-weight: normal;
}

View file

@ -1,75 +0,0 @@
<!--
Copyright MetaCommunications, Inc. 2003-2004.
Distributed under the Boost Software License, Version 1.0. (See
accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
-->
<div class="legend">
<table border="0" summary="report description">
<tr>
<td>
<table border="0" summary="legend">
<tr>
<td>
<table width="100%" summary="success legend">
<tr class="summary-row-single"><td class="summary-expected">OK</td></tr>
</table>
</td>
<td class="legend-item">
All expected tests pass.
</td>
</tr>
<tr>
<td>
<table width="100%" summary="unexpected pass legend">
<tr class="summary-row-single"><td class="summary-success-unexpected">OK</td></tr>
</table>
</td>
<td class="legend-item">
All expected tests pass, and some other tests that were expected to fail
unexpectedly pass as well.
</td>
</tr>
<tr>
<td>
<table width="100%" summary="unexpected new fail legend">
<tr class="summary-row-single"><td class="summary-fail-unexpected-new">fail</td></tr>
</table>
</td>
<td class="legend-item">
There are some failures on the newly added tests/compiler(s).
</td>
</tr>
</table>
</td>
<td>
<table border="0" summary="legend">
<tr>
<td>
<table width="100%" summary="unexpected fail legend">
<tr class="summary-row-single"><td class="summary-fail-unexpected">broken</td></tr>
</table>
</td>
<td class="legend-item">
Tests that the library author expects to pass are currently failing.
</td>
</tr>
<tr>
<td>
<table width="100%" summary="unusable legend">
<tr class="summary-row-single"><td class="summary-unusable">n/a</td></tr>
</table>
</td>
<td class="legend-item">
The library author marked it as unusable on particular platform/toolset.
</td>
</tr>
</table>
</td>
</tr>
</table>
</div>

Some files were not shown because too many files have changed in this diff Show more