tools/regression: merge numerous changes from trunk

[SVN r50275]
Beman Dawes 2008-12-15 12:16:41 +00:00
parent e218b12d69
commit bade3b8ef8
15 changed files with 1220 additions and 385 deletions

@ -1,72 +1,72 @@
<?xml version="1.0" encoding="windows-1251"?>
<VisualStudioProject
ProjectType="Visual C++"
Version="7.10"
Name="process_jam_log"
ProjectGUID="{9A751791-929F-496A-8DE7-B61020619BFA}"
RootNamespace="process_jam_log"
Keyword="MakeFileProj">
<Platforms>
<Platform
Name="Win32"/>
</Platforms>
<Configurations>
<Configuration
Name="Debug|Win32"
OutputDirectory="Debug"
IntermediateDirectory="Debug"
ConfigurationType="0">
<Tool
Name="VCNMakeTool"
BuildCommandLine="cd ..\..\..\..\tools\regression\build
..\..\jam\src\bin.ntx86\bjam.exe process_jam_log variant=debug
"
ReBuildCommandLine="cd ..\..\..\..\tools\regression\build
call bjam --v2 msvc-7.1 debug clean
call bjam --v2 msvc-7.1 debug"
CleanCommandLine="cd ..\..\..\..\tools\regression\build
call bjam --v2 msvc-7.1 debug clean
"
Output="../../../../bin.v2/tools/regression/build/msvc-7.1/debug/link-static/process_jam_log.exe"/>
</Configuration>
<Configuration
Name="Release|Win32"
OutputDirectory="Release"
IntermediateDirectory="Release"
ConfigurationType="0">
<Tool
Name="VCNMakeTool"
BuildCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam process_jam_log variant=release link=static"
ReBuildCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam -a process_jam_log variant=release link=static"
CleanCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam clean"
Output="..\..\..\..\bin.v2\tools\regression\build\msvc-7.1\release\link-static\process_jam_log.exe"/>
</Configuration>
</Configurations>
<References>
</References>
<Files>
<Filter
Name="Source Files"
Filter="cpp;c;cxx;def;odl;idl;hpj;bat;asm;asmx"
UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}">
</Filter>
<Filter
Name="Header Files"
Filter="h;hpp;hxx;hm;inl;inc;xsd"
UniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}">
</Filter>
<Filter
Name="Resource Files"
Filter="rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx"
UniqueIdentifier="{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}">
</Filter>
<File
RelativePath=".\readme.txt">
</File>
</Files>
<Globals>
</Globals>
</VisualStudioProject>

@ -21,8 +21,11 @@
reports.</p>
<ul>
<li><a href="instructions.html">Instructions</a> for running the regression
tests</li>
<li><a href=
"http://beta.boost.org/development/running_regression_tests.html">Instructions</a>
for running the regression tests as part of the published regression
tests are available at the Boost web site.</li>
<li><a href="../src/process_jam_log.cpp">process_jam_log.cpp</a> -
Processes the bjam outputs, creating a file named test_log.xml for each
test encountered.</li>
@ -51,4 +54,4 @@
copy at <a href=
"http://www.boost.org/LICENSE_1_0.txt">www.boost.org/LICENSE_1_0.txt</a>)</p>
</body>
</html>
</html>

@ -1,109 +0,0 @@
<html>
<head>
<meta http-equiv="Content-Language" content="en-us">
<meta name="GENERATOR" content="Microsoft FrontPage 5.0">
<meta name="ProgId" content="FrontPage.Editor.Document">
<meta http-equiv="Content-Type" content="text/html; charset=windows-1252">
<title>Running Boost Regression Tests</title>
<link rel="stylesheet" type="text/css" href="../../../doc/html/minimal.css">
</head>
<body>
<table border="0" cellpadding="5" cellspacing="0" style="border-collapse: collapse" bordercolor="#111111" width="831">
<tr>
<td width="277">
<a href="../../../../index.htm">
<img src="../../../boost.png" alt="boost.png (6897 bytes)" align="middle" width="277" height="86" border="0"></a></td>
<td width="531" align="middle">
<font size="7">Running Boost Regression Tests</font>
</td>
</tr>
</table>
<h2>Requirements</h2>
<ul>
<li>Python 2.3 or later.<br>
&nbsp;</li>
<li>Subversion 1.4 or later.<br>
&nbsp;</li>
<li>At least 5 gigabytes of disk space per compiler to be tested.</li>
</ul>
<h2>Step by step instructions</h2>
<ol>
<li>Create a new directory for the branch you want to test.<br>
&nbsp;</li>
<li>Download the
<a href="http://svn.boost.org/svn/boost/trunk/tools/regression/src/run.py">
run.py</a> script into that directory.<br>
&nbsp;</li>
<li>Run &quot;<code>python run.py [options] [commands]</code>&quot;.</li>
</ol>
<dl>
<dd>
<pre>commands: cleanup, collect-logs, get-source, get-tools, patch,
regression, setup, show-revision, test, test-clean, test-process,
test-run, update-source, upload-logs
options:
-h, --help show this help message and exit
--runner=RUNNER runner ID (e.g. 'Metacomm')
--comment=COMMENT an HTML comment file to be inserted in the
reports
--tag=TAG the tag for the results
--toolsets=TOOLSETS comma-separated list of toolsets to test with
--incremental do incremental run (do not remove previous
binaries)
--timeout=TIMEOUT specifies the timeout, in minutes, for a single
test run/compilation
--bjam-options=BJAM_OPTIONS
options to pass to the regression test
--bjam-toolset=BJAM_TOOLSET
bootstrap toolset for 'bjam' executable
--pjl-toolset=PJL_TOOLSET
bootstrap toolset for 'process_jam_log'
executable
--platform=PLATFORM
--user=USER Boost SVN user ID
--local=LOCAL the name of the boost tarball
--force-update=FORCE_UPDATE
do an SVN update (if applicable) instead of a
clean checkout, even when performing a full run
--have-source=HAVE_SOURCE
do neither a tarball download nor an SVN update;
used primarily for testing script changes
--proxy=PROXY HTTP proxy server address and port
(e.g.'<a rel="nofollow" href="http://www.someproxy.com:3128'" target="_top">http://www.someproxy.com:3128'</a>)
--ftp-proxy=FTP_PROXY
FTP proxy server (e.g. 'ftpproxy')
--dart-server=DART_SERVER
the dart server to send results to
--debug-level=DEBUG_LEVEL
debugging level; controls the amount of
debugging output printed
--send-bjam-log send full bjam log of the regression run
--mail=MAIL email address to send run notification to
--smtp-login=SMTP_LOGIN
SMTP server address/login information, in the
following form:
&lt;user&gt;:&lt;password&gt;@&lt;host&gt;[:&lt;port&gt;]
--skip-tests=SKIP_TESTS
do not run bjam; used for testing script changes</pre>
</dd>
</dl>
<p>To test trunk use &quot;<code>--tag=trunk</code>&quot; (the default), and to test the
release use &quot;<code>--tag=branches/release</code>&quot;. Or substitute any Boost tree
of your choice.</p>
<hr>
<p>© Copyright Rene Rivera, 2007<br>
Distributed under the Boost Software License, Version 1.0. See
<a href="http://www.boost.org/LICENSE_1_0.txt">www.boost.org/LICENSE_1_0.txt</a></p>
<p>Revised
<!--webbot bot="Timestamp" S-Type="EDITED" S-Format="%B %d, %Y" startspan -->November 23, 2007<!--webbot bot="Timestamp" endspan i-checksum="39587" --> </font>
</p>
</body>
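The deleted page above describes the command-line driver; a minimal sketch of the invocation it prescribes, written in Python 2 to match the rest of the tree (the runner ID and toolset names are placeholders, not values from this commit):

import subprocess

# Hypothetical run.py invocation per the instructions above; all values are placeholders.
subprocess.call([
    'python', 'run.py',
    '--runner=my-runner',        # runner ID (e.g. 'Metacomm')
    '--toolsets=gcc-4.3.2',      # comma-separated toolsets to test with
    '--tag=trunk',               # or --tag=branches/release
])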

@ -17,6 +17,8 @@ import httplib
import os.path
import string
import sys
import re
import urlparse
def process_xml_file( input_file, output_file ):
@ -176,23 +178,32 @@ def publish_test_logs(
utils.log('Dart server error: %s' % e)
def upload_to_ftp( tag, results_file, ftp_proxy, debug_level ):
ftp_site = 'fx.meta-comm.com'
site_path = '/boost-regression'
utils.log( 'Uploading log archive "%s" to ftp://%s%s/%s' % ( results_file, ftp_site, site_path, tag ) )
def upload_to_ftp( tag, results_file, ftp_proxy, debug_level, ftp_url ):
if not ftp_url:
ftp_host = 'boost.cowic.de'
ftp_url = ''.join(['ftp','://anonymous','@',ftp_host,'/boost/do-not-publish-this-url/results/'])
utils.log( 'Uploading log archive "%s" to %s/%s' % ( results_file, ftp_url, tag ) )
ftp_parts = urlparse.urlparse(ftp_url)
ftp_netloc = re.split('[@]',ftp_parts[1])
ftp_user = re.split('[:]',ftp_netloc[0])[0]
ftp_password = re.split('[:]',ftp_netloc[0]+':anonymous')[1]
ftp_site = re.split('[:]',ftp_netloc[1])[0]
ftp_path = ftp_parts[2]
if not ftp_proxy:
ftp = ftplib.FTP( ftp_site )
ftp.set_debuglevel( debug_level )
ftp.login()
ftp.login( ftp_user, ftp_password )
else:
utils.log( ' Connecting through FTP proxy server "%s"' % ftp_proxy )
ftp = ftplib.FTP( ftp_proxy )
ftp.set_debuglevel( debug_level )
ftp.set_pasv (0) # turn off PASV mode
ftp.login( 'anonymous@%s' % ftp_site, 'anonymous@' )
ftp.login( '%s@%s' % (ftp_user,ftp_site), ftp_password )
ftp.cwd( site_path )
ftp.cwd( ftp_path )
try:
ftp.cwd( tag )
except ftplib.error_perm:
@ -323,11 +334,12 @@ def upload_logs(
, send_bjam_log = False
, timestamp_file = None
, dart_server = None
, ftp_url = None
, **unused
):
logs_archive = os.path.join( results_dir, '%s.zip' % runner_id )
upload_to_ftp( tag, logs_archive, ftp_proxy, debug_level )
upload_to_ftp( tag, logs_archive, ftp_proxy, debug_level, ftp_url )
if send_bjam_log:
bjam_log_path = os.path.join( results_dir, 'bjam.log' )
if not timestamp_file:
@ -336,7 +348,7 @@ def upload_logs(
timestamp = time.strftime( '%Y-%m-%d-%H-%M-%S', read_timestamp( timestamp_file ) )
logs_archive = os.path.join( results_dir, '%s.%s.log.zip' % ( runner_id, timestamp ) )
compress_file( bjam_log_path, logs_archive )
upload_to_ftp( '%s/logs' % tag, logs_archive, ftp_proxy, debug_level )
upload_to_ftp( '%s/logs' % tag, logs_archive, ftp_proxy, debug_level, ftp_url )
def collect_and_upload_logs(
@ -355,6 +367,7 @@ def collect_and_upload_logs(
, send_bjam_log = False
, dart_server = None
, http_proxy = None
, ftp_url = None
, **unused
):
@ -383,6 +396,7 @@ def collect_and_upload_logs(
, send_bjam_log
, timestamp_file
, dart_server = dart_server
, ftp_url = ftp_url
)
@ -404,6 +418,7 @@ def accept_args( args ):
, 'help'
, 'dart-server='
, 'revision='
, 'ftp='
]
options = {
@ -419,6 +434,7 @@ def accept_args( args ):
, '--debug-level' : 0
, '--dart-server' : 'beta.boost.org:8081'
, '--revision' : None
, '--ftp' : None
}
@ -439,7 +455,8 @@ def accept_args( args ):
, 'debug_level' : int(options[ '--debug-level' ])
, 'send_bjam_log' : options.has_key( '--send-bjam-log' )
, 'dart_server' : options[ '--dart-server' ]
, 'revision ' : options[ '--revision' ]
, 'revision' : options[ '--revision' ]
, 'ftp' : options[ '--ftp' ]
}
@ -475,6 +492,7 @@ Options:
\t--debug-level debugging level; controls the amount of debugging
\t output printed; 0 by default (no debug output)
\t--dart-server The dart server to send results to.
\t--ftp The ftp URL to upload results to.
''' % '\n\t'.join( commands.keys() )
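The new --ftp option hands upload_to_ftp a complete URL, which it decomposes with urlparse and re as shown in the hunk above; a minimal standalone sketch of that decomposition, using the default URL from the code (an illustration only, not part of the commit):

import re
import urlparse   # Python 2 module, as imported above

ftp_url = 'ftp://anonymous@boost.cowic.de/boost/do-not-publish-this-url/results/'
parts = urlparse.urlparse(ftp_url)
netloc = re.split('[@]', parts[1])                       # ['anonymous', 'boost.cowic.de']
user = re.split('[:]', netloc[0])[0]                     # 'anonymous'
password = re.split('[:]', netloc[0] + ':anonymous')[1]  # 'anonymous' when none is given
site = re.split('[:]', netloc[1])[0]                     # 'boost.cowic.de'
path = parts[2]                                          # '/boost/do-not-publish-this-url/results/'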

@ -19,8 +19,11 @@
*******************************************************************************/
#include <boost/config/warning_disable.hpp>
#include "boost/config.hpp"
#include "boost/filesystem/operations.hpp"
#include "boost/filesystem/convenience.hpp"
#include "boost/filesystem/fstream.hpp"
#include "detail/tiny_xml.hpp"
namespace fs = boost::filesystem;
@ -90,10 +93,10 @@ namespace
std::vector<int> error_count;
// prefix for library and test hyperlink prefix
string cvs_root ( "http://boost.cvs.sourceforge.net/" );
string url_prefix_dir_view( cvs_root + "boost/boost" );
string url_prefix_checkout_view( cvs_root + "*checkout*/boost/boost" );
string url_suffix_text_view( "?view=markup&rev=HEAD" );
string svn_root ( "http://svn.boost.org/trac/boost/browser/trunk/" );
string url_prefix_dir_view( svn_root );
string url_prefix_checkout_view( svn_root );
string url_suffix_text_view( "" );
// get revision number (as a string) if boost_root is svn working copy -----//
@ -191,11 +194,11 @@ namespace
if ( !fs::exists( dir_path ) ) return false;
for ( fs::directory_iterator itr( dir_path ); itr != end_itr; ++itr )
if ( fs::is_directory( *itr )
&& itr->leaf() != ignore_dir_named )
&& itr->filename() != ignore_dir_named )
{
if ( find_file( *itr, name, path_found ) ) return true;
}
else if ( itr->leaf() == name )
else if ( itr->filename() == name )
{
path_found = *itr;
return true;
@ -281,16 +284,16 @@ namespace
// SunCC creates an internal subdirectory everywhere it writes
// object files. This confuses the target_directory() algorithm.
// This patch ignores the SunCC internal directory. Jens Maurer
if ( (*itr).leaf() == "SunWS_cache" ) continue;
if ( (*itr).filename() == "SunWS_cache" ) continue;
// SGI does something similar for template instantiations. Jens Maurer
if( (*itr).leaf() == "ii_files" ) continue;
if( (*itr).filename() == "ii_files" ) continue;
if ( child.empty() ) child = *itr;
else
{
std::cout << "Warning: only first of two target possibilities will be reported for: \n "
<< root.string() << ": " << child.leaf()
<< " and " << (*itr).leaf() << "\n";
<< root.string() << ": " << child.filename()
<< " and " << (*itr).filename() << "\n";
}
}
}
@ -349,7 +352,14 @@ const fs::path find_bin_path(const string& relative)
fs::path bin_path;
if (boost_build_v2)
{
bin_path = locate_root / "bin.v2" / relative;
if ( relative == "status" )
bin_path = locate_root / "bin.v2" / "libs";
else
{
bin_path = locate_root / "bin.v2" / relative;
if (!fs::exists(bin_path))
bin_path = locate_root / "bin" / relative;
}
if (!fs::exists(bin_path))
{
std::cerr << "warning: could not find build results for '"
@ -726,12 +736,38 @@ const fs::path find_bin_path(const string& relative)
{
results.push_back( std::string() );
do_row( *itr,
itr->leaf().substr( 0, itr->leaf().size()-5 ),
itr->filename().substr( 0, itr->filename().size()-5 ),
results[results.size()-1] );
}
}
}
// find_compilers ------------------------------------------------------------//
void find_compilers(const fs::path & bin_dir)
{
fs::directory_iterator compiler_itr( bin_dir );
if ( specific_compiler.empty() )
std::clog << "Using " << bin_dir.string() << " to determine compilers\n";
for (; compiler_itr != end_itr; ++compiler_itr )
{
if ( fs::is_directory( *compiler_itr ) // check just to be sure
&& compiler_itr->filename() != "test" ) // avoid strange directory (Jamfile bug?)
{
if ( specific_compiler.size() != 0
&& specific_compiler != compiler_itr->filename() ) continue;
toolsets.push_back( compiler_itr->filename() );
string desc( compiler_desc( compiler_itr->filename() ) );
string vers( version_desc( compiler_itr->filename() ) );
report << "<td>"
<< (desc.size() ? desc : compiler_itr->filename())
<< (vers.size() ? (string( "<br>" ) + vers ) : string( "" ))
<< "</td>\n";
error_count.push_back( 0 );
}
}
}
// do_table_body -----------------------------------------------------------//
void do_table_body( const fs::path & bin_dir )
@ -746,30 +782,56 @@ const fs::path find_bin_path(const string& relative)
jamfile.clear();
jamfile.seekg(0);
string line;
bool run_tests = false;
while( std::getline( jamfile, line ) )
{
bool v2(false);
string::size_type pos( line.find( "subinclude" ) );
if ( pos == string::npos ) {
pos = line.find( "build-project" );
string::size_type sub_pos( line.find( "subinclude" ) );
if ( sub_pos == string::npos ) {
sub_pos = line.find( "build-project" );
v2 = true;
}
if ( pos != string::npos
&& line.find( '#' ) > pos )
if ( sub_pos != string::npos
&& line.find( '#' ) > sub_pos )
{
if (v2)
pos = line.find_first_not_of( " \t./", pos+13 );
sub_pos = line.find_first_not_of( " \t./", sub_pos+13 );
else
pos = line.find_first_not_of( " \t./", pos+10 );
sub_pos = line.find_first_not_of( " \t./", sub_pos+10 );
if ( pos == string::npos ) continue;
if ( sub_pos == string::npos ) continue;
string subinclude_bin_dir(
line.substr( pos, line.find_first_of( " \t", pos )-pos ) );
line.substr( sub_pos, line.find_first_of( " \t", sub_pos )-sub_pos ) );
fs::path bin_path = find_bin_path(subinclude_bin_dir);
if (!bin_path.empty())
do_rows_for_sub_tree( bin_path, results );
}
if ( ! run_tests )
{
string::size_type run_pos = line.find("run-tests");
if ( run_pos != string::npos && line.find_first_not_of(" \t") == run_pos )
run_tests = true;
}
else
{
if ( line.find(";") != string::npos )
run_tests = false;
else
{
string::size_type pos = line.find_first_not_of( " \t" );
if ( pos != string::npos && line[pos] != '#' )
{
string::size_type end_pos = line.find_first_of(" \t#", pos);
string::iterator end = end_pos != string::npos ? line.begin() + end_pos : line.end();
string run_tests_bin_dir(line.begin() + pos, end);
fs::path bin_path = find_bin_path("libs/" + run_tests_bin_dir);
if (!bin_path.empty())
do_rows_for_sub_tree( bin_path, results );
}
}
}
}
@ -789,7 +851,15 @@ const fs::path find_bin_path(const string& relative)
// - Boost.Build V2 location with top-level "build-dir"
// - Boost.Build V1 location without ALL_LOCATE_TARGET
string relative( fs::initial_path().string() );
relative.erase( 0, boost_root.string().size()+1 );
#ifdef BOOST_WINDOWS_API
if (relative.size() > 1 && relative[1] == ':') relative[0] = std::tolower(relative[0]);
#endif
if ( relative.find(boost_root.string()) != string::npos )
relative.erase( 0, boost_root.string().size()+1 );
else if ( relative.find(locate_root.string()) != string::npos )
relative.erase( 0, locate_root.string().size()+1 );
fs::path bin_path = find_bin_path(relative);
report << "<table border=\"1\" cellspacing=\"0\" cellpadding=\"5\">\n";
@ -799,32 +869,29 @@ const fs::path find_bin_path(const string& relative)
report << "<tr><td>Library</td><td>Test Name</td>\n"
"<td><a href=\"compiler_status.html#test-type\">Test Type</a></td>\n";
fs::directory_iterator itr( bin_path );
while ( itr != end_itr
&& ((itr->string().find( ".test" ) != (itr->string().size()-5))
|| !fs::is_directory( *itr )))
++itr; // bypass chaff
if ( itr != end_itr )
if ( relative == "status" )
{
fs::directory_iterator compiler_itr( *itr );
if ( specific_compiler.empty() )
std::clog << "Using " << itr->string() << " to determine compilers\n";
for (; compiler_itr != end_itr; ++compiler_itr )
fs::recursive_directory_iterator ritr( bin_path );
fs::recursive_directory_iterator end_ritr;
while ( ritr != end_ritr
&& ((ritr->string().find( ".test" ) != (ritr->string().size()-5))
|| !fs::is_directory( *ritr )))
++ritr; // bypass chaff
if ( ritr != end_ritr )
{
if ( fs::is_directory( *compiler_itr ) // check just to be sure
&& compiler_itr->leaf() != "test" ) // avoid strange directory (Jamfile bug?)
{
if ( specific_compiler.size() != 0
&& specific_compiler != compiler_itr->leaf() ) continue;
toolsets.push_back( compiler_itr->leaf() );
string desc( compiler_desc( compiler_itr->leaf() ) );
string vers( version_desc( compiler_itr->leaf() ) );
report << "<td>"
<< (desc.size() ? desc : compiler_itr->leaf())
<< (vers.size() ? (string( "<br>" ) + vers ) : string( "" ))
<< "</td>\n";
error_count.push_back( 0 );
}
find_compilers( *ritr );
}
}
else
{
fs::directory_iterator itr( bin_path );
while ( itr != end_itr
&& ((itr->string().find( ".test" ) != (itr->string().size()-5))
|| !fs::is_directory( *itr )))
++itr; // bypass chaff
if ( itr != end_itr )
{
find_compilers( *itr );
}
}
@ -943,7 +1010,7 @@ int cpp_main( int argc, char * argv[] ) // note name!
if ( argc == 4 )
{
fs::path links_path( argv[3], fs::native );
links_name = links_path.leaf();
links_name = links_path.filename();
links_file.open( links_path );
if ( !links_file )
{

@ -130,7 +130,7 @@ namespace
if(fs::is_directory(*itr)){
std::pair<col_node::subcolumns_t::iterator, bool> result
= node.m_subcolumns.insert(
std::make_pair(itr->leaf(), col_node())
std::make_pair(itr->filename(), col_node())
);
build_node_tree(*itr, result.first->second);
}
@ -629,7 +629,7 @@ namespace
if(! fs::is_directory(*itr))
continue;
string test_name = itr->leaf();
string test_name = itr->filename();
// strip off the ".test" if there is one
string::size_type s = test_name.find( ".test" );
if(string::npos != s)
@ -728,7 +728,7 @@ namespace
throw std::string("binary path not found");
if(*leaf_itr != "libs")
--leaf_itr;
test_lib_dir.remove_leaf();
test_lib_dir.remove_filename();
}
if(leaf_itr == fs::initial_path().end())
@ -768,7 +768,7 @@ namespace
}
if(boost_root.empty())
throw std::string("boost root not found");
boost_root.remove_leaf();
boost_root.remove_filename();
}
return boost_root;
@ -888,7 +888,7 @@ int cpp_main( int argc, char * argv[] ) // note name!
if ( argc == 3 )
{
fs::path links_path( argv[2], fs::native );
links_name = links_path.leaf();
links_name = links_path.filename();
links_file.open( links_path );
if ( !links_file )
{

@ -48,7 +48,7 @@ do
cd >nul test
echo $lib_name
echo >>../../../status/library_status_contents.html "<a target=\"detail\" href=\"../libs/$lib_name/test/library_status.html\">$lib_name</a><br>"
../../../tools/regression/library_test $@
../../../tools/regression/src/library_test.sh $@
cd >nul ..
fi
@ -62,7 +62,7 @@ do
cd >nul test
echo $lib_name/$sublib_name
echo >>../../../../status/library_status_contents.html "<a target=\"detail\" href=\"../libs/$lib_name/$sublib_name/test/library_status.html\">$lib_name/$sublib_name</a><br>"
../../../../tools/regression/library_test $@
../../../../tools/regression/src/library_test.sh $@
cd >nul ..
fi
cd >nul ..

@ -6,6 +6,8 @@
// See http://www.boost.org/tools/regression for documentation.
#include <boost/config/warning_disable.hpp>
#include "detail/tiny_xml.hpp"
#include "boost/filesystem/operations.hpp"
#include "boost/filesystem/fstream.hpp"
@ -555,57 +557,117 @@ int main( int argc, char ** argv )
std::ios::sync_with_stdio(false);
fs::initial_path();
std::istream* input = 0;
if ( argc <= 1 )
std::cout << "Usage: bjam [bjam-args] | process_jam_log [--echo] [--create-directories] [--v1|v2] [locate-root]\n"
"locate-root - the same as the bjam ALL_LOCATE_TARGET\n"
" parameter, if any. Default is boost-root.\n"
"create-directories - if the directory for xml file doesn't exists - creates it.\n"
" usually used for processing logfile on different machine\n"
"v2 - bjam version 2 used (default).\n"
"v1 - bjam version 1 used.\n"
;
set_boost_root();
boost_root.normalize();
if ( argc > 1 && std::strcmp( argv[1], "--echo" ) == 0 )
{
echo = true;
--argc; ++argv;
std::cout << "process_jam_log [--echo] [--create-directories] [--v1|--v2]\n"
" [--boost-root boost_root] [--locate-root locate_root]\n"
" [--input-file input_file]\n"
" [locate-root]\n"
"--echo - verbose diagnostic output.\n"
"--create-directories - if the directory for xml file doesn't exists - creates it.\n"
" usually used for processing logfile on different machine\n"
"--v2 - bjam version 2 used (default).\n"
"--v1 - bjam version 1 used.\n"
"--boost-root - the root of the boost installation being used. If not defined\n"
" assume to run from within it and discover it heuristically.\n"
"--locate-root - the same as the bjam ALL_LOCATE_TARGET\n"
" parameter, if any. Default is boost-root.\n"
"--input-file - the output of a bjam --dump-tests run. Default is std input.\n"
;
return 1;
}
if (argc > 1 && std::strcmp( argv[1], "--create-directories" ) == 0 )
while ( argc > 1 )
{
create_dirs = true;
if ( std::strcmp( argv[1], "--echo" ) == 0 )
{
echo = true;
--argc; ++argv;
}
if ( argc > 1 && std::strcmp( argv[1], "--v2" ) == 0 )
{
boost_build_v2 = true;
--argc; ++argv;
}
if ( argc > 1 && std::strcmp( argv[1], "--v1" ) == 0 )
{
boost_build_v2 = false;
--argc; ++argv;
}
if (argc > 1)
{
locate_root = fs::path( argv[1], fs::native );
if ( !locate_root.is_complete() )
locate_root = ( fs::initial_path() / locate_root ).normalize();
}
else if ( std::strcmp( argv[1], "--create-directories" ) == 0 )
{
create_dirs = true;
--argc; ++argv;
}
else if ( std::strcmp( argv[1], "--v2" ) == 0 )
{
boost_build_v2 = true;
--argc; ++argv;
}
else if ( std::strcmp( argv[1], "--v1" ) == 0 )
{
boost_build_v2 = false;
--argc; ++argv;
}
else if ( std::strcmp( argv[1], "--boost-root" ) == 0 )
{
--argc; ++argv;
if ( argc == 1 )
{
std::cout << "Abort: option --boost-root requires a directory argument\n";
std::exit(1);
}
boost_root = fs::path( argv[1], fs::native );
if ( !boost_root.is_complete() )
boost_root = ( fs::initial_path() / boost_root ).normalize();
--argc; ++argv;
}
else
}
else if ( std::strcmp( argv[1], "--locate-root" ) == 0 )
{
--argc; ++argv;
if ( argc == 1 )
{
std::cout << "Abort: option --locate-root requires a directory argument\n";
std::exit(1);
}
locate_root = fs::path( argv[1], fs::native );
--argc; ++argv;
}
else if ( std::strcmp( argv[1], "--input-file" ) == 0 )
{
--argc; ++argv;
if ( argc == 1 )
{
std::cout << "Abort: option --input-file requires a filename argument\n";
std::exit(1);
}
input = new std::ifstream(argv[1]);
--argc; ++argv;
}
else if ( *argv[1] == '-' )
{
std::cout << "Abort: unknown option; invoke with no arguments to see list of valid options\n";
return 1;
}
else
{
locate_root = fs::path( argv[1], fs::native );
--argc; ++argv;
}
}
if ( boost_root.empty() )
{
locate_root = boost_root;
set_boost_root();
boost_root.normalize();
}
if ( locate_root.empty() )
{
locate_root = boost_root;
}
else if ( !locate_root.is_complete() )
{
locate_root = ( fs::initial_path() / locate_root ).normalize();
}
if ( input == 0 )
{
input = &std::cin;
}
std::cout << "boost_root: " << boost_root.string() << '\n'
@ -617,25 +679,18 @@ int main( int argc, char ** argv )
string content;
bool capture_lines = false;
std::istream* input;
if (argc > 1)
{
input = new std::ifstream(argv[1]);
}
else
{
input = &std::cin;
}
// This loop looks at lines for certain signatures, and accordingly:
// * Calls start_message() to start capturing lines. (start_message() will
// automatically call stop_message() if needed.)
// * Calls stop_message() to stop capturing lines.
// * Capture lines if line capture on.
static const int max_line_length = 8192;
int line_num = 0;
while ( std::getline( *input, line ) )
{
if (max_line_length < line.size()) line = line.substr(0, max_line_length);
++line_num;
std::vector<std::string> const line_parts( split( line ) );
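With the reworked option loop above, process_jam_log still reads piped bjam output by default but can now also read a saved log and be pointed at explicit roots; a sketch of both invocation styles, driven from Python 2 as the regression scripts do (all paths are placeholders):

import subprocess

# Classic form: feed bjam's --dump-tests output straight into process_jam_log.
subprocess.call('bjam -d2 --dump-tests | process_jam_log --v2', shell=True)

# New form from the usage text above: process a previously captured log file.
subprocess.call(['process_jam_log', '--v2',
                 '--boost-root', '/path/to/boost',     # placeholder
                 '--locate-root', '/path/to/build',    # placeholder
                 '--input-file', 'bjam.log'])          # placeholder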

@ -0,0 +1,468 @@
#!/usr/bin/python
# Copyright 2008 Rene Rivera
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import re
import optparse
import time
import xml.dom.minidom
import xml.dom.pulldom
from xml.sax.saxutils import unescape, escape
import os.path
import sys   # needed by gen_output(), which falls back to sys.stdout
#~ Process a bjam XML log into the XML log format for Boost result processing.
class BJamLog2Results:
def __init__(self,args=None):
opt = optparse.OptionParser(
usage="%prog [options] input")
opt.add_option( '--output',
help="output file" )
opt.add_option( '--runner',
help="runner ID (e.g. 'Metacomm')" )
opt.add_option( '--comment',
help="an HTML comment file to be inserted in the reports" )
opt.add_option( '--tag',
help="the tag for the results" )
opt.add_option( '--incremental',
help="do incremental run (do not remove previous binaries)",
action='store_true' )
opt.add_option( '--platform' )
opt.add_option( '--source' )
opt.add_option( '--revision' )
self.output = None
self.runner = None
self.comment='comment.html'
self.tag='trunk'
self.incremental=False
self.platform=''
self.source='SVN'
self.revision=None
self.input = []
( _opt_, self.input ) = opt.parse_args(args,self)
if self.incremental:
run_type = 'incremental'
else:
run_type = 'full'
self.results = xml.dom.minidom.parseString('''<?xml version="1.0" encoding="UTF-8"?>
<test-run
source="%(source)s"
runner="%(runner)s"
timestamp=""
platform="%(platform)s"
tag="%(tag)s"
run-type="%(run-type)s"
revision="%(revision)s">
</test-run>
''' % {
'source' : self.source,
'runner' : self.runner,
'platform' : self.platform,
'tag' : self.tag,
'run-type' : run_type,
'revision' : self.revision,
} )
self.test = {}
self.target_to_test = {}
self.target = {}
self.parent = {}
self.log = {}
self.add_log()
self.gen_output()
#~ print self.test
#~ print self.target
def add_log(self):
if self.input[0]:
bjam_xml = self.input[0]
else:
bjam_xml = self.input[1]
events = xml.dom.pulldom.parse(bjam_xml)
context = []
test_run = self.results.documentElement
for (event,node) in events:
if event == xml.dom.pulldom.START_ELEMENT:
context.append(node)
if node.nodeType == xml.dom.Node.ELEMENT_NODE:
x_f = self.x_name_(*context)
if x_f:
events.expandNode(node)
# expanding eats the end element, hence walking us out one level
context.pop()
# call the translator, and add returned items to the result
items = (x_f[1])(node)
if items:
for item in items:
if item:
test_run.appendChild(self.results.createTextNode("\n"))
test_run.appendChild(item)
elif event == xml.dom.pulldom.END_ELEMENT:
context.pop()
#~ Add the log items now that we've collected all of them.
items = self.log.values()
if items:
for item in items:
if item:
test_run.appendChild(self.results.createTextNode("\n"))
test_run.appendChild(item)
def gen_output(self):
if self.output:
out = open(self.output,'w')
else:
out = sys.stdout
if out:
self.results.writexml(out,encoding='utf-8')
def tostring(self):
return self.results.toxml('utf-8')
def x_name_(self, *context, **kwargs):
node = None
names = [ ]
for c in context:
if c:
if not isinstance(c,xml.dom.Node):
suffix = '_'+c.replace('-','_').replace('#','_')
else:
suffix = '_'+c.nodeName.replace('-','_').replace('#','_')
node = c
names.append('x')
names = map(lambda x: x+suffix,names)
if node:
for name in names:
if hasattr(self,name):
return (name,getattr(self,name))
return None
def x(self, *context, **kwargs):
node = None
names = [ ]
for c in context:
if c:
if not isinstance(c,xml.dom.Node):
suffix = '_'+c.replace('-','_').replace('#','_')
else:
suffix = '_'+c.nodeName.replace('-','_').replace('#','_')
node = c
names.append('x')
names = map(lambda x: x+suffix,names)
if node:
for name in names:
if hasattr(self,name):
return getattr(self,name)(node,**kwargs)
else:
assert False, 'Unknown node type %s'%(name)
return None
#~ The timestamp goes to the corresponding attribute in the result.
def x_build_timestamp( self, node ):
test_run = self.results.documentElement
test_run.setAttribute('timestamp',self.get_data(node).strip())
return None
#~ Comment file becomes a comment node.
def x_build_comment( self, node ):
comment = None
if self.comment:
comment_f = open(self.comment)
if comment_f:
comment = comment_f.read()
comment_f.close()
if not comment:
comment = ''
return [self.new_text('comment',comment)]
#~ Tests are remembered for future reference.
def x_build_test( self, node ):
test_run = self.results.documentElement
test_node = node
test_name = test_node.getAttribute('name')
self.test[test_name] = {
'library' : '/'.join(test_name.split('/')[0:-1]),
'test-name' : test_name.split('/')[-1],
'test-type' : test_node.getAttribute('type').lower(),
'test-program' : self.get_child_data(test_node,tag='source',strip=True),
'target' : self.get_child_data(test_node,tag='target',strip=True),
'info' : self.get_child_data(test_node,tag='info',strip=True)
}
#~ Add a lookup for the test given the test target.
self.target_to_test[self.test[test_name]['target']] = test_name
#~ print "--- %s\n => %s" %(self.test[test_name]['target'],test_name)
return None
#~ Process the target dependency DAG into an ancestry tree so we can look up
#~ which top-level library and test targets specific build actions correspond to.
def x_build_targets_target( self, node ):
test_run = self.results.documentElement
target_node = node
name = self.get_child_data(target_node,tag='name',strip=True)
path = self.get_child_data(target_node,tag='path',strip=True)
jam_target = self.get_child_data(target_node,tag='jam-target',strip=True)
#~ print "--- target :: %s" %(name)
#~ Map for jam targets to virtual targets.
self.target[jam_target] = {
'name' : name,
'path' : path
}
#~ Create the ancestry.
dep_node = self.get_child(self.get_child(target_node,tag='dependencies'),tag='dependency')
while dep_node:
child = self.get_data(dep_node,strip=True)
child_jam_target = '<p%s>%s' % (path,child.split('//',1)[1])
self.parent[child_jam_target] = jam_target
#~ print "--- %s\n ^ %s" %(jam_target,child_jam_target)
dep_node = self.get_sibling(dep_node.nextSibling,tag='dependency')
return None
#~ Given a build action log, process into the corresponding test log and
#~ specific test log sub-part.
def x_build_action( self, node ):
test_run = self.results.documentElement
action_node = node
name = self.get_child(action_node,tag='name')
if name:
name = self.get_data(name)
#~ Based on the action, we decide what sub-section the log
#~ should go into.
action_type = None
if re.match('[^%]+%[^.]+[.](compile)',name):
action_type = 'compile'
elif re.match('[^%]+%[^.]+[.](link|archive)',name):
action_type = 'link'
elif re.match('[^%]+%testing[.](capture-output)',name):
action_type = 'run'
elif re.match('[^%]+%testing[.](expect-failure|expect-success)',name):
action_type = 'result'
#~ print "+ [%s] %s %s :: %s" %(action_type,name,'','')
if action_type:
#~ Get the corresponding test.
(target,test) = self.get_test(action_node,type=action_type)
#~ Skip actions that have no corresponding test as they are
#~ regular build actions and don't need to show up in the
#~ regression results.
if not test:
return None
#~ And the log node, which we will add the results to.
log = self.get_log(action_node,test)
#~ print "--- [%s] %s %s :: %s" %(action_type,name,target,test)
#~ Collect some basic info about the action.
result_data = "%(info)s\n\n%(command)s\n%(output)s\n" % {
'command' : self.get_action_command(action_node,action_type),
'output' : self.get_action_output(action_node,action_type),
'info' : self.get_action_info(action_node,action_type)
}
#~ For the test result status we find the appropriate node
#~ based on the type of test. Then adjust the result status
#~ accordingly. This makes the result status reflect the
#~ expectation as the result pages post processing does not
#~ account for this inversion.
action_tag = action_type
if action_type == 'result':
if re.match(r'^compile',test['test-type']):
action_tag = 'compile'
elif re.match(r'^link',test['test-type']):
action_tag = 'link'
elif re.match(r'^run',test['test-type']):
action_tag = 'run'
#~ The result sub-part we will add this result to.
result_node = self.get_child(log,tag=action_tag)
if action_node.getAttribute('status') == '0':
action_result = 'succeed'
else:
action_result = 'fail'
if not result_node:
#~ If we don't have one already, create it and add the result.
result_node = self.new_text(action_tag,result_data,
result = action_result,
timestamp = action_node.getAttribute('start'))
log.appendChild(self.results.createTextNode("\n"))
log.appendChild(result_node)
else:
#~ For an existing result node we set the status to fail
#~ when any of the individual actions fail, except for result
#~ status.
if action_type != 'result':
result = result_node.getAttribute('result')
if action_node.getAttribute('status') != '0':
result = 'fail'
else:
result = action_result
result_node.setAttribute('result',result)
result_node.appendChild(self.results.createTextNode("\n"))
result_node.appendChild(self.results.createTextNode(result_data))
return None
#~ The command executed for the action. For run actions we omit the command
#~ as it's just noise.
def get_action_command( self, action_node, action_type ):
if action_type != 'run':
return self.get_child_data(action_node,tag='command')
else:
return ''
#~ The command output.
def get_action_output( self, action_node, action_type ):
return self.get_child_data(action_node,tag='output',default='')
#~ Some basic info about the action.
def get_action_info( self, action_node, action_type ):
info = ""
#~ The jam action and target.
info += "%s %s\n" %(self.get_child_data(action_node,tag='name'),
self.get_child_data(action_node,tag='path'))
#~ The timing of the action.
info += "Time: (start) %s -- (end) %s -- (user) %s -- (system) %s\n" %(
action_node.getAttribute('start'), action_node.getAttribute('end'),
action_node.getAttribute('user'), action_node.getAttribute('system'))
#~ And for compiles some context that may be hidden if using response files.
if action_type == 'compile':
define = self.get_child(self.get_child(action_node,tag='properties'),name='define')
while define:
info += "Define: %s\n" %(self.get_data(define,strip=True))
define = self.get_sibling(define.nextSibling,name='define')
return info
#~ Find the test corresponding to an action. For testing targets these
#~ are the ones pre-declared in the --dump-test option. For libraries
#~ we create a dummy test as needed.
def get_test( self, node, type = None ):
jam_target = self.get_child_data(node,tag='jam-target')
base = self.target[jam_target]['name']
target = jam_target
while target in self.parent:
target = self.parent[target]
#~ print "--- TEST: %s ==> %s" %(jam_target,target)
#~ main-target-type is a precise indicator of what the build target is
#~ originally meant to be.
main_type = self.get_child_data(self.get_child(node,tag='properties'),
name='main-target-type',strip=True)
if main_type == 'LIB' and type:
lib = self.target[target]['name']
if not lib in self.test:
self.test[lib] = {
'library' : re.search(r'libs/([^/]+)',lib).group(1),
'test-name' : os.path.basename(lib),
'test-type' : 'lib',
'test-program' : os.path.basename(lib),
'target' : lib
}
test = self.test[lib]
else:
target_name_ = self.target[target]['name']
if self.target_to_test.has_key(target_name_):
test = self.test[self.target_to_test[target_name_]]
else:
test = None
return (base,test)
#~ Find, or create, the test-log node to add results to.
def get_log( self, node, test ):
target_directory = os.path.dirname(self.get_child_data(
node,tag='path',strip=True))
target_directory = re.sub(r'.*[/\\]bin[.]v2[/\\]','',target_directory)
target_directory = re.sub(r'[\\]','/',target_directory)
if not target_directory in self.log:
if 'info' in test and test['info'] == 'always_show_run_output':
show_run_output = 'true'
else:
show_run_output = 'false'
self.log[target_directory] = self.new_node('test-log',
library=test['library'],
test_name=test['test-name'],
test_type=test['test-type'],
test_program=test['test-program'],
toolset=self.get_toolset(node),
target_directory=target_directory,
show_run_output=show_run_output)
return self.log[target_directory]
#~ The precise toolset from the build properties.
def get_toolset( self, node ):
toolset = self.get_child_data(self.get_child(node,tag='properties'),
name='toolset',strip=True)
toolset_version = self.get_child_data(self.get_child(node,tag='properties'),
name='toolset-%s:version'%toolset,strip=True)
return '%s-%s' %(toolset,toolset_version)
#~ XML utilities...
def get_sibling( self, sibling, tag = None, id = None, name = None, type = None ):
n = sibling
while n:
found = True
if type and found:
found = found and type == n.nodeType
if tag and found:
found = found and tag == n.nodeName
if (id or name) and found:
found = found and n.nodeType == xml.dom.Node.ELEMENT_NODE
if id and found:
if n.hasAttribute('id'):
found = found and n.getAttribute('id') == id
else:
found = found and n.hasAttribute('id') and n.getAttribute('id') == id
if name and found:
found = found and n.hasAttribute('name') and n.getAttribute('name') == name
if found:
return n
n = n.nextSibling
return None
def get_child( self, root, tag = None, id = None, name = None, type = None ):
return self.get_sibling(root.firstChild,tag=tag,id=id,name=name,type=type)
def get_data( self, node, strip = False, default = None ):
data = None
if node:
data_node = None
if not data_node:
data_node = self.get_child(node,tag='#text')
if not data_node:
data_node = self.get_child(node,tag='#cdata-section')
data = ""
while data_node:
data += data_node.data
data_node = data_node.nextSibling
if data_node:
if data_node.nodeName != '#text' \
and data_node.nodeName != '#cdata-section':
data_node = None
if not data:
data = default
else:
if strip:
data = data.strip()
return data
def get_child_data( self, root, tag = None, id = None, name = None, strip = False, default = None ):
return self.get_data(self.get_child(root,tag=tag,id=id,name=name),strip=strip,default=default)
def new_node( self, tag, *child, **kwargs ):
result = self.results.createElement(tag)
for k in kwargs.keys():
if kwargs[k] != '':
if k == 'id':
result.setAttribute('id',kwargs[k])
elif k == 'klass':
result.setAttribute('class',kwargs[k])
else:
result.setAttribute(k.replace('_','-'),kwargs[k])
for c in child:
if c:
result.appendChild(c)
return result
def new_text( self, tag, data, **kwargs ):
result = self.new_node(tag,**kwargs)
data = data.strip()
if len(data) > 0:
result.appendChild(self.results.createTextNode(data))
return result
if __name__ == '__main__': BJamLog2Results()
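BJamLog2Results never switches on element names explicitly: x_name_ converts the current element path into candidate method names (x_build_test, x_build_action, x_build_targets_target, ...) and calls the most specific one the class defines. A stripped-down sketch of that naming convention follows; the handlers here are toys standing in for the real x_build_* methods:

class Dispatcher:
    def x_build_test(self, node):                  # stands in for the real <test> handler
        return 'test handler'
    def x_action(self, node):                      # toy catch-all for a trailing <action>
        return 'action handler'
    def dispatch(self, *path):
        # Same name construction as x_name_: for a path a/b/c try
        # x_a_b_c, then x_b_c, then x_c, and call the first one defined.
        names = []
        for element in path:
            suffix = '_' + element.replace('-', '_')
            names.append('x')
            names = [n + suffix for n in names]
        for name in names:
            if hasattr(self, name):
                return getattr(self, name)(None)
        return None

d = Dispatcher()
print(d.dispatch('build', 'test'))                 # -> 'test handler'
print(d.dispatch('build', 'targets', 'action'))    # -> 'action handler'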

@ -26,7 +26,7 @@ repo_path = {
'trunk' : 'trunk',
'release' : 'branches/release',
'build' : 'trunk/tools/build/v2',
'jam' : 'tags/tools/jam/Boost_Jam_3_1_15/src',
'jam' : 'tags/tools/jam/Boost_Jam_3_1_17/src',
'regression' : 'trunk/tools/regression',
'boost-build.jam'
: 'trunk/boost-build.jam'
@ -84,6 +84,8 @@ class runner:
action='store_true' )
#~ Connection Options:
opt.add_option( '--ftp',
help="FTP URL to upload results to." )
opt.add_option( '--proxy',
help="HTTP proxy server address and port (e.g.'http://www.someproxy.com:3128')" )
opt.add_option( '--ftp-proxy',
@ -121,6 +123,7 @@ class runner:
self.local=None
self.force_update=False
self.have_source=False
self.ftp=None
self.proxy=None
self.ftp_proxy=None
self.dart_server=None
@ -137,7 +140,10 @@ class runner:
self.regression_root = root
self.boost_root = os.path.join( self.regression_root, 'boost' )
self.regression_results = os.path.join( self.regression_root, 'results' )
self.regression_log = os.path.join( self.regression_results, 'bjam.log' )
if self.pjl_toolset != 'python':
self.regression_log = os.path.join( self.regression_results, 'bjam.log' )
else:
self.regression_log = os.path.join( self.regression_results, 'bjam.xml' )
self.tools_bb_root = os.path.join( self.regression_root,'tools_bb' )
self.tools_bjam_root = os.path.join( self.regression_root,'tools_bjam' )
self.tools_regression_root = os.path.join( self.regression_root,'tools_regression' )
@ -147,6 +153,10 @@ class runner:
self.patch_boost = 'patch_boost.bat'
self.bjam = { 'name' : 'bjam.exe' }
self.process_jam_log = { 'name' : 'process_jam_log.exe' }
elif sys.platform == 'cygwin':
self.patch_boost = 'patch_boost'
self.bjam = { 'name' : 'bjam.exe' }
self.process_jam_log = { 'name' : 'process_jam_log.exe' }
else:
self.patch_boost = 'patch_boost'
self.bjam = { 'name' : 'bjam' }
@ -286,7 +296,8 @@ class runner:
def command_setup(self):
self.command_patch()
self.build_if_needed(self.bjam,self.bjam_toolset)
self.build_if_needed(self.process_jam_log,self.pjl_toolset)
if self.pjl_toolset != 'python':
self.build_if_needed(self.process_jam_log,self.pjl_toolset)
def command_test(self, *args):
if not args or args == None or args == []: args = [ "test", "process" ]
@ -303,9 +314,11 @@ class runner:
if "test" in args:
self.command_test_run()
self.command_test_boost_build()
if "process" in args:
self.command_test_process()
if self.pjl_toolset != 'python':
self.command_test_process()
def command_test_clean(self):
results_libs = os.path.join( self.regression_results, 'libs' )
@ -315,17 +328,44 @@ class runner:
def command_test_run(self):
self.import_utils()
test_cmd = '%s -d2 --dump-tests %s "--build-dir=%s" >>"%s" 2>&1' % (
self.bjam_cmd( self.toolsets ),
self.bjam_options,
self.regression_results,
self.regression_log )
if self.pjl_toolset != 'python':
test_cmd = '%s -d2 --dump-tests %s "--build-dir=%s" >>"%s" 2>&1' % (
self.bjam_cmd( self.toolsets ),
self.bjam_options,
self.regression_results,
self.regression_log )
else:
test_cmd = '%s -d1 --dump-tests --verbose-test %s "--build-dir=%s" "--out-xml=%s"' % (
self.bjam_cmd( self.toolsets ),
self.bjam_options,
self.regression_results,
self.regression_log )
self.log( 'Starting tests (%s)...' % test_cmd )
cd = os.getcwd()
os.chdir( os.path.join( self.boost_root, 'status' ) )
utils.system( [ test_cmd ] )
os.chdir( cd )
def command_test_boost_build(self):
self.import_utils()
self.log( 'Running Boost.Build tests' )
# Find the true names of the toolsets used for testing
toolsets = os.listdir(os.path.join(self.regression_results,
"boost/bin.v2/libs/any/test/any_test.test"));
for t in toolsets:
d = os.path.join(self.regression_results, ("boost-build-%s" % (t)))
utils.makedirs (d)
fn = os.path.join(d, "test_log.xml")
cd = os.getcwd()
try:
os.chdir (os.path.join (self.boost_root, 'tools/build/v2/test'));
bjam_path = os.path.dirname (self.tool_path( self.bjam ))
self.log( "Using bjam binary in '%s'" % (bjam_path))
os.putenv('PATH', bjam_path + os.pathsep + os.environ['PATH'])
utils.system ( [ "%s test_all.py --default-bjam --xml %s > %s" % (sys.executable, t, fn) ] )
finally:
os.chdir( cd )
def command_test_process(self):
self.import_utils()
self.log( 'Getting test case results out of "%s"...' % self.regression_log )
@ -348,18 +388,13 @@ class runner:
f.write( '<p>Tests are run on %s platform.</p>' % self.platform_name() )
f.close()
if self.incremental:
run_type = 'incremental'
else:
run_type = 'full'
source = 'tarball'
revision = ''
svn_root_file = os.path.join( self.boost_root, '.svn' )
svn_info_file = os.path.join( self.boost_root, 'svn_info.txt' )
if os.path.exists( svn_root_file ):
source = 'SVN'
self.svn_command( 'info --xml "%s" >%s' % (self.boost_root,svn_info_file) )
self.svn_command( 'info --xml "%s" >"%s"' % (self.boost_root,svn_info_file) )
if os.path.exists( svn_info_file ):
f = open( svn_info_file, 'r' )
@ -373,30 +408,70 @@ class runner:
revision += svn_info[i]
i += 1
from collect_and_upload_logs import collect_logs
collect_logs(
self.regression_results,
self.runner, self.tag, self.platform, comment_path,
self.timestamp_path,
self.user,
source, run_type,
self.dart_server, self.proxy,
revision )
if self.pjl_toolset != 'python':
from collect_and_upload_logs import collect_logs
if self.incremental:
run_type = 'incremental'
else:
run_type = 'full'
collect_logs(
self.regression_results,
self.runner, self.tag, self.platform, comment_path,
self.timestamp_path,
self.user,
source, run_type,
self.dart_server, self.proxy,
revision )
else:
from process_jam_log import BJamLog2Results
if self.incremental:
run_type = '--incremental'
else:
run_type = ''
BJamLog2Results([
'--output='+os.path.join(self.regression_results,self.runner+'.xml'),
'--runner='+self.runner,
'--comment='+comment_path,
'--tag='+self.tag,
'--platform='+self.platform,
'--source='+source,
'--revision='+revision,
run_type,
self.regression_log
])
self.compress_file(
os.path.join(self.regression_results,self.runner+'.xml'),
os.path.join(self.regression_results,self.runner+'.zip')
)
def command_upload_logs(self):
self.import_utils()
from collect_and_upload_logs import upload_logs
self.retry(
lambda:
upload_logs(
self.regression_results,
self.runner, self.tag,
self.user,
self.ftp_proxy,
self.debug_level, self.send_bjam_log,
self.timestamp_path,
self.dart_server )
)
if self.ftp:
self.retry(
lambda:
upload_logs(
self.regression_results,
self.runner, self.tag,
self.user,
self.ftp_proxy,
self.debug_level, self.send_bjam_log,
self.timestamp_path,
self.dart_server,
ftp_url = self.ftp )
)
else:
self.retry(
lambda:
upload_logs(
self.regression_results,
self.runner, self.tag,
self.user,
self.ftp_proxy,
self.debug_level, self.send_bjam_log,
self.timestamp_path,
self.dart_server )
)
def command_regression(self):
import socket
@ -418,7 +493,7 @@ class runner:
b = os.path.basename( self.local )
tag = b[ 0: b.find( '.' ) ]
self.log( 'Tag: "%s"' % tag )
self.unpack_tarball( local, self.boost_root )
self.unpack_tarball( self.local, self.boost_root )
elif self.have_source:
if not self.incremental: self.command_cleanup( [ 'bin' ] )
@ -656,6 +731,33 @@ class runner:
smtp_server.sendmail( self.mail, [ self.mail ],
'Subject: %s\nTo: %s\n\n%s' % ( subject, self.mail, msg ) )
def compress_file( self, file_path, archive_path ):
self.import_utils()
utils.log( 'Compressing "%s"...' % file_path )
try:
import zipfile
z = zipfile.ZipFile( archive_path, 'w', zipfile.ZIP_DEFLATED )
z.write( file_path, os.path.basename( file_path ) )
z.close()
utils.log( 'Done writing "%s".'% archive_path )
except Exception, msg:
utils.log( 'Warning: Compressing failed (%s)' % msg )
utils.log( ' Trying to compress using a platform-specific tool...' )
try:
import zip_cmd
except ImportError:
script_dir = os.path.dirname( os.path.abspath( sys.argv[0] ) )
utils.log( 'Could not find \'zip_cmd\' module in the script directory (%s).' % script_dir )
raise Exception( 'Compressing failed!' )
else:
if os.path.exists( archive_path ):
os.unlink( archive_path )
utils.log( 'Removing stale "%s".' % archive_path )
zip_cmd.main( file_path, archive_path )
utils.log( 'Done compressing "%s".' % archive_path )
#~ Downloading source from SVN...
def svn_checkout( self ):
@ -681,7 +783,7 @@ class runner:
raise Exception( 'SVN command "%s" failed with code %d' % ( cmd, rc ) )
def svn_repository_url( self, path ):
if hasattr(self,'user') and self.user is not None and self.user != 'anonymous':
if self.user != 'anonymous' and self.user != '':
return '%s%s' % (repo_root['user'],path)
else:
return '%s%s' % (repo_root['anon'],path)
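When --pjl-toolset=python is selected, command_test_process above compresses the runner's XML itself through the new compress_file helper; a minimal sketch of the zipfile path that helper takes (file names are placeholders):

import os.path
import zipfile

def compress_one(file_path, archive_path):
    # Same zipfile call pattern as compress_file above: one file, deflated,
    # stored under its base name inside the archive.
    z = zipfile.ZipFile(archive_path, 'w', zipfile.ZIP_DEFLATED)
    z.write(file_path, os.path.basename(file_path))
    z.close()

# compress_one('results/MyRunner.xml', 'results/MyRunner.zip')   # placeholder paths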

@ -23,7 +23,7 @@ if no_update:
root = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
print '# Running regressions in %s...' % root
script_sources = [ 'collect_and_upload_logs.py', 'regression.py' ]
script_sources = [ 'collect_and_upload_logs.py', 'process_jam_log.py', 'regression.py' ]
script_local = os.path.join(root,'tools','regression','src')
script_remote = 'http://svn.boost.org/svn/boost/trunk/tools/regression/src'
script_dir = os.path.join(root,'tools_regression_src')

@ -8,6 +8,7 @@
import shutil
import codecs
import xml.sax.handler
import xml.sax.saxutils
import glob
import re
import os.path
@ -18,8 +19,6 @@ import sys
import ftplib
import utils
import runner
report_types = [ 'us', 'ds', 'ud', 'dd', 'l', 'p', 'i', 'n', 'ddr', 'dsr', 'udr', 'usr' ]
@ -484,8 +483,8 @@ def execute_tasks(
os.makedirs( merged_dir )
if not dont_collect_logs:
ftp_site = 'fx.meta-comm.com'
site_path = '/boost-regression/%s' % tag
ftp_site = 'boost.cowic.de'
site_path = '/boost/do-not-publish-this-url/results/%s' % tag
ftp_task( ftp_site, site_path, incoming_dir )

@ -1,6 +1,6 @@
#!/bin/sh
#~ Copyright Redshift Software, Inc. 2007
#~ Copyright Redshift Software, Inc. 2007-2008
#~ Distributed under the Boost Software License, Version 1.0.
#~ (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
@ -8,9 +8,9 @@ set -e
build_all()
{
update_tools ${1}
build_results ${1}
upload_results ${1}
update_tools ${1} ${2}
build_results ${1} ${2}
upload_results ${1} ${2}
}
update_tools()
@ -93,8 +93,25 @@ build_results()
root=`pwd`
boost=${cwd}/boost
case ${1} in
trunk) tag=trunk ;;
release) tag=branches/release ;;
trunk)
tag=trunk
reports="dd,ds,i,n"
;;
release)
tag=branches/release
reports="dd,ds,i,n"
;;
release-1_35_0)
tag=tags/release/Boost_1_35_0
reports="dd,ud,ds,us,ddr,udr,dsr,usr,i,n,e"
;;
release-1_36_0)
tag=tags/release/Boost_1_36_0
reports="dd,ud,ds,us,ddr,udr,dsr,usr,i,n,e"
;;
esac
report_info
python "${boost}/tools/regression/xsl_reports/boost_wide_report.py" \
@ -104,20 +121,26 @@ build_results()
--failures-markup="${boost}/status/explicit-failures-markup.xml" \
--comment="comment.html" \
--user="" \
--reports="i,dd,ds,n"
--reports=${reports}
cd "${cwd}"
}
upload_results()
{
cwd=`pwd`
upload_dir=/home/grafik/www.boost.org/testing
cd ${1}/all
rm -f ../../${1}.zip*
zip -r -9 ../../${1} * -x '*.xml'
#~ zip -q -r -9 ../../${1} * -x '*.xml'
7za a -tzip -mx=9 ../../${1}.zip * '-x!*.xml'
cd "${cwd}"
bzip2 -9 ${1}.zip
scp ${1}.zip.bz2 grafik@beta.boost.org:/home/grafik/www.boost.org/testing/incoming/
ssh grafik@beta.boost.org bunzip2 /home/grafik/www.boost.org/testing/incoming/${1}.zip.bz2
mv ${1}.zip ${1}.zip.uploading
rsync -vuz --rsh=ssh --stats \
${1}.zip.uploading grafik@beta.boost.org:/${upload_dir}/incoming/
ssh grafik@beta.boost.org \
cp ${upload_dir}/incoming/${1}.zip.uploading ${upload_dir}/live/${1}.zip
mv ${1}.zip.uploading ${1}.zip
}
build_all ${1}
build_all ${1} ${2}

@ -16,6 +16,7 @@ import sys
report_author = "Douglas Gregor <dgregor@osl.iu.edu>"
boost_dev_list = "Boost Developer List <boost@lists.boost.org>"
boost_testing_list = "Boost Testing List <boost-testing@lists.boost.org>"
def sorted_keys( dict ):
result = dict.keys()
@ -30,6 +31,7 @@ class Platform:
def __init__(self, name):
self.name = name
self.failures = list()
self.maintainers = list()
return
def addFailure(self, failure):
@ -39,6 +41,13 @@ class Platform:
def isBroken(self):
return len(self.failures) > 300
def addMaintainer(self, maintainer):
"""
Add a new maintainer for this platform.
"""
self.maintainers.append(maintainer)
return
class Failure:
"""
A single test case failure in the report.
@ -200,6 +209,67 @@ There are failures in these libraries you maintain:
return message
class PlatformMaintainer:
"""
Information about the platform maintainer of a library
"""
def __init__(self, name, email):
self.name = name
self.email = email
self.platforms = list()
return
def addPlatform(self, runner, platform):
self.platforms.append(platform)
return
def composeEmail(self, report):
"""
Composes an e-mail to this platform maintainer if one or more of
the platforms s/he maintains has a large number of failures.
Returns the e-mail text if a message needs to be sent, or None
otherwise.
"""
# Determine if we need to send a message to this developer.
requires_message = False
for platform in self.platforms:
if platform.isBroken():
requires_message = True
break
if not requires_message:
return None
# Build the message header
message = """From: Douglas Gregor <dgregor@osl.iu.edu>
To: """
message += self.name + ' <' + self.email + '>'
message += """
Reply-To: boost@lists.boost.org
Subject: Large number of Boost failures on a platform you maintain as of """
message += str(datetime.date.today()) + " [" + report.branch + "]"
message += """
You are receiving this report because one or more of the testing
platforms that you maintain has a large number of Boost failures that
are not accounted for. A full version of the report is sent to the
Boost developer's mailing list.
Detailed report:
"""
message += ' ' + report.url + """
The following platforms have a large number of failures:
"""
for platform in self.platforms:
if platform.isBroken():
message += (' ' + platform.name + ' ('
+ str(len(platform.failures)) + ' failures)\n')
return message
class Report:
"""
The complete report of all failing test cases.
@ -211,6 +281,7 @@ class Report:
self.libraries = dict()
self.platforms = dict()
self.maintainers = dict()
self.platform_maintainers = dict()
return
def getPlatform(self, name):
@ -233,6 +304,17 @@ class Report:
self.maintainers[name] = Maintainer(name, email)
return self.maintainers[name]
def getPlatformMaintainer(self, name, email):
"""
Retrieve the platform maintainer with the given name and
e-mail address.
"""
if self.platform_maintainers.has_key(name):
return self.platform_maintainers[name]
else:
self.platform_maintainers[name] = PlatformMaintainer(name, email)
return self.platform_maintainers[name]
def parseIssuesEmail(self):
"""
Try to parse the issues e-mail file. Returns True if everything was
@ -317,7 +399,7 @@ class Report:
time.sleep (30)
return False
# Parses the file $BOOST_ROOT/libs/maintainers.txt to create a hash
# mapping from the library name to the list of maintainers.
def parseLibraryMaintainersFile(self):
@ -329,6 +411,8 @@ class Report:
name_email_regex = re.compile('\s*(\w*(\s*\w+)+)\s*<\s*(\S*(\s*\S+)+)\S*>')
at_regex = re.compile('\s*-\s*at\s*-\s*')
for line in file('../../../libs/maintainers.txt', 'r'):
if line.startswith('#'):
continue
m = lib_maintainer_regex.match (line)
if m:
libname = m.group(1)
@@ -350,6 +434,41 @@ class Report:
pass
pass
# Parses the file $BOOST_ROOT/libs/platform_maintainers.txt to
# create a hash mapping from the platform name to the list of
# maintainers.
def parsePlatformMaintainersFile(self):
"""
Parse the platform maintainers file in
../../../libs/platform_maintainers.txt to collect information
about the maintainers of the various platforms.
"""
platform_maintainer_regex = re.compile('([A-Za-z0-9_.-]*|"[^"]*")\s+(\S+)\s+(.*)')
name_email_regex = re.compile('\s*(\w*(\s*\w+)+)\s*<\s*(\S*(\s*\S+)+)\S*>')
at_regex = re.compile('\s*-\s*at\s*-\s*')
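# Illustrative only -- a made-up example of the line format the regex above
# is written to accept (a possibly quoted first column, the platform name,
# then a comma-separated maintainer list with ' -at- '-obfuscated addresses):
#   "Some OS 1.0"  some-toolset  Jane Doe <jane -at- example.org>, John Roe <john -at- example.net>
# group(1) is passed to addPlatform() as its 'runner' argument, group(2)
# must match a key in self.platforms, and group(3) is split on commas.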
for line in file('../../../libs/platform_maintainers.txt', 'r'):
if line.startswith('#'):
continue
m = platform_maintainer_regex.match (line)
if m:
platformname = m.group(2)
if self.platforms.has_key(platformname):
platform = self.platforms[platformname]
for person in re.split('\s*,\s*', m.group(3)):
nmm = name_email_regex.match(person)
if nmm:
name = nmm.group(1)
email = nmm.group(3)
email = at_regex.sub('@', email)
maintainer = self.getPlatformMaintainer(name, email)
maintainer.addPlatform(m.group(1), platform)
platform.addMaintainer(maintainer)
pass
pass
pass
pass
pass
def numFailures(self):
count = 0
for library in self.libraries:
@@ -374,6 +493,8 @@ To: boost@lists.boost.org
Reply-To: boost@lists.boost.org
Subject: [Report] """
message += str(self.numFailures()) + " failures on " + branch
if branch != 'trunk':
message += ' branch'
message += " (" + str(datetime.date.today()) + ")"
message += """
@@ -381,7 +502,7 @@ Boost regression test failures
"""
message += "Report time: " + self.date + """
This report lists all regression test failures on release platforms.
This report lists all regression test failures on high-priority platforms.
Detailed report:
"""
@@ -399,54 +520,107 @@ Detailed report:
"""
for platform in sorted_keys( self.platforms ):
if self.platforms[platform].isBroken():
message += ' ' + platform + '\n'
message += (' ' + platform + ' ('
+ str(len(self.platforms[platform].failures))
+ ' failures)\n')
message += '\n'
message += """
Failures on these "broken" platforms will be omitted from the results below.
Please see the full report for information about these failures.
"""
# Display the number of failures
message += (str(self.numFailures()) + ' failures in ' +
message += (str(self.numReportableFailures()) + ' failures in ' +
str(len(self.libraries)) + ' libraries')
if any_broken_platforms:
message += ' (' + str(self.numReportableFailures()) + ' are from non-broken platforms)'
message += (' (plus ' + str(self.numFailures() - self.numReportableFailures())
+ ' from broken platforms)')
message += '\n'
# Display the number of failures per library
for k in sorted_keys( self.libraries ):
library = self.libraries[k]
num_failures = library.numFailures()
message += (' ' + library.name + ' ('
+ str(library.numReportableFailures()))
message += ' ' + library.name + ' ('
if library.numReportableFailures() > 0:
message += (str(library.numReportableFailures())
+ " failures")
if library.numReportableFailures() < num_failures:
message += (' of ' + str(num_failures)
+ ' failures are from non-broken platforms')
if library.numReportableFailures() > 0:
message += ', plus '
message += (str(num_failures-library.numReportableFailures())
+ ' failures on broken platforms')
message += ')\n'
pass
# If we have any broken platforms, tell the user how we're
# displaying them.
if any_broken_platforms:
message += """
Test failures marked with a (*) represent tests that failed on
platforms that are considered broken. They are likely caused by
misconfiguration by the regression tester or a failure in a core
library such as Test or Config."""
message += '\n'
# Provide the details for the failures in each library.
for k in sorted_keys( self.libraries ):
library = self.libraries[k]
message += '\n|' + library.name + '|\n'
for test in library.tests:
message += ' ' + test.name + ':'
for failure in test.failures:
platform = failure.platform
message += ' ' + platform.name
if platform.isBroken():
message += '*'
pass
message += '\n'
pass
pass
if library.numReportableFailures() > 0:
message += '\n|' + library.name + '|\n'
for test in library.tests:
if test.numReportableFailures() > 0:
message += ' ' + test.name + ':'
for failure in test.failures:
platform = failure.platform
if not platform.isBroken():
message += ' ' + platform.name
message += '\n'
return message
def composeTestingSummaryEmail(self):
"""
Compose a message to send to the Boost Testing list. Returns
the message text if a message is needed, returns None
otherwise.
"""
brokenPlatforms = 0
for platform in sorted_keys( self.platforms ):
if self.platforms[platform].isBroken():
brokenPlatforms = brokenPlatforms + 1
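# No platform crossed the broken-platform threshold, so there is nothing
# to send to the testing list.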
if brokenPlatforms == 0:
return None
message = """From: Douglas Gregor <dgregor@osl.iu.edu>
To: boost-testing@lists.boost.org
Reply-To: boost-testing@lists.boost.org
Subject: [Report] """
message += str(brokenPlatforms) + " potentially broken platforms on " + branch
if branch != 'trunk':
message += ' branch'
message += " (" + str(datetime.date.today()) + ")"
message += """
Potentially broken platforms for Boost regression testing
"""
message += "Report time: " + self.date + """
This report lists the high-priority platforms that are exhibiting a
large number of regression test failures, which might indicate a problem
with the test machines or testing harness.
Detailed report:
"""
message += ' ' + self.url + '\n'
message += """
Platforms with a large number of failures:
"""
for platform in sorted_keys( self.platforms ):
if self.platforms[platform].isBroken():
message += (' ' + platform + ' ('
+ str(len(self.platforms[platform].failures))
+ ' failures)\n')
return message
@@ -594,7 +768,9 @@ if not okay:
# Try to parse maintainers information
report.parseLibraryMaintainersFile()
report.parsePlatformMaintainersFile()
# Generate individualized e-mail for library maintainers
for maintainer_name in report.maintainers:
maintainer = report.maintainers[maintainer_name]
@@ -613,7 +789,27 @@ for maintainer_name in report.maintainers:
if '--debug' in sys.argv:
print ('Message text for ' + maintainer.name + ':\n')
print email
# Generate individualized e-mail for platform maintainers
for maintainer_name in report.platform_maintainers:
maintainer = report.platform_maintainers[maintainer_name]
email = maintainer.composeEmail(report)
if email:
if '--send' in sys.argv:
print ('Sending notification email to ' + maintainer.name + '...')
smtp = smtplib.SMTP('milliways.osl.iu.edu')
smtp.sendmail(from_addr = report_author,
to_addrs = maintainer.email,
msg = email)
print 'done.\n'
else:
print 'Would send a notification e-mail to',maintainer.name
if '--debug' in sys.argv:
print ('Message text for ' + maintainer.name + ':\n')
print email
email = report.composeSummaryEmail()
if '--send' in sys.argv:
print 'Sending summary email to Boost developer list...'
@@ -626,6 +822,19 @@ if '--debug' in sys.argv:
print 'Message text for summary:\n'
print email
email = report.composeTestingSummaryEmail()
if email:
if '--send' in sys.argv:
print 'Sending summary email to Boost testing list...'
smtp = smtplib.SMTP('milliways.osl.iu.edu')
smtp.sendmail(from_addr = report_author,
to_addrs = boost_testing_list,
msg = email)
print 'done.\n'
if '--debug' in sys.argv:
print 'Message text for testing summary:\n'
print email
if not ('--send' in sys.argv):
print 'Chickening out and not sending any e-mail.'
print 'Use --send to actually send e-mail, --debug to see e-mails.'
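# Command-line behaviour as handled above (illustrative summary only):
#   --send    actually deliver the composed e-mails through the SMTP relay
#   --debug   print each composed message so it can be inspected
#   neither   print the notice above and send nothing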

View file

@@ -226,7 +226,7 @@ Report time: </xsl:text>
This report lists all regression test failures on release platforms.
Detailed report:
http://boost.org/regression/</xsl:text>
http://beta.boost.org/development/tests/</xsl:text>
<xsl:value-of select="$source"/>
<xsl:text>/developer/issues.html