Merged tools, system, filesystem from trunk at 41106

[SVN r41109]
This commit is contained in:
Beman Dawes 2007-11-15 16:31:06 +00:00
commit f98b9cbb5a
117 changed files with 7143 additions and 4440 deletions

8
.gitmodules vendored
View file

@ -1,3 +1,7 @@
[submodule "system"]
path = libs/system
url = ../system.git
fetchRecurseSubmodules = on-demand
[submodule "multi_array"]
path = libs/multi_array
url = ../multi_array.git
@ -282,3 +286,7 @@
path = libs/pool
url = ../pool.git
fetchRecurseSubmodules = on-demand
[submodule "litre"]
path = tools/litre
url = ../litre.git
fetchRecurseSubmodules = on-demand

@ -1 +1 @@
Subproject commit 3dc88e3cfd8a0aa11e967c0f8e8cd6ed4aa4b153
Subproject commit fa9c7b59f826b2e423570662af77cf91a8c2778a

1
libs/system Submodule

@ -0,0 +1 @@
Subproject commit bc0ea8a494c5fb855e1897b75783773d7e0b81e4

@ -1 +1 @@
Subproject commit 733cd588bffb0b5f360ef7d4f643632db03de114
Subproject commit 31e219acfc8382338dd0075f470b82b99d16b31d

@ -1 +1 @@
Subproject commit af596bfca076451166e4f9a3af7866c3f1b9bda0
Subproject commit 040634e05ca763d91f6cc8e3c6bde9998630e8f6

@ -1 +1 @@
Subproject commit aaf484f4aa2dc129d5b1abfb361cfab010bc02a0
Subproject commit 924c85428613fa22f880f4182f000de181e5edc5

@ -1 +1 @@
Subproject commit 5af6d49b664b591c8cf97c66c77446a354bd3835
Subproject commit 8f3a7c6a0df93e298bbb92c2b291b380c57d495b

View file

@ -0,0 +1,82 @@
# Buildbot master configuration for the Boost regression-testing bot.
# Buildbot imports this file and reads the ``BuildmasterConfig`` dict.
c = BuildmasterConfig = {}
####### BUILDSLAVES
# Port the build slaves connect to, plus (name, password) pairs for the
# slaves allowed to attach.
c['slavePortnum'] = 9091
c['bots'] = []
c['bots'].append( ('linux-x86-rsi-droid', 'boost1234') )
####### CHANGESOURCES
from buildbot.changes.svnpoller import SVNPoller
from buildbot.changes.pb import PBChangeSource
c['sources'] = []
#~ c['sources'].append(SVNPoller(
#~ 'http://svn.boost.org/svn/boost/trunk'
#~ ))
# Changes are pushed to the master (PB change source), rather than
# polled from SVN (the poller above is disabled).
c['sources'].append(PBChangeSource())
####### SCHEDULERS
from buildbot.scheduler import AnyBranchScheduler
c['schedulers'] = []
# Build trunk after it has been quiet for two minutes.
c['schedulers'].append(AnyBranchScheduler(
    name = 'testing',
    branches = ['trunk'],
    treeStableTimer = 2*60,
    builderNames = [
        'Linux x86 Alpha'
        ]
    ))
####### BUILDERS
from boost.bot.factory import Boost_BuildFactory, action
c['builders'] = []
# One builder: checkout, bootstrap bjam, build the test tools, then run
# the whole regression suite with two gcc toolsets.
c['builders'].append({
    'name': 'Linux x86 Alpha',
    'slavename': 'linux-x86-rsi-droid',
    'builddir': 'Linux-x86-Alpha',
    'factory': Boost_BuildFactory(
        action('svn',root='http://svn.boost.org/svn/boost/'),
        action('bjam_build'),
        action('test_tools_build', toolset='gcc'),
        action('btest_all',
            toolset=['gcc-4.1.2~linux~x86','gcc-4.2.0~linux~x86'],
            options=['-sTEST_BOOST_VERSION=HEAD','-j2']
            )
        )
    })
####### STATUS TARGETS
# Waterfall web view plus an IRC presence in #boost.
from buildbot.status import html
from buildbot.status import words
c['status'] = []
c['status'].append(html.Waterfall(
    http_port=9090
    ))
c['status'].append(words.IRC(
    host="irc.freenode.net",
    nick="buildbot_alpha",
    channels=["#boost"]
    ))
####### DEBUGGING OPTIONS
####### PROJECT IDENTITY
c['projectName'] = "Boost"
c['projectURL'] = "http://boost.org/"
c['buildbotURL'] = "http://droid.borg.redshift-software.com:9090/"

View file

@ -0,0 +1,4 @@
#~ Copyright Redshift Software, Inc. 2006
#~ Distributed under the Boost Software License, Version 1.0.
#~ (See accompanying file LICENSE_1_0.txt or copy at
#~ http://www.boost.org/LICENSE_1_0.txt)

View file

@ -0,0 +1,9 @@
# Copyright Redshift Software, Inc. 2005-2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# SVN keyword expansions: the repository rewrites these on commit to
# record when, and at which revision, this file last changed.
modified = '$Date: 2007-05-09 10:49:32 -0500 (Wed, 09 May 2007) $'
revision = '$Revision: 37651 $'

View file

@ -0,0 +1,19 @@
# Copyright Redshift Software, Inc. 2005-2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import string
def chr_or_question_mark( c ):
    """Return the character for code *c* when it is a safe printable
    ASCII character, otherwise a question mark."""
    # Keep only printable 7-bit characters, and additionally reject the
    # TAB/VT/FF whitespace controls (0x09, 0x0b, 0x0c).
    is_safe = (
        chr(c) in string.printable
        and c < 128
        and c not in ( 0x09, 0x0b, 0x0c )
    )
    if not is_safe:
        return '?'
    return chr(c)
# 256-entry translation table mapping every byte to itself when it is a
# safe printable ASCII character and to '?' otherwise; used with
# str.translate() to sanitize test-log text.  (Python 2: string.maketrans
# on byte strings.)
char_translation_table = string.maketrans(
    ''.join( map( chr, range(0, 256) ) )
    , ''.join( map( chr_or_question_mark, range(0, 256) ) )
    )

View file

@ -0,0 +1,295 @@
# Copyright Redshift Software, Inc. 2005-2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import boost.bot.step
import buildbot
import buildbot.process.base
import buildbot.process.factory
import buildbot.process.buildstep
import buildbot.steps.source
import os.path
import re
import string
import time
import twisted.python
import types
import urllib
def action(_action,*_args,**_kwargs):
    """Bundle an action name with its positional and keyword arguments
    into the ``(name, args, kwargs)`` tuple consumed by
    Boost_BuildFactory.newBuild."""
    # Empty argument collections are normalized ([], {}) exactly as the
    # factory expects.
    return (_action, _args or [], _kwargs or {})
def defaults(_defaults = None,**_kwargs):
    """Build the common BuildStep keyword arguments from *_kwargs*,
    filling in the factory-wide defaults.

    Extra keys already present in *_defaults* are preserved; the six
    standard step-control keys are always (re)set.  The dict passed in
    (if any) is updated in place and returned, matching the original
    contract.

    Fix: the original declared ``_defaults={}`` — a mutable default
    argument shared across every call, so keys set by one caller leaked
    into later calls.  A fresh dict is now created per call.
    """
    if _defaults is None:
        _defaults = {}
    _defaults.update({
        'haltOnFailure': _kwargs.get('haltOnFailure',False),
        'flunkOnWarnings': _kwargs.get('flunkOnWarnings',False),
        'flunkOnFailure': _kwargs.get('flunkOnFailure',True),
        'warnOnWarnings': _kwargs.get('warnOnWarnings',False),
        'warnOnFailure': _kwargs.get('warnOnFailure',False),
        'timeout': _kwargs.get('timeout',30*60)
        })
    return _defaults
def s(steptype, **kwargs):
    """Pair a step class with its constructor keyword arguments, in the
    (class, kwargs) shape buildbot's setSteps expects."""
    pair = (steptype, kwargs)
    return pair
class Boost_BuildFactory(buildbot.process.factory.BuildFactory):
    """Factory that expands declarative ``action(name, ...)`` tuples into
    concrete buildbot steps each time a build is created.

    Every ``action_<name>`` method returns a pair
    ``(steps, important_file_patterns)``; ``newBuild`` concatenates the
    results across all configured actions.
    """
    def __init__(self, *actions, **args):
        buildbot.process.factory.BuildFactory.__init__(self)
        # Declarative (name, args, kwargs) tuples from the master config.
        self.actions = actions or []
        # Factory-wide options; these override per-action kwargs.
        self.options = args or {}
        #~ --
        self.steps = []
        self.treeStableTimer = 5*60
        # Use our Build subclass so builds carry important-file patterns.
        self.buildClass = Boost_Build
    def newBuild(self,request):
        """Create a Boost_Build whose steps are expanded from self.actions."""
        b = buildbot.process.factory.BuildFactory.newBuild(self,request)
        b.setOptions(self.options)
        steps = []
        files = []
        for (_action,_args,_kwargs) in self.actions:
            # Dispatch by naming convention; unknown action names are
            # silently skipped.
            action_call = getattr(self,'action_%s' % _action,None)
            if callable(action_call):
                # Drop explicit None values so the handlers' defaults
                # apply.  (Python 2: keys() returns a list, so deleting
                # while iterating is safe here.)
                for k in _kwargs.keys():
                    if _kwargs[k] == None: del _kwargs[k]
                _kwargs.update(self.options)
                (action_steps,action_files) = action_call(b,*_args,**_kwargs)
                steps = steps + action_steps
                files = files + action_files
        b.important_files = files
        b.setSteps(steps)
        return b
    def action_cvs(self,b,*args,**kwargs):
        """CVS checkout step."""
        opt = {
            'cvsmodule' : kwargs.get('module',"boost"),
            'global_options' : ["-z9"],
            'mode' : kwargs.get('mode',"copy"),
            'branch' : kwargs.get('branch','HEAD'),
            'cvsroot' : kwargs.get('root')
            }
        if kwargs.has_key('passwd'):
            opt['login'] = kwargs['passwd'] or ""
        opt.update(defaults(**kwargs))
        return (
            [ s(buildbot.steps.source.CVS,**opt) ],
            kwargs.get('files',[".*"]) )
    def action_svn(self,b,*args,**kwargs):
        """Subversion checkout/update step."""
        opt = {
            'mode' : kwargs.get('mode',"update"),
            'defaultBranch' : kwargs.get('branch','trunk'),
            'baseURL' : kwargs.get('root')
            }
        opt.update(defaults(**kwargs))
        return (
            [ s(buildbot.steps.source.SVN,**opt) ],
            kwargs.get('files',[".*"]) )
    def action_tarball(self,b,*args,**kwargs):
        """Archive the work directory and publish the tarball."""
        return (
            [ s( boost.bot.step.Tarball
                ,description = kwargs.get('description')
                ,archive = kwargs.get('archive',b.workdir)
                ,publishdir = kwargs['publishdir']
                ,branch = kwargs.get('branch','HEAD')
                ,**defaults(**kwargs)
                ) ],
            kwargs.get('files',[]) )
    def action_selfupdate(self,b,*args,**kwargs):
        """Ask the slave to reload its command modules in place."""
        return (
            [ s( boost.bot.step.SelfUpdate
                ,description = kwargs.get('description')
                ,**defaults(**kwargs)
                ) ],
            kwargs.get('files',[]) )
    def action_bjam_build(self,b,*args,**kwargs):
        """Bootstrap the bjam executable from its sources."""
        return (
            [ s( boost.bot.step.Boost_Jam_Build
                ,description = kwargs.get('description')
                ,workdir = b.workdir
                ,jam_src = kwargs.get('jam_src','tools/jam/src')
                ,toolset = kwargs.get('toolset',None)
                ,**defaults(**kwargs)
                ) ],
            kwargs.get('files',[]) )
    def action_bjam(self,b,*args,**kwargs):
        """Generic bjam invocation step."""
        return (
            [ s( boost.bot.step.Boost_Jam
                ,description = kwargs.get('description')
                ,workdir = b.workdir
                ,bjam = kwargs.get('bjam','tools/jam/src/bin/bjam')
                ,project = kwargs.get('project','.')
                ,options = kwargs.get('options',[])
                ,target = kwargs.get('target')
                ,locate = kwargs.get('locate','results')
                ,env = kwargs.get('env',{})
                ,logfile = kwargs.get('logfile',False)
                ,**defaults(**kwargs)
                ) ],
            kwargs.get('files',[]) )
    def action_test_tools_build(self,b,*args,**kwargs):
        """Build the regression-test tools (release variant).
        'toolset' is a single toolset name here."""
        return self.action_bjam( b
            ,description = kwargs.get('description',['test tools','build'])
            ,project = 'tools/regression/build'
            ,options = [
                'toolset=%s' % kwargs['toolset']
                ] + kwargs.get('options',[])
            ,target = 'release'
            ,locate = kwargs.get('locate','results')
            ,env = kwargs.get('env',{})
            ,**defaults(**kwargs)
            )
    #~ def action_btest(self,b,*args,**kwargs):
    #~ return (
    #~ [ s( boost.bot.step.Boost_Test
    #~ ,description = kwargs.get('description')
    #~ ,workdir = b.workdir
    #~ ,tests = kwargs.get('tests',['.*'])
    #~ ,bjam = kwargs.get('bjam','tools/jam/src/bin/bjam')
    #~ ,project = kwargs.get('project','status')
    #~ ,options = kwargs.get('options',[
    #~ '--dump-tests',
    #~ '--dump-test-targets',
    #~ '-sBUILD=%s' % kwargs.get('build','debug'),
    #~ '-sTOOLS=%s' % kwargs['toolset']
    #~ ] + kwargs.get('options',[]))
    #~ ,target = 'nothing'
    #~ ,locate = kwargs.get('locate','results')
    #~ ,env = kwargs.get('env',{})
    #~ ,logfile = kwargs.get('logfile','bjam.log')
    #~ ,**defaults(**kwargs)
    #~ ) ],
    #~ kwargs.get('files',[]) )
    def action_btest_all(self,b,*args,**kwargs):
        """Run the whole regression suite.  Unlike test_tools_build,
        'toolset' must be a LIST of toolset names here (it is
        concatenated into the bjam option list)."""
        return self.action_bjam( b
            ,description = kwargs.get('description',['btest','all'])
            ,project = kwargs.get('project','status')
            ,options = [
                '--dump-tests',
                #~ '-d2',
                '-n',
                kwargs.get('build','debug')
                ]
                + kwargs['toolset']
                + kwargs.get('options',[])
            ,locate = kwargs.get('locate','results')
            ,env = kwargs.get('env',{})
            ,logfile = kwargs.get('logfile','bjam.log')
            ,files = kwargs.get('files',['boost.*','libs.*','status.*'])
            ,**defaults(**kwargs)
            )
    def action_process_jam_log(self,b,*args,**kwargs):
        """Convert the raw bjam log into per-test XML results."""
        return (
            [ s( boost.bot.step.Boost_Process_Jam_Log
                ,description = kwargs.get('description',['process log'])
                ,workdir = b.workdir
                # NOTE(review): 'projcess_jam_log' looks like a typo for
                # 'process_jam_log' -- the slave-side command reads the
                # correctly-spelled key, so this value may never reach
                # it; confirm against boost.bot.step / the slave command.
                ,projcess_jam_log = kwargs.get('projcess_jam_log','dist/bin/process_jam_log')
                ,locate = kwargs.get('locate','results')
                ,logfile = kwargs.get('logfile','bjam.log')
                ,**defaults(**kwargs)
                ) ],
            kwargs.get('files',[]) )
    def action_collect_results(self,b,*args,**kwargs):
        """Merge the per-test XML files into one runner results file."""
        return (
            [ s( boost.bot.step.Boost_Collect_Results
                ,description = kwargs.get('description')
                ,workdir = b.workdir
                ,locate = kwargs.get('locate',b.options.get('locate','results'))
                ,runner = kwargs['runner']
                ,branch = kwargs['branch']
                ,source_type = kwargs['source_type']
                ,**defaults(**kwargs)
                ) ],
            kwargs.get('files',[]) )
    def action_publish_results(self,b,*args,**kwargs):
        """Upload the collected results to the publish location."""
        return (
            [ s( boost.bot.step.Boost_Publish_Results
                ,description = kwargs.get('description')
                ,workdir = b.workdir
                ,locate = kwargs.get('locate',b.options.get('locate','results'))
                ,runner = kwargs['runner']
                ,branch = kwargs['branch']
                ,source_type = kwargs['source_type']
                ,publish_location = kwargs['publish_location']
                ,proxy = kwargs.get('proxy')
                ,**defaults(**kwargs)
                ) ],
            kwargs.get('files',[]) )
class Boost_Build(buildbot.process.base.Build):
    """Build subclass that carries the important-file patterns and the
    per-factory options produced by Boost_BuildFactory."""
    def __init__(self,requests):
        buildbot.process.base.Build.__init__(self,requests)
        # Regex pattern strings for files that should trigger a build
        # (filled in by Boost_BuildFactory.newBuild).
        self.important_files = []
        # Lazily-compiled patterns; only used by the disabled
        # isFileImportant below.
        self.important_re = None
    #~ def isFileImportant(self, filename):
    #~ if self.important_re == None:
    #~ self.important_re = []
    #~ for file in self.important_files:
    #~ self.important_re.append(re.compile(file))
    #~ for file_re in self.important_re:
    #~ if file_re.search(filename):
    #~ return 1;
    #~ return 0
    def setOptions(self,options = {}):
        """Record factory-wide options and derive the work directory.
        (The default dict is never mutated -- 'options or {}' replaces a
        falsy argument -- so the shared-default pitfall does not bite.)"""
        self.options = options or {}
        self.workdir = self.options.get('workdir','build')
    #~ def setupBuild(self, expectations):
    #~ # Hack the stamp as an allowed arg for steps.
    #~ if 'stamp' not in buildbot.process.buildstep.BuildStep.parms:
    #~ buildbot.process.buildstep.BuildStep.parms.append('stamp')
    #~ return buildbot.process.base.Build.setupBuild(self,expectations)
    #~ def getNextStep(self):
    #~ s = buildbot.process.base.Build.getNextStep(self)
    #~ if s:
    #~ # Add a stamp arg for the steps to use as needed.
    #~ stamp = self._get_stamp()
    #~ s.stamp = stamp
    #~ if hasattr(s,'cmd'):
    #~ if hasattr(s.cmd,'args'):
    #~ s.cmd.args.update( { 'stamp' : stamp } )
    #~ return s
    #~ def _get_stamp(self):
    #~ # The default is to use the revision sequence as the "time".
    #~ # If not available, because of a forced build for example, we
    #~ # use the current time.
    #~ stamp = time.strftime( '%Y-%m-%dT%H:%M:%S', time.gmtime() )
    #~ revision, patch = self.getSourceStamp()
    #~ if not revision:
    #~ changes = self.allChanges()
    #~ if changes:
    #~ last_change_time = max([c.when for c in changes])
    #~ last_change_revision = max([c.revision for c in changes])
    #~ # Prefer using the revision change if present. If it's not
    #~ # it's likely a CVS like time sequence, so use the time sequence
    #~ # int that case (adjusted with the tree timer).
    #~ if last_change_revision:
    #~ stamp = last_change_revision
    #~ else:
    #~ stamp = time.strftime( '%Y-%m-%dT%H:%M:%S',
    #~ time.gmtime(last_change_time + self.treeStableTimer / 2) )
    #~ return stamp

View file

@ -0,0 +1,528 @@
# Copyright Redshift Software, Inc. 2005-2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import boost.bot.char_translation_table
import ftplib
import platform
import re
import os
import os.path
import shutil
import string
import sys
import tarfile
import urlparse
import xml.sax.saxutils
import zipfile
from buildbot.slave.commands import Command, AbandonChain, ShellCommand
from buildbot.slave.registry import registerSlaveCommand
from twisted.internet import reactor, defer
from twisted.python import failure, log, runtime
# Extract the numeric revision from the SVN $Revision$ keyword; used to
# version the slave commands registered below.
_ver = '$Revision: 37651 $'[1+len("Revision: "):-2]
class LoggedShellCommand(ShellCommand):
    """ShellCommand variant that mirrors the command's output into a
    local log file on the slave (optionally appending).

    Fix: the original defined ``addStdout`` twice; the second definition
    shadowed the first and stderr was never mirrored into the log file.
    The duplicate is renamed to ``addStderr`` so both streams are logged.
    """
    def __init__(self, builder, command, workdir, **kwargs):
        ShellCommand.__init__(self,builder,command,workdir
            ,environ = kwargs.get('environ',{})
            ,sendStdout = kwargs.get('sendStdout',True)
            ,sendStderr = kwargs.get('sendStderr',True)
            ,sendRC = kwargs.get('sendRC',True)
            ,timeout = kwargs.get('timeout',None)
            ,initialStdin = kwargs.get('stdin',kwargs.get('initialStdin',None))
            ,keepStdinOpen = kwargs.get('keepStdinOpen',False)
            ,keepStdout = kwargs.get('keepStdout',False)
            )
        self.logfile = None
        logfile = kwargs.get('logfile')
        if logfile:
            # Create the log's directory before opening the file.
            logdir = os.path.dirname(logfile)
            if not os.path.exists(logdir):
                os.makedirs(logdir)
            if kwargs.get('appendToLog',False) and os.path.exists(logfile):
                self.logfile = file(logfile,"a")
            else:
                self.logfile = file(logfile,"w")
    def addStdout(self, data):
        # Forward to buildbot and mirror into the local log file.
        ShellCommand.addStdout(self,data)
        if self.logfile: self.logfile.write(data)
    def addStderr(self, data):
        # BUGFIX: this was a second ``addStdout`` definition; stderr now
        # reaches both buildbot and the local log file.
        ShellCommand.addStderr(self,data)
        if self.logfile: self.logfile.write(data)
    def finished(self, sig, rc):
        # Flush/close the log before reporting completion.
        if self.logfile: self.logfile.close()
        ShellCommand.finished(self,sig,rc)
def c(callback, *args, **kwargs):
    """Bundle *callback* with its arguments into the
    (callable, args, kwargs) triple consumed by NoOpCommand._start."""
    # Normalize empty collections to [] / {} as the callers expect.
    return (callback, args or [], kwargs or {})
class NoOpCommand(Command):
    """Base class for the Boost slave commands.

    Provides ``_start``, which chains a sequence of ``c(...)`` callback
    triples through a Deferred.  Each callback must return 0 on success;
    a non-zero return (or an interrupt) aborts the remaining chain via
    AbandonChain.
    """
    def start(self):
        return self._start("noop",c(self.doNoOp))
    def doNoOp(self):
        self.stdout("do noop")
        return 0
    def stdout(self, message):
        # Send one line of output back to the buildmaster.
        self.sendStatus({'stdout': message+"\n"})
    def interrupt(self):
        # Flag checked by _result_check between chained callbacks.
        self.interrupted = True
    def _start(self, name, *callbacks):
        """Queue *callbacks* on a Deferred, checking each result, and
        fire the chain shortly after returning."""
        d = defer.Deferred()
        self.stdout("starting %s operation" % name)
        self.name = name
        self.command = None
        for call,args,kwargs in callbacks:
            # Run the callback, then validate its return code before the
            # next callback executes.
            d.addCallbacks(self._do_call,None,[call]+args,kwargs)
            d.addCallback(self._result_check)
        d.addCallbacks(self._success,self._failure)
        # Small delay so the caller can attach to the Deferred first.
        reactor.callLater(2,d.callback,0)
        return d
    def _do_call(self, rc, call, *args, **kwargs):
        # The previous link's rc is discarded; _result_check already
        # vetted it.
        return call(*args,**kwargs)
    def _result_check(self, rc):
        # Abort the chain on interruption or a non-zero return code.
        if self.interrupted:
            raise AbandonChain(-1)
        if rc != 0:
            raise AbandonChain(rc)
        return 0
    def _success(self, rc):
        self.sendStatus({'rc': 0})
        return None
    def _failure(self, fail):
        # Only AbandonChain is handled; anything else propagates.
        fail.trap(AbandonChain)
        self.sendStatus({'rc': fail.value.args[0]})
        return None
registerSlaveCommand("noop", NoOpCommand, _ver)
class SelfUpdateCommand(NoOpCommand):
    """Slave command that reloads the slave's command modules in place,
    picking up new code without restarting the slave process."""
    def start(self):
        return self._start("selfupdate",c(self.doUpdateCommandRegistry))
    def doUpdateCommandRegistry(self):
        # Imported here so the reload() below always operates on the
        # freshest module objects.
        import buildbot.slave.registry
        import buildbot.slave.commands
        import boost.buildbot.remote
        self.stdout("updating command registry")
        reload(buildbot.slave.registry)
        self.stdout("reloading standard commands")
        reload(buildbot.slave.commands)
        self.stdout("reloading boost commands")
        # Reloading this very module re-runs its registerSlaveCommand
        # calls against the fresh registry.
        reload(boost.buildbot.remote)
        self.stdout("command registry update complete")
        self.stdout("commands:")
        # Report every command (and its version) now registered.
        for name, (factory, version) in buildbot.slave.registry.commandRegistry.items():
            self.stdout(" %s (%s)" % (name,version))
        return 0
registerSlaveCommand("selfupdate", SelfUpdateCommand, _ver)
class TarballCommand(NoOpCommand):
    """Slave command: strip VCS metadata from the source tree, archive it
    as .tar.bz2, then move the archive into a publish directory, pruning
    any older archives found there."""
    def start(self):
        # Make the stamp filesystem-safe (spaces and ':' are replaced).
        stamp = self.args.get('stamp','')
        stamp = stamp.replace(' ','-')
        stamp = stamp.replace(':','_')
        archive_stamped = os.path.normpath(os.path.join(self.builder.basedir,
            "%s-%s-%s" % (self.args['archive'],self.args.get('branch','X'),stamp)))
        return self._start( "tarball",
            c( self.doCleanRepository,
                repository = os.path.normpath(os.path.join(self.builder.basedir, self.args['workdir'])) ),
            c( self.doArchive,
                source = os.path.normpath(os.path.join(self.builder.basedir, self.args['workdir'])),
                archive = archive_stamped ),
            c( self.doPublish,
                archive = archive_stamped,
                publishdir = os.path.normpath(self.args['publishdir']) ) )
    def doCleanRepository(self,*args,**kwargs):
        """Remove CVS metadata directories from the tree."""
        self.stdout("cleaning repository at %s..." % kwargs['repository'])
        self._clean_r(kwargs['repository'])
        return 0
    def doArchive(self,*args,**kwargs):
        """Create <archive>.tar.bz2 containing *source*."""
        source_path = kwargs['source']
        archive_path = "%s.tar.bz2" % kwargs['archive']
        archive_dir = os.path.basename( kwargs['archive'] )
        self.stdout("creating archive %s for %s" % ( archive_path, source_path ))
        # Force group/other-readable output regardless of slave umask.
        previous_umask = os.umask(0022)
        tar = tarfile.open(archive_path, 'w:bz2')
        #~ Disabling posix allows for longer names and hence deeper directories.
        tar.Posix = False
        tar.add(source_path, archive_dir)
        tar.close()
        os.umask(previous_umask)
        return 0
    def doPublish(self,*args,**kwargs):
        """Move the archive into the publish dir and prune old archives."""
        archive_path = "%s.tar.bz2" % kwargs['archive']
        self.stdout("publishing archive %s to %s" % ( archive_path, kwargs['publishdir'] ))
        previous_umask = os.umask(0022)
        try:
            os.makedirs(kwargs['publishdir'],0755)
        except:
            # Directory already exists (or creation failed; the move
            # below will then fail loudly).
            pass
        #~ shutil.move is available on py2.3, consider copy/rename implementation to
        #~ support py2.2. Or possibly do an external async "mv" command.
        shutil.move(archive_path,kwargs['publishdir'])
        # NOTE(review): the parentheses below do NOT form a tuple, so
        # 'exclude' is a plain string and the 'name not in exclude' test
        # in _clean_archives is a substring check.  It happens to keep
        # the just-published archive, but a trailing comma was probably
        # intended -- confirm.
        self._clean_archives( kwargs['publishdir'], '[^\.]+\.tar\.bz2',
            ( os.path.basename(archive_path) ) )
        os.umask(previous_umask)
        return 0
    def _clean_r(self,dir):
        # Depth-first removal of CVS/ directories under *dir*.
        names = os.listdir(dir)
        names.sort()
        for name in names:
            entry = os.path.join(dir,name)
            if name == 'CVS':
                self.stdout("[REMOVE] %s" % entry)
                shutil.rmtree( entry )
            elif os.path.isdir(entry):
                self._clean_r(entry)
    def _clean_archives(self,dir,m,exclude):
        # Delete directory entries matching regex *m*, keeping those
        # listed in (or, for a string, contained in) *exclude*.
        m_re = re.compile(m)
        names = os.listdir(dir)
        names.sort()
        for name in names:
            if m_re.search(name) and name not in exclude:
                entry = os.path.join(dir,name)
                self.stdout("[REMOVE] %s" % entry)
                os.remove( entry )
registerSlaveCommand("tarball", TarballCommand, _ver)
class Command_Boost_Jam_Build(NoOpCommand):
    """Slave command that bootstraps the bjam executable from source
    using the platform's build script."""
    def start(self):
        return self._start( "boost.bjam.build",
            c( self.doBJamBuild,
                jam_src = os.path.normpath(os.path.join(
                    self.builder.basedir, self.args['workdir'], self.args['jam_src'])),
                toolset = self.args.get('toolset',None),
                timeout = self.args.get('timeout',60*5))
            )
    def doBJamBuild(self,*args,**kwargs):
        """Run build.bat (Windows) or build.sh (POSIX), optionally with
        an explicit toolset argument."""
        self.stdout("building bjam at %s..." % kwargs['jam_src'])
        if runtime.platformType != 'posix':
            # BUGFIX: the original literal '.\build.bat' contained the
            # escape sequence \b (backspace); a raw string keeps the
            # intended backslash.
            command = [ r'.\build.bat' ]
        else:
            command = [ 'sh', './build.sh' ]
        if kwargs['toolset']:
            command.append(kwargs['toolset'])
        self.command = ShellCommand(self.builder, command,
            kwargs['jam_src'], { 'LOCATE_TARGET' : 'bin' },
            sendRC = False, timeout = kwargs['timeout'] )
        return self.command.start()
registerSlaveCommand("boost.jam.build", Command_Boost_Jam_Build, _ver)
class Command_Boost_Jam(NoOpCommand):
    """Slave command that runs bjam for a project, optionally teeing its
    output into a log file under the build directory."""
    def start(self):
        _builddir = os.path.normpath(os.path.join(
            self.builder.basedir,self.args.get('locate','results')))
        # Let bjam find site/user configuration next to -- and one level
        # above -- the slave's base directory.
        _env = self.args.get('env',{})
        _env.update({
            'BOOST_BUILD_PATH': "%s:%s:%s" % (
                os.path.normpath(self.builder.basedir),
                os.path.normpath(os.path.join(self.builder.basedir,'..')),
                _env.get('BOOST_BUILD_PATH','.') )
            })
        _logfile = False
        if self.args.get('logfile'):
            _logfile = os.path.normpath(os.path.join(
                _builddir,self.args['logfile']))
        return self._start( "boost.bjam",
            c( self.doBJam
                ,bjam = os.path.normpath(os.path.join(self.builder.basedir,
                    self.args['workdir'], self.args['bjam']))
                ,project = os.path.normpath(os.path.join(self.builder.basedir,
                    self.args['workdir'], self.args.get('project','.')))
                ,options = self.args.get('options',[])
                ,builddir = _builddir
                ,target = self.args.get('target','all')
                ,env = _env
                ,logfile = _logfile
                ,appendToLog = self.args.get('appendToLog',False)
                ,timeout = self.args.get('timeout',60*5)
                )
            )
    def doBJam(self,*args,**kwargs):
        """Assemble the bjam command line and run it via
        LoggedShellCommand."""
        self.stdout("bjam %s..." % kwargs['target'])
        self.stdout(" env:")
        # Echo the merged environment for debugging.
        env = os.environ.copy()
        env.update(kwargs['env'])
        for item in env.items():
            self.stdout(" %s = '%s'" % item)
        command = []
        command += [ kwargs['bjam'] ]
        command += [ '--build-dir=%s' % (kwargs['builddir']) ]
        command += kwargs['options']
        if kwargs.get('target'):
            command += [ kwargs['target'] ]
        self.command = LoggedShellCommand(self.builder
            ,command
            ,kwargs['project']
            ,environ = kwargs['env']
            ,sendRC = False
            ,timeout = kwargs['timeout']
            ,logfile = kwargs['logfile']
            ,appendToLog = kwargs['appendToLog']
            )
        return self.command.start()
registerSlaveCommand("boost.jam", Command_Boost_Jam, _ver)
class Command_Boost_ProcessJamLog(NoOpCommand):
    """Slave command that pipes the bjam log through process_jam_log to
    produce per-test XML result files under the locate directory."""
    def start(self):
        return self._start( "boost.process_jam_log"
            ,c( self.doProcessJamLog
                ,process_jam_log = os.path.normpath(os.path.join(
                    self.builder.basedir,self.args.get('locate','build'),
                    self.args.get('process_jam_log','dist/bin/process_jam_log')))
                ,boostroot = os.path.normpath(os.path.join(
                    self.builder.basedir,self.args.get('boostroot',self.args.get('workdir','.'))))
                ,logfile = os.path.normpath(os.path.join(
                    self.builder.basedir,self.args.get('locate','build'),
                    self.args.get('logfile','bjam.log')))
                ,locate = os.path.normpath(os.path.join(
                    self.builder.basedir,self.args.get('locate','build')))
                ,timeout = self.args.get('timeout',60*15)
                )
            )
    def doProcessJamLog(self,*args,**kwargs):
        """Build and run the platform-specific pipe command."""
        self.stdout("processing the regression test results...")
        # 'type' is the Windows equivalent of POSIX 'cat'.
        if runtime.platformType != 'posix':
            command = 'type "%s" | "%s" "%s"' % (kwargs['logfile'], kwargs['process_jam_log'], kwargs['locate'])
        else:
            command = 'cat "%s" | "%s" "%s"' % (kwargs['logfile'], kwargs['process_jam_log'], kwargs['locate'])
        self.command = ShellCommand(self.builder
            ,command
            ,kwargs['boostroot']
            ,timeout = kwargs['timeout']
            )
        return self.command.start()
registerSlaveCommand("boost.process_jam_log", Command_Boost_ProcessJamLog, _ver)
class Command_Boost_CollectResults(NoOpCommand):
    """Slave command that merges every test_log.xml under the locate
    directory into one <runner>.xml test-run document, then zips it for
    upload."""
    def start(self):
        return self._start( "boost.collect_results",
            c( self.doCollectResults
                ,results = os.path.normpath(os.path.join(
                    self.builder.basedir,self.args.get('locate','build'),
                    '%s.xml' % self.args['runner']))
                ,locate = os.path.normpath(os.path.join(
                    self.builder.basedir,self.args.get('locate','build')))
                ,runner = self.args['runner']
                # The stamp arrives as ISO 'T'-separated; the results
                # format wants a space.
                ,timestamp = string.replace(self.args['stamp'],'T',' ')
                ,tag = '%s-%s' % (self.args['source_type'],self.args['branch'])
                ,source = self.args['source_type']
                ,comments = self.args.get('comments',
                    os.path.normpath(os.path.join(self.builder.basedir,'..','comments.html')))
                ,platform = self.args.get('platform',platform.system())
                ,timeout = self.args.get('timeout',60*15)
                ),
            c( self.doZipArchive
                ,source = os.path.normpath(os.path.join(
                    self.builder.basedir,self.args.get('locate','build'),
                    '%s.xml' % self.args['runner']))
                ,archive = os.path.normpath(os.path.join(
                    self.builder.basedir,self.args.get('locate','build'),
                    '%s.zip' % self.args['runner']))
                ,timeout = self.args.get('timeout',60*15)
                )
            )
    def doCollectResults(self,*args,**kwargs):
        """Write the <test-run> XML wrapper and embed every test log."""
        self.stdout("collecting the regression test results...")
        result = 0
        previous_umask = os.umask(0022)
        results_writer = open( kwargs['results'], 'w' )
        self.stdout( 'Collecting test logs into "%s"...' % kwargs['results'] )
        results_xml = xml.sax.saxutils.XMLGenerator( results_writer )
        results_xml.startDocument()
        results_xml.startElement( 'test-run' ,{
            'tag': kwargs['tag']
            ,'platform': kwargs['platform']
            ,'runner': kwargs['runner']
            ,'timestamp': kwargs['timestamp']
            ,'source': kwargs['source']
            ,'run-type': 'incremental'
            })
        self._copy_comments( results_xml, kwargs['comments'] )
        self._collect_test_logs( [ kwargs['locate'] ], results_writer )
        results_xml.endElement( "test-run" )
        results_xml.endDocument()
        results_writer.close()
        self.stdout( 'Done writing "%s".' % kwargs['results'] )
        os.umask(previous_umask)
        return result
    def _copy_comments(self,results_xml,comment_file):
        """Embed the optional comments file as a <comment> element."""
        results_xml.startElement( 'comment', {} )
        if os.path.exists( comment_file ):
            self.stdout( 'Reading comments file "%s"...' % comment_file )
            f = open( comment_file, 'r' )
            try:
                results_xml.characters( f.read() )
            finally:
                f.close()
        else:
            self.stdout( 'Warning: comment file "%s" is not found.' % comment_file )
        results_xml.endElement( 'comment' )
    def _collect_test_logs(self,input_dirs,test_results_writer):
        """Append every test_log.xml found under *input_dirs*."""
        self.stdout( 'Collecting test logs ...' )
        for input_dir in input_dirs:
            self.stdout( 'Walking directory "%s" ...' % input_dir )
            os.path.walk( input_dir, self._process_test_log_files, test_results_writer )
    def _process_test_log_files(self,output_file,dir,names):
        # os.path.walk visitor: pick out test_log.xml files only.
        for file in names:
            if os.path.basename( file ) == 'test_log.xml':
                self._process_xml_file( os.path.join( dir, file ), output_file )
    def _process_xml_file(self,input_file,output_file):
        """Copy one log file into the results, replacing unsafe bytes
        with '?' via the shared translation table."""
        self.stdout( 'Processing test log "%s"' % input_file )
        f = open( input_file, 'r' )
        xml = f.readlines()
        f.close()
        for i in range( 0, len(xml)):
            # NOTE(review): this references
            # boost.buildbot.char_translation_table while the module-level
            # import above is boost.bot.char_translation_table; one of the
            # two spellings looks stale after the tree merge -- confirm
            # which package this file actually lives in.
            xml[i] = string.translate( xml[i], boost.buildbot.char_translation_table.char_translation_table )
        output_file.writelines( xml )
    def doZipArchive(self,*args,**kwargs):
        """Zip the merged results, falling back to a platform-specific
        zip_cmd helper module when zipfile fails."""
        source_path = kwargs['source']
        archive_path = kwargs['archive']
        self.stdout("creating archive %s for %s" % ( archive_path, source_path ))
        result = 0
        previous_umask = os.umask(0022)
        try:
            z = zipfile.ZipFile( archive_path, 'w', zipfile.ZIP_DEFLATED )
            z.write( source_path, os.path.basename( source_path ) )
            z.close()
            self.stdout( 'Done writing "%s".'% archive_path )
        except Exception, msg:
            self.stdout( 'Warning: Compressing failed (%s)' % msg )
            self.stdout( ' Trying to compress using a platform-specific tool...' )
            try: import zip_cmd
            except ImportError:
                script_dir = os.path.dirname( os.path.abspath( sys.argv[0] ) )
                self.stdout( 'Could not find \'zip_cmd\' module in the script directory (%s).' % script_dir )
                result = -1
            else:
                # Remove any stale partial archive before retrying.
                if os.path.exists( archive_path ):
                    os.unlink( archive_path )
                    self.stdout( 'Removing stale "%s".' % archive_path )
                zip_cmd.main( source_path, archive_path )
                self.stdout( 'Done compressing "%s".' % archive_path )
        os.umask(previous_umask)
        return result
registerSlaveCommand("boost.collect_results", Command_Boost_CollectResults, _ver)
class Command_Boost_PublishResults(NoOpCommand):
    """Slave command that uploads the zipped test results to the
    configured publish location (currently FTP only)."""
    def start(self):
        return self._start( "boost.publish_results",
            c( self.doPublish
                ,source = os.path.normpath(os.path.join(
                    self.builder.basedir,self.args.get('locate','build'),
                    '%s.zip' % self.args['runner']))
                ,target = '%s/%s-%s' % (self.args['publish_location'],self.args['source_type'],self.args['branch'])
                ,proxy = self.args.get('proxy')
                ,timeout = self.args.get('timeout',60*15)
                )
            )
    def doPublish(self,*args,**kwargs):
        """Dispatch to a _publish_<scheme> handler based on the target
        URL's scheme."""
        self.stdout("publishing the regression test results...")
        result = 0
        (scheme,site,path,query,fragment) = urlparse.urlsplit(kwargs['target'])
        publish_call = getattr(self,'_publish_%s' % scheme,None)
        if callable(publish_call):
            result = publish_call(scheme,site,path,query,fragment,**kwargs)
        else:
            self.stdout('unknown publish method "%s"' % scheme)
            result = -1
        return result
    def _publish_ftp(self,scheme,site,path,query,fragment,**kwargs):
        """Upload the archive via anonymous FTP, optionally through a
        proxy, creating the target directory if needed."""
        self.stdout( 'Uploading log archive "%s" to %s' % ( kwargs['source'], kwargs['target'] ) )
        if not kwargs['proxy']:
            ftp = ftplib.FTP( site )
            ftp.set_debuglevel( 1 )
            ftp.login()
        else:
            # BUGFIX: the original called ``utils.log`` here, but no
            # ``utils`` module is imported in this file (NameError as
            # soon as a proxy is configured); report through the
            # command's own stdout channel instead.
            self.stdout( ' Connecting through FTP proxy server "%s"' % kwargs['proxy'] )
            ftp = ftplib.FTP( kwargs['proxy'] )
            ftp.set_debuglevel( 1 )
            ftp.set_pasv (0) # turn off PASV mode
            ftp.login( 'anonymous@%s' % site, 'anonymous@' )
        ftp.cwd( os.path.dirname(path) )
        try:
            ftp.cwd( os.path.basename(path) )
        except ftplib.error_perm:
            # Target directory does not exist yet: create it and enter.
            ftp.mkd( os.path.basename(path) )
            ftp.cwd( os.path.basename(path) )
        # Close the upload file even if the transfer fails (the original
        # leaked the handle).
        f = open( kwargs['source'], 'rb' )
        try:
            ftp.storbinary( 'STOR %s' % os.path.basename( kwargs['source'] ), f )
        finally:
            f.close()
        ftp.quit()
        return 0
registerSlaveCommand("boost.publish_results", Command_Boost_PublishResults, _ver)

View file

@ -0,0 +1,129 @@
# Copyright Redshift Software, Inc. 2005-2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from buildbot.steps.shell import ShellCommand
from buildbot.process.buildstep import LoggedRemoteCommand
import re
import string
import twisted.python
class command_base(ShellCommand):
    """Common base for the Boost build steps: runs a registered slave
    command (by name) instead of a literal shell command."""
    def __init__(self, _name, _description, **kwargs):
        # Callers may override the remote command name and description
        # through kwargs.
        if kwargs.get('name'): _name = kwargs.get('name')
        if not kwargs.get('description'): kwargs['description'] = _description
        ShellCommand.__init__(self,**kwargs)
        # Name of the slave-side command to invoke in start().
        self._name = _name
    def start(self):
        #~ command = self._interpolateProperties(self.command)
        #~ assert isinstance(command, (list, tuple, str))
        kwargs = self.remote_kwargs
        #~ kwargs['command'] = command
        # Copy the env dict so the remote invocation cannot mutate ours.
        if kwargs.get('env'): kwargs['env'] = kwargs['env'].copy()
        kwargs['logfiles'] = self.logfiles
        # Dispatch to the slave command registered under self._name.
        cmd = LoggedRemoteCommand(self._name,kwargs)
        self.setupEnvironment(cmd)
        self.checkForOldSlaveAndLogfiles()
        self.startCommand(cmd)
class SelfUpdate(command_base):
    """Step that asks the slave to reload its command modules."""
    def __init__(self, **kwargs):
        command_base.__init__(self, 'selfupdate', ["self","update"], **kwargs)
class Tarball(command_base):
    """Step that archives and publishes the source tree on the slave."""
    def __init__(self, **kwargs):
        command_base.__init__(self, 'tarball', ["tarball"], **kwargs)
class Boost_Jam_Build(command_base):
    """Step that bootstraps the bjam executable on the slave."""
    def __init__(self, **kwargs):
        command_base.__init__(self, 'boost.jam.build', ["bjam","build"], **kwargs)
class Boost_Jam(command_base):
    """Step that runs a bjam invocation on the slave."""
    def __init__(self, **kwargs):
        command_base.__init__(self, 'boost.jam', ["bjam"], **kwargs)
class Boost_Test(command_base):
    """Step that runs bjam with --dump-tests to discover test targets,
    then inserts one Boost_Jam step per discovered target into the
    running build."""
    def __init__(self, **kwargs):
        # Regex patterns selecting which tests to run; removed from
        # kwargs before they reach ShellCommand.
        self.tests = kwargs.get('tests');
        if kwargs.has_key('tests'): del kwargs['tests']
        # Kept so commandComplete can clone the arguments for the
        # per-target steps it creates.
        self._kwargs = kwargs
        command_base.__init__(self, 'boost.jam', ["btest"], **kwargs)
    def commandComplete(self, cmd):
        """Parse the --dump-tests output and schedule one follow-up
        Boost_Jam step per matching test target."""
        def test_match(t,r):
            # reduce() accumulator: True once any pattern matches the
            # test name in parts[1] (closure over the loop variable).
            return t or r.match(parts[1])
        #~ Get the log so we can parse it to find all the targets
        #~ we can test.
        out = cmd.log.getText()
        lines = string.split(out,"\n")
        test_targets = {}
        test_re = []
        for test in self.tests:
            test_re.append(re.compile(test))
        for line in lines:
            # Split on the various quote/bracket delimiters bjam emits
            # around 'boost-test(TARGET) "name" : "target"...' lines.
            parts = re.split('(?:" ")|(?:" ")|(?: ")|(?:" )|(?: [[]")|(?:"[]] )|(?:")',line)
            if not parts: continue
            if parts[0] != 'boost-test(TARGET)': continue
            if not reduce(test_match,test_re,False): continue
            try:
                target_i = parts.index(':')+1
            except:
                continue
            twisted.python.log.msg("Boost_Test.commandComplete: TEST = %s -- TARGETS = %s" %
                (parts[1],string.join(parts[target_i:-1],' ')) )
            # Collect targets as dict keys to de-duplicate.
            for t in parts[target_i:-1]:
                test_targets[t] = True
        test_targets = test_targets.keys()
        test_targets.sort()
        #~ Construct new steps for each of the targets we want to test. It would be much
        #~ better to tell bjam all targets to test in groups instead of one per invocation.
        #~ But there's no "easy" way to do that. Passing in args can blow the command line
        #~ limits. Setting an env can also blow that limit, but this may be a higher limit
        #~ and we could do them piecemeal.
        kwargs = self._kwargs.copy()
        kwargs.update({
            'flunkOnFailure': False,
            'appendToLog': True
            })
        # NOTE(review): .copy() is shallow, so these remove() calls
        # mutate the options list shared with self._kwargs, and they
        # raise ValueError if either flag is absent (the current
        # action_btest_all only passes --dump-tests) -- confirm intent.
        kwargs['options'].remove('--dump-tests')
        kwargs['options'].remove('--dump-test-targets')
        count = 0
        for test_target in test_targets:
            kwargs['target'] = test_target
            step = Boost_Jam(**kwargs)
            count += 1
            step.name = "%s.%d" % (step.name,count)
            #~ The steps up to our point have been eaten away already. So we
            #~ can add to the front so that the additional steps get executed
            #~ before the rest.
            self.build.steps.insert(count-1,step)
            self.build.build_status.addStep(step)
        #~ Rearrange the steps on the build_status to match the order in the
        #~ actual build.
        existing_count = len(self.build.steps)-count
        new_count = count
        a = self.build.build_status.steps[0:-new_count-existing_count]
        c = self.build.build_status.steps[-new_count-existing_count:-new_count]
        b = self.build.build_status.steps[-new_count:]
        self.build.build_status.steps = a+b+c
class Boost_Process_Jam_Log(command_base):
    # Build step wrapper for the "boost.process_jam_log" slave command
    # (registered below in remote.py), which converts a saved bjam log
    # into per-test XML result files.
    def __init__(self, **kwargs):
        command_base.__init__(self, 'boost.process_jam_log', ["process log"], **kwargs)
class Boost_Collect_Results(command_base):
    # Build step wrapper for the "boost.collect_results" slave command,
    # which gathers the per-test XML logs into one runner results file.
    def __init__(self, **kwargs):
        command_base.__init__(self, 'boost.collect_results', ["collect results"], **kwargs)
class Boost_Publish_Results(command_base):
    # Build step wrapper for the "boost.publish_results" slave command,
    # which uploads the zipped results to the publish location.
    def __init__(self, **kwargs):
        command_base.__init__(self, 'boost.publish_results', ["publish results"], **kwargs)

View file

@ -0,0 +1,9 @@
# Copyright Redshift Software, Inc. 2005-2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# SVN keyword-substituted metadata strings. After checkout they expand to
# e.g. '$Date: ... $' / '$Revision: ... $'; show_revision() in run.py parses
# the value back out with a '^\$\w+:\s+(.*)\s+\$$' regex.
modified = '$Date$'
revision = '$Revision$'

View file

@ -0,0 +1,19 @@
# Copyright Redshift Software, Inc. 2005-2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import string
def chr_or_question_mark( c ):
    """Map ordinal *c* to its character when it is plain printable ASCII.

    Tab (0x09), vertical tab (0x0b), form feed (0x0c) and everything at or
    above 0x80 are replaced by '?'; all other printable characters (including
    newline and carriage return, which are in string.printable) pass through.
    """
    ch = chr(c)
    is_plain_ascii = c < 128 and c not in (0x09, 0x0b, 0x0c)
    if is_plain_ascii and ch in string.printable:
        return ch
    return '?'
# 256-entry translation table: maps every byte to itself when it is plain
# printable ASCII and to '?' otherwise (see chr_or_question_mark above).
# Used via string.translate() to scrub test logs before embedding them in
# XML.  NOTE: string.maketrans over two full byte strings is Python 2 only;
# Python 3 would use str.maketrans / bytes.maketrans instead.
char_translation_table = string.maketrans(
    ''.join( map( chr, range(0, 256) ) )
    , ''.join( map( chr_or_question_mark, range(0, 256) ) )
    )

View file

@ -0,0 +1,281 @@
# Copyright Redshift Software, Inc. 2005-2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import boost.buildbot.step
import buildbot
import buildbot.process.base
import buildbot.process.factory
import buildbot.process.step
import os.path
import re
import string
import time
import twisted.python
import types
import urllib
from buildbot.process.factory import s
def action(_action,*_args,**_kwargs):
    """Bundle an action name with its positional and keyword arguments.

    Returns the (_action, _args, _kwargs) triple consumed by
    Boost_BuildFactory.newBuild().  Note the historical quirk: with no
    positional arguments the middle element is the list [] rather than
    the empty tuple.
    """
    return (_action, _args or [], _kwargs or {})
def defaults(_defaults = None,**_kwargs):
    """Return a dict of the standard buildbot step options.

    Values are taken from _kwargs when present, otherwise the usual Boost
    defaults apply (only failures flunk, 30 minute timeout).  If a dict is
    passed as _defaults it is updated in place and returned.

    Bug fix: the original used a mutable default argument ({}), so every
    no-argument caller shared -- and returned -- the very same dict object,
    an aliasing hazard.  A fresh dict is now created per call.
    """
    if _defaults is None:
        _defaults = {}
    _defaults.update({
        'haltOnFailure': _kwargs.get('haltOnFailure',False),
        'flunkOnWarnings': _kwargs.get('flunkOnWarnings',False),
        'flunkOnFailure': _kwargs.get('flunkOnFailure',True),
        'warnOnWarnings': _kwargs.get('warnOnWarnings',False),
        'warnOnFailure': _kwargs.get('warnOnFailure',False),
        'timeout': _kwargs.get('timeout',30*60)
        })
    return _defaults
class Boost_BuildFactory(buildbot.process.factory.BuildFactory):
    """Build factory driven by a declarative list of action triples.

    Each constructor positional argument is an (action_name, args, kwargs)
    triple (see action()/defaults() above); newBuild() maps each name to the
    matching action_* method, which returns the buildbot steps to run plus
    the regex patterns of files whose changes make a build "important".
    """
    def __init__(self, *actions, **args):
        buildbot.process.factory.BuildFactory.__init__(self)
        self.actions = actions or []
        # Factory-wide options; merged into every action's kwargs.
        self.options = args or {}
        #~ --
        self.steps = []
        self.treeStableTimer = 5*60
        self.buildClass = Boost_Build
    def newBuild(self):
        """Create a Boost_Build with steps expanded from self.actions."""
        b = buildbot.process.factory.BuildFactory.newBuild(self)
        b.setOptions(self.options)
        steps = []
        files = []
        for (_action,_args,_kwargs) in self.actions:
            # Dispatch by name: 'cvs' -> self.action_cvs, etc.  Unknown
            # action names are silently ignored.
            action_call = getattr(self,'action_%s' % _action,None)
            if callable(action_call):
                # Drop None-valued kwargs so action defaults apply.
                # NOTE(review): deleting while iterating keys() is safe in
                # Python 2 (keys() returns a list copy) but not Python 3.
                for k in _kwargs.keys():
                    if _kwargs[k] == None: del _kwargs[k]
                _kwargs.update(self.options)
                (action_steps,action_files) = action_call(b,*_args,**_kwargs)
                steps = steps + action_steps
                files = files + action_files
        b.important_files = files
        b.setSteps(steps)
        return b
    def action_cvs(self,b,*args,**kwargs):
        # Source checkout step via buildbot's stock CVS step.
        opt = {
            'cvsmodule' : kwargs.get('module',"boost"),
            'global_options' : ["-z9"],
            'mode' : kwargs.get('mode',"copy"),
            'branch' : kwargs.get('branch','HEAD'),
            'cvsroot' : kwargs.get('root')
            }
        # NOTE: dict.has_key() is a Python 2 idiom ('passwd' in kwargs in py3).
        if kwargs.has_key('passwd'):
            opt['login'] = kwargs['passwd'] or ""
        opt.update(defaults(**kwargs))
        return (
            [ s(buildbot.process.step.CVS,**opt) ],
            kwargs.get('files',[".*"]) )
    def action_tarball(self,b,*args,**kwargs):
        # Archive the checkout and move it to a publish directory.
        return (
            [ s( boost.buildbot.step.Tarball
                ,description = kwargs.get('description')
                ,archive = kwargs.get('archive',b.workdir)
                ,publishdir = kwargs['publishdir']
                ,branch = kwargs.get('branch','HEAD')
                ,**defaults(**kwargs)
                ) ],
            kwargs.get('files',[]) )
    def action_selfupdate(self,b,*args,**kwargs):
        # Ask the slave to reload its command modules in place.
        return (
            [ s( boost.buildbot.step.SelfUpdate
                ,description = kwargs.get('description')
                ,**defaults(**kwargs)
                ) ],
            kwargs.get('files',[]) )
    def action_bjam_build(self,b,*args,**kwargs):
        # Bootstrap the bjam executable from its sources.
        return (
            [ s( boost.buildbot.step.Boost_Jam_Build
                ,description = kwargs.get('description')
                ,workdir = b.workdir
                ,jam_src = kwargs.get('jam_src','tools/build/jam_src')
                ,toolset = kwargs.get('toolset',None)
                ,**defaults(**kwargs)
                ) ],
            kwargs.get('files',[]) )
    def action_bjam(self,b,*args,**kwargs):
        # Run bjam on a project with the given options/target.
        return (
            [ s( boost.buildbot.step.Boost_Jam
                ,description = kwargs.get('description')
                ,workdir = b.workdir
                ,bjam = kwargs.get('bjam','tools/build/jam_src/bin/bjam')
                ,project = kwargs.get('project','.')
                ,options = kwargs.get('options',[])
                ,target = kwargs.get('target','all')
                ,locate = kwargs.get('locate','build')
                ,env = kwargs.get('env',{})
                ,logfile = kwargs.get('logfile',False)
                ,**defaults(**kwargs)
                ) ],
            kwargs.get('files',[]) )
    def action_test_tools_build(self,b,*args,**kwargs):
        # Build the regression-testing tools (process_jam_log et al.).
        return self.action_bjam( b
            ,description = kwargs.get('description',['test tools','build'])
            ,project = 'tools/regression/build'
            ,options = [
                '-sBUILD=release',
                '-sTOOLS=%s' % kwargs['toolset']
                ] + kwargs.get('options',[])
            ,target = 'run'
            ,locate = kwargs.get('locate','build')
            ,env = kwargs.get('env',{})
            ,**defaults(**kwargs)
            )
    def action_btest(self,b,*args,**kwargs):
        # Incremental test run: the Boost_Test step parses the --dump-tests
        # output and spawns one bjam step per matched test target.
        return (
            [ s( boost.buildbot.step.Boost_Test
                ,description = kwargs.get('description')
                ,workdir = b.workdir
                ,tests = kwargs.get('tests',['.*'])
                ,bjam = kwargs.get('bjam','tools/build/jam_src/bin/bjam')
                ,project = kwargs.get('project','status')
                # NOTE(review): when the caller supplies 'options', the
                # entire default list (including --dump-tests and
                # --dump-test-targets, which Boost_Test later remove()s) is
                # skipped -- unlike action_btest_all, which always prepends
                # the dump flags.  Looks unintended; verify before relying
                # on caller-supplied options here.
                ,options = kwargs.get('options',[
                    '--dump-tests',
                    '--dump-test-targets',
                    '-sBUILD=%s' % kwargs.get('build','debug'),
                    '-sTOOLS=%s' % kwargs['toolset']
                    ] + kwargs.get('options',[]))
                ,target = 'nothing'
                ,locate = kwargs.get('locate','build')
                ,env = kwargs.get('env',{})
                ,logfile = kwargs.get('logfile','bjam.log')
                ,**defaults(**kwargs)
                ) ],
            kwargs.get('files',[]) )
    def action_btest_all(self,b,*args,**kwargs):
        # Full test run: single bjam invocation of the 'test' target.
        return self.action_bjam( b
            ,description = kwargs.get('description',['btest','all'])
            ,project = kwargs.get('project','status')
            ,options = [
                '--dump-tests',
                '--dump-test-targets',
                '-sBUILD=%s' % kwargs.get('build','debug'),
                '-sTOOLS=%s' % kwargs['toolset']
                ] + kwargs.get('options',[])
            ,target = 'test'
            ,locate = kwargs.get('locate','build')
            ,env = kwargs.get('env',{})
            ,logfile = kwargs.get('logfile','bjam.log')
            ,files = kwargs.get('files',['boost.*','libs.*','status.*'])
            ,**defaults(**kwargs)
            )
    def action_process_jam_log(self,b,*args,**kwargs):
        # Convert the bjam log into per-test XML result files.
        # NOTE(review): the kwarg 'projcess_jam_log' is misspelled; the slave
        # command reads 'process_jam_log' and so always uses its default
        # path.  Verify before renaming -- step kwargs may be validated.
        return (
            [ s( boost.buildbot.step.Boost_Process_Jam_Log
                ,description = kwargs.get('description',['process log'])
                ,workdir = b.workdir
                ,projcess_jam_log = kwargs.get('projcess_jam_log','tools/regression/build/run/process_jam_log')
                ,locate = kwargs.get('locate','build')
                ,logfile = kwargs.get('logfile','bjam.log')
                ,**defaults(**kwargs)
                ) ],
            kwargs.get('files',[]) )
    def action_collect_results(self,b,*args,**kwargs):
        # Gather the per-test XML logs into one runner results file.
        return (
            [ s( boost.buildbot.step.Boost_Collect_Results
                ,description = kwargs.get('description')
                ,workdir = b.workdir
                ,locate = kwargs.get('locate',b.options.get('locate','build'))
                ,runner = kwargs['runner']
                ,branch = kwargs['branch']
                ,source_type = kwargs['source_type']
                ,**defaults(**kwargs)
                ) ],
            kwargs.get('files',[]) )
    def action_publish_results(self,b,*args,**kwargs):
        # Upload the zipped results to the publish location.
        return (
            [ s( boost.buildbot.step.Boost_Publish_Results
                ,description = kwargs.get('description')
                ,workdir = b.workdir
                ,locate = kwargs.get('locate',b.options.get('locate','build'))
                ,runner = kwargs['runner']
                ,branch = kwargs['branch']
                ,source_type = kwargs['source_type']
                ,publish_location = kwargs['publish_location']
                ,proxy = kwargs.get('proxy')
                ,**defaults(**kwargs)
                ) ],
            kwargs.get('files',[]) )
class Boost_Build(buildbot.process.base.Build):
    """Build subclass that filters changes by regex and stamps every step.

    important_files (set by Boost_BuildFactory.newBuild) is a list of regex
    patterns; a change triggers a build only if some pattern matches one of
    its files.  Each step additionally gets a 'stamp' argument identifying
    the source snapshot being built.
    """
    def __init__(self):
        buildbot.process.base.Build.__init__(self)
        self.important_files = []
        # Compiled lazily on first isFileImportant() call.
        self.important_re = None
    def isFileImportant(self, filename):
        # Returns 1 when any important_files pattern matches, else 0.
        if self.important_re == None:
            self.important_re = []
            for file in self.important_files:
                self.important_re.append(re.compile(file))
        for file_re in self.important_re:
            if file_re.search(filename):
                return 1;
        return 0
    def setOptions(self,options = {}):
        # Record factory options; 'workdir' defaults to 'build'.
        self.options = options or {}
        self.workdir = self.options.get('workdir','build')
    def setupBuild(self, expectations):
        #~ Hack the stamp as an allowed arg for steps.
        if 'stamp' not in buildbot.process.step.BuildStep.parms:
            buildbot.process.step.BuildStep.parms.append('stamp')
        return buildbot.process.base.Build.setupBuild(self,expectations)
    def getNextStep(self):
        s = buildbot.process.base.Build.getNextStep(self)
        if s:
            #~ Add a stamp arg for the steps to use as needed.
            stamp = self._get_stamp()
            s.stamp = stamp
            # Also forward it to the remote command's args when present.
            if hasattr(s,'cmd'):
                if hasattr(s.cmd,'args'):
                    s.cmd.args.update( { 'stamp' : stamp } )
        return s
    def _get_stamp(self):
        #~ The default is to use the revision sequence as the "time".
        #~ If not available, because of a forced build for example, we
        #~ use the current time.
        stamp = time.strftime( '%Y-%m-%dT%H:%M:%S', time.gmtime() )
        revision, patch = self.getSourceStamp()
        if not revision:
            changes = self.allChanges()
            if changes:
                last_change_time = max([c.when for c in changes])
                last_change_revision = max([c.revision for c in changes])
                #~ Prefer using the revision change if present. If it's not
                #~ it's likely a CVS like time sequence, so use the time sequence
                #~ int that case (adjusted with the tree timer).
                # NOTE(review): treeStableTimer is an attribute of the
                # factory, not obviously copied onto the Build -- confirm
                # this branch does not raise AttributeError.
                if last_change_revision:
                    stamp = last_change_revision
                else:
                    stamp = time.strftime( '%Y-%m-%dT%H:%M:%S',
                        time.gmtime(last_change_time + self.treeStableTimer / 2) )
        return stamp

View file

@ -0,0 +1,520 @@
# Copyright Redshift Software, Inc. 2005
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import boost.buildbot.char_translation_table
import ftplib
import platform
import re
import os
import os.path
import shutil
import string
import sys
import tarfile
import urlparse
import xml.sax.saxutils
import zipfile
from buildbot.slave.commands import Command, AbandonChain, ShellCommand
from buildbot.slave.registry import registerSlaveCommand
from twisted.internet import reactor, defer
from twisted.python import failure, log, runtime
cvs_ver = '$Revision$'[1+len("Revision: "):-2]
class LoggedShellCommand(ShellCommand):
    """ShellCommand that additionally mirrors child output to a local file.

    Extra kwargs:
      logfile     -- path of the log file; parent directories are created.
      appendToLog -- when True and the file already exists, append to it
                     instead of overwriting.

    Bug fix: the stderr handler was accidentally declared as a second
    'addStdout' method, which silently shadowed the first definition and
    left stderr out of the log entirely; it is now 'addStderr'.
    """
    def __init__(self, builder, command, workdir, **kwargs):
        ShellCommand.__init__(self,builder,command,workdir
            ,environ = kwargs.get('environ',{})
            ,sendStdout = kwargs.get('sendStdout',True)
            ,sendStderr = kwargs.get('sendStderr',True)
            ,sendRC = kwargs.get('sendRC',True)
            ,timeout = kwargs.get('timeout',None)
            ,stdin = kwargs.get('stdin',None)
            ,keepStdout = kwargs.get('keepStdout',False)
            )
        self.logfile = None
        logfile = kwargs.get('logfile')
        if logfile:
            logdir = os.path.dirname(logfile)
            if not os.path.exists(logdir):
                os.makedirs(logdir)
            #~ open() instead of the Python-2-only file() builtin; identical
            #~ behavior on Python 2, portable to Python 3.
            if kwargs.get('appendToLog',False) and os.path.exists(logfile):
                self.logfile = open(logfile,"a")
            else:
                self.logfile = open(logfile,"w")
    def addStdout(self, data):
        # Forward to the master, then mirror into the local log.
        ShellCommand.addStdout(self,data)
        if self.logfile: self.logfile.write(data)
    def addStderr(self, data):
        # Forward to the master, then mirror into the local log.
        ShellCommand.addStderr(self,data)
        if self.logfile: self.logfile.write(data)
    def finished(self, sig, rc):
        # Close the mirror log before reporting completion.
        if self.logfile: self.logfile.close()
        ShellCommand.finished(self,sig,rc)
def c(callback, *args, **kwargs):
    """Bundle a callback with its arguments into a (callback, args, kwargs)
    triple for NoOpCommand._start().

    Historical quirk preserved: with no positional arguments the middle
    element is the list [] rather than the empty tuple.
    """
    return (callback, args or [], kwargs or {})
class NoOpCommand(Command):
    """Base slave command: runs a sequence of callback triples via a
    twisted Deferred, checking each callback's integer return code and
    aborting the chain (AbandonChain) on interrupt or nonzero rc.

    Subclasses override start() and pass c(...) triples to _start().
    """
    def start(self):
        return self._start("noop",c(self.doNoOp))
    def doNoOp(self):
        self.stdout("do noop")
        return 0
    def stdout(self, message):
        # Send one line of output back to the master's status log.
        self.sendStatus({'stdout': message+"\n"})
    def interrupt(self):
        # Checked by _result_check between callbacks.
        self.interrupted = True
    def _start(self, name, *callbacks):
        # Chain: for each (call,args,kwargs), run it then verify its rc;
        # finally report overall success or failure to the master.
        d = defer.Deferred()
        self.stdout("starting %s operation" % name)
        self.name = name
        self.command = None
        for call,args,kwargs in callbacks:
            # NOTE(review): '[call]+args' needs args to be a list; c() only
            # returns [] when no positional args are given, and all callers
            # in this file pass keyword args only -- confirm before passing
            # positionals through c().
            d.addCallbacks(self._do_call,None,[call]+args,kwargs)
            d.addCallback(self._result_check)
        d.addCallbacks(self._success,self._failure)
        # Kick the chain off shortly after returning the deferred.
        reactor.callLater(2,d.callback,0)
        return d
    def _do_call(self, rc, call, *args, **kwargs):
        return call(*args,**kwargs)
    def _result_check(self, rc):
        if self.interrupted:
            raise AbandonChain(-1)
        if rc != 0:
            raise AbandonChain(rc)
        return 0
    def _success(self, rc):
        self.sendStatus({'rc': 0})
        return None
    def _failure(self, fail):
        # Only AbandonChain is expected here; report its rc to the master.
        fail.trap(AbandonChain)
        self.sendStatus({'rc': fail.value.args[0]})
        return None
registerSlaveCommand("noop", NoOpCommand, cvs_ver)
class SelfUpdateCommand(NoOpCommand):
    """Slave command "selfupdate": reloads the slave's command modules in
    place so a running slave picks up updated code without a restart."""
    def start(self):
        return self._start("selfupdate",c(self.doUpdateCommandRegistry))
    def doUpdateCommandRegistry(self):
        import buildbot.slave.registry
        import buildbot.slave.commands
        import boost.buildbot.remote
        self.stdout("updating command registry")
        # reload() is the Python 2 builtin (importlib.reload in Python 3).
        reload(buildbot.slave.registry)
        self.stdout("reloading standard commands")
        reload(buildbot.slave.commands)
        self.stdout("reloading boost commands")
        reload(boost.buildbot.remote)
        self.stdout("command registry update complete")
        self.stdout("commands:")
        # List every registered command and its version for the master log.
        for name, (factory, version) in buildbot.slave.registry.commandRegistry.items():
            self.stdout(" %s (%s)" % (name,version))
        return 0
registerSlaveCommand("selfupdate", SelfUpdateCommand, cvs_ver)
class TarballCommand(NoOpCommand):
def start(self):
stamp = self.args.get('stamp','')
stamp = stamp.replace(' ','-')
stamp = stamp.replace(':','_')
archive_stamped = os.path.normpath(os.path.join(self.builder.basedir,
"%s-%s-%s" % (self.args['archive'],self.args.get('branch','X'),stamp)))
return self._start( "tarball",
c( self.doCleanRepository,
repository = os.path.normpath(os.path.join(self.builder.basedir, self.args['workdir'])) ),
c( self.doArchive,
source = os.path.normpath(os.path.join(self.builder.basedir, self.args['workdir'])),
archive = archive_stamped ),
c( self.doPublish,
archive = archive_stamped,
publishdir = os.path.normpath(self.args['publishdir']) ) )
def doCleanRepository(self,*args,**kwargs):
self.stdout("cleaning repository at %s..." % kwargs['repository'])
self._clean_r(kwargs['repository'])
return 0
def doArchive(self,*args,**kwargs):
source_path = kwargs['source']
archive_path = "%s.tar.bz2" % kwargs['archive']
archive_dir = os.path.basename( kwargs['archive'] )
self.stdout("creating archive %s for %s" % ( archive_path, source_path ))
previous_umask = os.umask(0022)
tar = tarfile.open(archive_path, 'w:bz2')
#~ Disabling posix allows for longer names and hence deeper directories.
tar.Posix = False
tar.add(source_path, archive_dir)
tar.close()
os.umask(previous_umask)
return 0
def doPublish(self,*args,**kwargs):
archive_path = "%s.tar.bz2" % kwargs['archive']
self.stdout("publishing archive %s to %s" % ( archive_path, kwargs['publishdir'] ))
previous_umask = os.umask(0022)
try:
os.makedirs(kwargs['publishdir'],0755)
except:
pass
#~ shutil.move is available on py2.3, consider copy/rename implementation to
#~ support py2.2. Or possibly do an external async "mv" command.
shutil.move(archive_path,kwargs['publishdir'])
self._clean_archives( kwargs['publishdir'], '[^\.]+\.tar\.bz2',
( os.path.basename(archive_path) ) )
os.umask(previous_umask)
return 0
def _clean_r(self,dir):
names = os.listdir(dir)
names.sort()
for name in names:
entry = os.path.join(dir,name)
if name == 'CVS':
self.stdout("[REMOVE] %s" % entry)
shutil.rmtree( entry )
elif os.path.isdir(entry):
self._clean_r(entry)
def _clean_archives(self,dir,m,exclude):
m_re = re.compile(m)
names = os.listdir(dir)
names.sort()
for name in names:
if m_re.search(name) and name not in exclude:
entry = os.path.join(dir,name)
self.stdout("[REMOVE] %s" % entry)
os.remove( entry )
registerSlaveCommand("tarball", TarballCommand, cvs_ver)
class Command_Boost_Jam_Build(NoOpCommand):
    """Slave command "boost.jam.build": bootstraps the bjam executable by
    running the platform build script in the jam_src directory."""
    def start(self):
        return self._start( "boost.bjam.build",
            c( self.doBJamBuild,
                jam_src = os.path.normpath(os.path.join(
                    self.builder.basedir, self.args['workdir'], self.args['jam_src'])),
                toolset = self.args.get('toolset',None),
                timeout = self.args.get('timeout',60*5))
            )
    def doBJamBuild(self,*args,**kwargs):
        self.stdout("building bjam at %s..." % kwargs['jam_src'])
        if runtime.platformType != 'posix':
            #~ BUG FIX: the original literal '.\build.bat' contained the
            #~ escape sequence '\b' (backspace), producing the broken
            #~ command '.<BS>uild.bat'.  The backslash is now escaped.
            command = [ '.\\build.bat' ]
        else:
            command = [ 'sh', './build.sh' ]
        if kwargs['toolset']:
            command.append(kwargs['toolset'])
        self.command = ShellCommand(self.builder, command,
            kwargs['jam_src'], { 'LOCATE_TARGET' : 'bin' },
            sendRC = False, timeout = kwargs['timeout'] )
        return self.command.start()
registerSlaveCommand("boost.jam.build", Command_Boost_Jam_Build, cvs_ver)
class Command_Boost_Jam(NoOpCommand):
    """Slave command "boost.jam": runs bjam on a project with the given
    options and target, optionally mirroring output into a log file."""
    def start(self):
        # Build up the bjam environment: all build products go under
        # ALL_LOCATE_TARGET, and BOOST_BUILD_PATH lets bjam find
        # configuration next to (and above) the builder directory.
        # NOTE(review): ':' is the POSIX path-list separator -- confirm
        # behavior on Windows slaves.
        _env = self.args.get('env',{})
        _env.update({
            'ALL_LOCATE_TARGET': os.path.normpath(os.path.join(
                self.builder.basedir,self.args.get('locate','build'))),
            'BOOST_BUILD_PATH': "%s:%s:%s" % (
                os.path.normpath(self.builder.basedir),
                os.path.normpath(os.path.join(self.builder.basedir,'..')),
                _env.get('BOOST_BUILD_PATH','.') )
            })
        # Resolve the log file relative to the locate dir (False = no log).
        _logfile = False
        if self.args.get('logfile'):
            _logfile = os.path.normpath(os.path.join(
                _env['ALL_LOCATE_TARGET'],self.args['logfile']))
        return self._start( "boost.bjam",
            c( self.doBJam
                ,bjam = os.path.normpath(os.path.join(self.builder.basedir,
                    self.args['workdir'], self.args['bjam']))
                ,project = os.path.normpath(os.path.join(self.builder.basedir,
                    self.args['workdir'], self.args.get('project','.')))
                ,options = self.args.get('options',[])
                ,target = self.args.get('target','all')
                ,env = _env
                ,logfile = _logfile
                ,appendToLog = self.args.get('appendToLog',False)
                ,timeout = self.args.get('timeout',60*5)
                )
            )
    def doBJam(self,*args,**kwargs):
        self.stdout("bjam %s..." % kwargs['target'])
        self.stdout(" env:")
        # Echo the full effective environment into the status log.
        env = os.environ.copy()
        env.update(kwargs['env'])
        for item in env.items():
            self.stdout(" %s = '%s'" % item)
        command = [ kwargs['bjam'] ] + kwargs['options'] + [ kwargs['target'] ]
        self.command = LoggedShellCommand(self.builder
            ,command
            ,kwargs['project']
            ,environ = kwargs['env']
            ,sendRC = False
            ,timeout = kwargs['timeout']
            ,logfile = kwargs['logfile']
            ,appendToLog = kwargs['appendToLog']
            )
        return self.command.start()
registerSlaveCommand("boost.jam", Command_Boost_Jam, cvs_ver)
class Command_Boost_ProcessJamLog(NoOpCommand):
    """Slave command "boost.process_jam_log": pipes the saved bjam log
    through the process_jam_log tool, which writes per-test XML files
    under the locate directory."""
    def start(self):
        return self._start( "boost.process_jam_log"
            ,c( self.doProcessJamLog
                ,process_jam_log = os.path.normpath(os.path.join(
                    self.builder.basedir,self.args.get('locate','build'),
                    self.args.get('process_jam_log','tools/regression/build/run/process_jam_log')))
                ,boostroot = os.path.normpath(os.path.join(
                    self.builder.basedir,self.args.get('boostroot',self.args.get('workdir','.'))))
                ,logfile = os.path.normpath(os.path.join(
                    self.builder.basedir,self.args.get('locate','build'),
                    self.args.get('logfile','bjam.log')))
                ,locate = os.path.normpath(os.path.join(
                    self.builder.basedir,self.args.get('locate','build')))
                ,timeout = self.args.get('timeout',60*15)
                )
            )
    def doProcessJamLog(self,*args,**kwargs):
        self.stdout("processing the regression test results...")
        # A string command runs through the shell: 'type' feeds the log on
        # Windows, 'cat' everywhere else.
        if runtime.platformType != 'posix':
            command = 'type "%s" | "%s" "%s"' % (kwargs['logfile'], kwargs['process_jam_log'], kwargs['locate'])
        else:
            command = 'cat "%s" | "%s" "%s"' % (kwargs['logfile'], kwargs['process_jam_log'], kwargs['locate'])
        self.command = ShellCommand(self.builder
            ,command
            ,kwargs['boostroot']
            ,timeout = kwargs['timeout']
            )
        return self.command.start()
registerSlaveCommand("boost.process_jam_log", Command_Boost_ProcessJamLog, cvs_ver)
class Command_Boost_CollectResults(NoOpCommand):
    """Slave command "boost.collect_results": wraps all test_log.xml files
    found under the locate directory into one <test-run> XML document named
    <runner>.xml, then zips it as <runner>.zip for publishing.

    Note: this file uses Python-2-only syntax (octal literals like 0022 and
    'except Exception, msg').
    """
    def start(self):
        return self._start( "boost.collect_results",
            c( self.doCollectResults
                ,results = os.path.normpath(os.path.join(
                    self.builder.basedir,self.args.get('locate','build'),
                    '%s.xml' % self.args['runner']))
                ,locate = os.path.normpath(os.path.join(
                    self.builder.basedir,self.args.get('locate','build')))
                ,runner = self.args['runner']
                # The step stamp is 'YYYY-MM-DDTHH:MM:SS'; the report wants
                # a space instead of the 'T'.
                ,timestamp = string.replace(self.args['stamp'],'T',' ')
                ,tag = '%s-%s' % (self.args['source_type'],self.args['branch'])
                ,source = self.args['source_type']
                ,comments = self.args.get('comments',
                    os.path.normpath(os.path.join(self.builder.basedir,'..','comments.html')))
                ,platform = self.args.get('platform',platform.system())
                ,timeout = self.args.get('timeout',60*15)
                ),
            c( self.doZipArchive
                ,source = os.path.normpath(os.path.join(
                    self.builder.basedir,self.args.get('locate','build'),
                    '%s.xml' % self.args['runner']))
                ,archive = os.path.normpath(os.path.join(
                    self.builder.basedir,self.args.get('locate','build'),
                    '%s.zip' % self.args['runner']))
                ,timeout = self.args.get('timeout',60*15)
                )
            )
    def doCollectResults(self,*args,**kwargs):
        # Write the <test-run> wrapper, then stream every test log into it.
        self.stdout("collecting the regression test results...")
        result = 0
        previous_umask = os.umask(0022)
        results_writer = open( kwargs['results'], 'w' )
        self.stdout( 'Collecting test logs into "%s"...' % kwargs['results'] )
        results_xml = xml.sax.saxutils.XMLGenerator( results_writer )
        results_xml.startDocument()
        results_xml.startElement( 'test-run' ,{
            'tag': kwargs['tag']
            ,'platform': kwargs['platform']
            ,'runner': kwargs['runner']
            ,'timestamp': kwargs['timestamp']
            ,'source': kwargs['source']
            ,'run-type': 'incremental'
            })
        self._copy_comments( results_xml, kwargs['comments'] )
        self._collect_test_logs( [ kwargs['locate'] ], results_writer )
        results_xml.endElement( "test-run" )
        results_xml.endDocument()
        results_writer.close()
        self.stdout( 'Done writing "%s".' % kwargs['results'] )
        os.umask(previous_umask)
        return result
    def _copy_comments(self,results_xml,comment_file):
        # Embed the optional runner comments file as a <comment> element.
        results_xml.startElement( 'comment', {} )
        if os.path.exists( comment_file ):
            self.stdout( 'Reading comments file "%s"...' % comment_file )
            f = open( comment_file, 'r' )
            try:
                results_xml.characters( f.read() )
            finally:
                f.close()
        else:
            self.stdout( 'Warning: comment file "%s" is not found.' % comment_file )
        results_xml.endElement( 'comment' )
    def _collect_test_logs(self,input_dirs,test_results_writer):
        self.stdout( 'Collecting test logs ...' )
        for input_dir in input_dirs:
            self.stdout( 'Walking directory "%s" ...' % input_dir )
            # os.path.walk is Python 2 only (removed in Python 3).
            os.path.walk( input_dir, self._process_test_log_files, test_results_writer )
    def _process_test_log_files(self,output_file,dir,names):
        # os.path.walk visitor: pick out the test_log.xml files.
        for file in names:
            if os.path.basename( file ) == 'test_log.xml':
                self._process_xml_file( os.path.join( dir, file ), output_file )
    def _process_xml_file(self,input_file,output_file):
        # Scrub non-printable bytes (see char_translation_table) and append
        # the log's lines to the combined results file.
        # NOTE(review): the local 'xml' shadows the imported xml package
        # within this method only.
        self.stdout( 'Processing test log "%s"' % input_file )
        f = open( input_file, 'r' )
        xml = f.readlines()
        f.close()
        for i in range( 0, len(xml)):
            xml[i] = string.translate( xml[i], boost.buildbot.char_translation_table.char_translation_table )
        output_file.writelines( xml )
    def doZipArchive(self,*args,**kwargs):
        # Zip the results file; on failure fall back to an optional
        # platform-specific zip_cmd helper module.
        source_path = kwargs['source']
        archive_path = kwargs['archive']
        self.stdout("creating archive %s for %s" % ( archive_path, source_path ))
        result = 0
        previous_umask = os.umask(0022)
        try:
            z = zipfile.ZipFile( archive_path, 'w', zipfile.ZIP_DEFLATED )
            z.write( source_path, os.path.basename( source_path ) )
            z.close()
            self.stdout( 'Done writing "%s".'% archive_path )
        except Exception, msg:
            self.stdout( 'Warning: Compressing failed (%s)' % msg )
            self.stdout( ' Trying to compress using a platform-specific tool...' )
            try: import zip_cmd
            except ImportError:
                script_dir = os.path.dirname( os.path.abspath( sys.argv[0] ) )
                self.stdout( 'Could not find \'zip_cmd\' module in the script directory (%s).' % script_dir )
                result = -1
            else:
                if os.path.exists( archive_path ):
                    os.unlink( archive_path )
                    self.stdout( 'Removing stale "%s".' % archive_path )
                zip_cmd.main( source_path, archive_path )
                self.stdout( 'Done compressing "%s".' % archive_path )
        os.umask(previous_umask)
        return result
registerSlaveCommand("boost.collect_results", Command_Boost_CollectResults, cvs_ver)
class Command_Boost_PublishResults(NoOpCommand):
    """Slave command "boost.publish_results": uploads the zipped results
    archive to the configured publish location.  The target URL's scheme
    selects the transport via a _publish_<scheme> method (only ftp is
    implemented here)."""
    def start(self):
        return self._start( "boost.publish_results",
            c( self.doPublish
                ,source = os.path.normpath(os.path.join(
                    self.builder.basedir,self.args.get('locate','build'),
                    '%s.zip' % self.args['runner']))
                ,target = '%s/%s-%s' % (self.args['publish_location'],self.args['source_type'],self.args['branch'])
                ,proxy = self.args.get('proxy')
                ,timeout = self.args.get('timeout',60*15)
                )
            )
    def doPublish(self,*args,**kwargs):
        # Dispatch on the URL scheme of the target.
        self.stdout("publishing the regression test results...")
        result = 0
        (scheme,site,path,query,fragment) = urlparse.urlsplit(kwargs['target'])
        publish_call = getattr(self,'_publish_%s' % scheme,None)
        if callable(publish_call):
            result = publish_call(scheme,site,path,query,fragment,**kwargs)
        else:
            self.stdout('unknown publish method "%s"' % scheme)
            result = -1
        return result
    def _publish_ftp(self,scheme,site,path,query,fragment,**kwargs):
        # Anonymous FTP upload, optionally through a proxy; the target
        # directory is created when missing.
        self.stdout( 'Uploading log archive "%s" to %s' % ( kwargs['source'], kwargs['target'] ) )
        if not kwargs['proxy']:
            ftp = ftplib.FTP( site )
            ftp.set_debuglevel( 1 )
            ftp.login()
        else:
            #~ BUG FIX: this line called 'utils.log(...)', but no 'utils'
            #~ module is imported anywhere in this file, so the proxy path
            #~ always raised NameError.  Report via self.stdout instead.
            self.stdout( ' Connecting through FTP proxy server "%s"' % kwargs['proxy'] )
            ftp = ftplib.FTP( kwargs['proxy'] )
            ftp.set_debuglevel( 1 )
            ftp.set_pasv (0) # turn off PASV mode
            ftp.login( 'anonymous@%s' % site, 'anonymous@' )
        ftp.cwd( os.path.dirname(path) )
        try:
            ftp.cwd( os.path.basename(path) )
        except ftplib.error_perm:
            ftp.mkd( os.path.basename(path) )
            ftp.cwd( os.path.basename(path) )
        f = open( kwargs['source'], 'rb' )
        try:
            ftp.storbinary( 'STOR %s' % os.path.basename( kwargs['source'] ), f )
        finally:
            # Close the upload handle (the original leaked it).
            f.close()
        ftp.quit()
        return 0
registerSlaveCommand("boost.publish_results", Command_Boost_PublishResults, cvs_ver)

View file

@ -0,0 +1,185 @@
# Copyright Redshift Software, Inc. 2005-2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os
import os.path
import sys
import getopt
import re
import boost
def show_revision( **unused ):
re_keyword_value = re.compile( r'^\$\w+:\s+(.*)\s+\$$' )
print '\n\tResivion: %s' % re_keyword_value.match( boost.buildbot.revision ).group( 1 )
print '\tLast modified on: %s\n' % re_keyword_value.match( boost.buildbot.modified ).group( 1 )
sys.exit(0)
def create_tester( root, server, runner, passwd, debug_level, **unused ):
    """Create a new buildbot slave ("tester") setup in 'root' via mktap.

    Refuses to run (exit code 1) when 'root' already exists; otherwise
    creates the directory, chdirs into it, and invokes twisted's mktap
    with a synthesized sys.argv.  Always exits the process.
    """
    import twisted.scripts.mktap
    root = os.path.abspath(root)
    if os.path.exists(root):
        print "Testing root location %s exists." % root
        print "Skipping to prevent corruption of existing setup."
        sys.exit(1)
    if not os.path.exists(root):
        if debug_level > 0: print "mkdir", root
        os.mkdir(root)
    if debug_level > 0: print "chdir", root
    os.chdir(root)
    # mktap reads its arguments from sys.argv.
    sys.argv = [
        'mktap', 'buildbot', 'slave',
        '--basedir', root,
        '--master', server,
        '--name', runner,
        '--passwd', passwd
        ]
    if debug_level > 0: print ' '.join( sys.argv )
    twisted.scripts.mktap.run()
    if debug_level > 0: print "Tester configured in %s." % root
    sys.exit(0)
def create_server( root, debug_level, **unused ):
    """Create a new buildbot master setup in 'root' via mktap.

    Refuses to run (exit code 1) when 'root' already exists; otherwise
    creates the directory, chdirs into it, and invokes twisted's mktap
    with a synthesized sys.argv.  Always exits the process.
    """
    import twisted.scripts.mktap
    root = os.path.abspath(root)
    if os.path.exists(root):
        print "Testing root location %s exists." % root
        print "Skipping to prevent corruption of existing setup."
        sys.exit(1)
    if not os.path.exists(root):
        if debug_level > 0: print "mkdir", root
        os.mkdir(root)
    if debug_level > 0: print "chdir", root
    os.chdir(root)
    # mktap reads its arguments from sys.argv.
    sys.argv = [
        'mktap', 'buildbot', 'master',
        '--basedir', root
        ]
    if debug_level > 0: print ' '.join( sys.argv )
    twisted.scripts.mktap.run()
    if debug_level > 0: print "Server configured in %s." % root
    sys.exit(0)
def start_daemon( root, debug_level, **unused ):
import twisted.python.runtime
# import the various built in slave commands so that we can add our own
import buildbot.slave.registry
import buildbot.slave.commands
import boost.buildbot.remote
root = os.path.abspath(root)
if debug_level > 0: print "chdir", root
os.chdir(root)
sys.argv = [
'twistd',
'--no_save',
'--file=buildbot.tap'
]
if sys.platform == "win32":
sys.arvg.append("--reactor=win32")
if debug_level > 0: print ' '.join( sys.argv )
if twisted.python.runtime.platformType == "Win32":
import twisted.scripts.twistw
twisted.scripts.twistw.run()
else:
import twisted.scripts.twistd
twisted.scripts.twistd.run()
sys.exit(0)
def stop_daemon( root, debug_level, **unused ):
    """Stop the running daemon by SIGTERM-ing the pid from twistd.pid.

    Exits 0 after signalling, 1 when no pid file is found.
    """
    import signal
    twistd_pid_file = os.path.join(root,'twistd.pid')
    if os.path.isfile(twistd_pid_file):
        # Read and close the pid file (the original leaked the handle).
        pid_file = open(twistd_pid_file,'r')
        try:
            twistd_pid = pid_file.read()
        finally:
            pid_file.close()
        os.kill(int(re.search(r'^(\d+)',twistd_pid).group(1)),signal.SIGTERM)
        sys.exit(0)
    else:
        sys.exit(1)
def accept_args( args ):
    """Parse the command line options into a keyword dict for the handlers.

    Returns a dict with keys root/server/runner/passwd (str or None),
    debug_level (int, default 0) and args (the leftover positionals).
    Prints usage and exits 1 when --help is given.
    """
    args_spec = [
        'root=',
        'server=',
        'runner=',
        'passwd=',
        ##
        'debug-level=',
        'help'
        ]
    options = {
        '--root' : None,
        '--server' : None,
        '--runner' : None,
        '--passwd' : None,
        ##
        '--debug-level' : 0
        }
    ( option_pairs, other_args ) = getopt.getopt( args, '', args_spec )
    # Plain loop instead of the original side-effecting map(lambda ...).
    for ( opt_name, opt_value ) in option_pairs:
        options[ opt_name ] = opt_value
    # 'in' instead of the deprecated dict.has_key(); '--help' is only
    # present when it was passed on the command line.
    if '--help' in options:
        usage()
        sys.exit( 1 )
    return {
        'root' : options[ '--root' ],
        'server' : options[ '--server' ],
        'runner' : options[ '--runner' ],
        'passwd' : options[ '--passwd' ],
        ##
        'debug_level' : int(options[ '--debug-level' ]),
        'args' : other_args
        }
# Map of command line sub-command name to its handler function (see run()).
commands = {
    'show-revision' : show_revision,
    'create-tester' : create_tester,
    'create-server' : create_server,
    'start' : start_daemon,
    'stop' : stop_daemon
    }
def lp( l ):
    # Print one line of help text (Python 2 print statement).
    print l;
def usage():
    """Print the command line help for this script (commands + options).

    Typo fixes in the help text: 'in the for of' -> 'form of', and
    'connect ro the server' -> 'to the server'.
    """
    lp('Usage:')
    lp('')
    lp('python %s [command] options' % os.path.basename( sys.argv[0] ))
    lp('')
    lp('Commands:')
    lp('')
    lp('\n'.join( commands.keys() ))
    lp('')
    lp('Options:')
    lp('')
    lp('--root Directory of server or runner.')
    lp('--server The server address for the runner to connect to')
    lp(' in the form of DNSNAME:PORT.')
    lp('--runner The name of the runner.')
    lp('--passwd The password for the runner to connect to the server.')
    lp('--debug-level Debugging level; controls the amount of debugging')
    lp(' output printed; 0 by default (no debug output).')
    lp('')
def run():
    """Entry point: dispatch sys.argv[1] to its handler from 'commands'.

    When no recognized sub-command is given, falls back to 'show-revision'
    and treats everything after the script name as options.
    """
    argv = sys.argv
    if len(argv) > 1 and argv[1] in commands:
        command, args = argv[1], argv[2:]
    else:
        command, args = 'show-revision', argv[1:]
    handler = commands[ command ]
    handler( **accept_args( args ) )

View file

@ -0,0 +1,415 @@
# Copyright Redshift Software, Inc. 2005-2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import buildbot
import buildbot.changes.changes
import buildbot.changes.mail
import buildbot.status.builder
import buildbot.status.html
import buildbot.util
import email.Utils
import os.path
import re
import rfc822
import string
import time
import types
import twisted.python
import twisted.python.components
import twisted.web.static
import urllib
# %-format template for the whole waterfall page; filled in by
# Boost_WaterfallStatusResource.content() below with keys project_name,
# heading, body and footer.
waterfall_content_html = '''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>BuildBot: %(project_name)s</title>
<link href="buildbot.css" rel="stylesheet" type="text/css" />
</head>
<body>
%(heading)s
%(body)s
%(footer)s
</body>
</html>
'''
# %-format template for the waterfall table itself; filled in by body()
# with per-builder cells (builders, current_activity, last_activity),
# the generated waterfall grid and the local timezone offset.
waterfall_body_html = '''
<table id="waterfall">
<tr id="builders">
<td colspan="2" class="project">
<a href="%(project_url)s">%(project_name)s</a>
</td>
%(builders)s
</tr>
<tr id="current-activity">
<td colspan="2" class="heading">
CURRENT&nbsp;ACTIVITY
</td>
%(current_activity)s
</tr>
<tr id="last-activity">
<td class="heading">
TIME %(timezone)+02.0f
</td>
<td class="heading">
<a href="changes">CHANGES</a>
</td>
%(last_activity)s
</tr>
%(waterfall)s
</table>
'''
# %-format template for the page footer; filled in by footer() with the
# buildbot version and page generation time.
waterfall_footer_html = '''
<div id="footer">
<p><a href="http://buildbot.sourceforge.net/">Buildbot</a>-%(version)s working
for the <a href="%(project_url)s">%(project_name)s</a> project.</p>
<p>Page built: %(page_time)s</p>
</div>
'''
class Boost_WaterfallStatusResource(buildbot.status.html.WaterfallStatusResource):
    """Waterfall status page that renders through the CSS-friendly HTML
    templates above instead of buildbot's stock markup."""
    def __init__(self, status, changemaster, categories, css=None):
        buildbot.status.html.WaterfallStatusResource.__init__(self,status,changemaster,categories,css)
    def content(self, request):
        # Render the full page from the content template.
        self.page_time = time.strftime("%a %d %b %Y %H:%M:%S",time.localtime(buildbot.util.now()))
        return waterfall_content_html % {
            "project_name" : self.status.getProjectName(),
            "project_url" : self.status.getProjectURL(),
            "page_time" : self.page_time,
            "heading" : self.heading(request),
            "body" : self.body(request),
            "footer" : self.footer(request) }
    def heading(self, request):
        # The templates carry the heading; nothing extra here.
        return ""
    def body(self, request):
        "This method builds the main waterfall display."
        # 'phase' selects the rendering detail (base-class phase0/1/2);
        # 'show' optionally restricts the displayed builders.
        phase = request.args.get("phase",["2"])
        phase = int(phase[0])
        showBuilders = request.args.get("show", None)
        allBuilders = self.status.getBuilderNames(categories=self.categories)
        if showBuilders:
            # Keep only known builders, without duplicates, in given order.
            builderNames = []
            for b in showBuilders:
                if b not in allBuilders:
                    continue
                if b in builderNames:
                    continue
                builderNames.append(b)
        else:
            builderNames = allBuilders
        builders = map(
            lambda name: self.status.getBuilder(name),
            builderNames)
        if phase == -1:
            return self.body0(request, builders)
        (changeNames, builderNames, timestamps, eventGrid, sourceEvents) = self.buildGrid(request, builders)
        if phase == 0:
            return self.phase0(request, changeNames, timestamps, eventGrid)
        # Per-builder "last activity" cells.
        last_activity_html = "";
        for b in builders:
            box = buildbot.status.html.ITopBox(b).getBox()
            last_activity_html += box.td()
        # Per-builder "current activity" cells.
        current_activity_html = "";
        for b in builders:
            box = buildbot.status.html.ICurrentBox(b).getBox()
            current_activity_html += box.td()
        # Builder-name header cells; '-' breaks names across lines.
        builders_html = "";
        for name in builderNames:
            builders_html += "<td class=\"builder\"><a href=\"%s\">%s</a></td>" % (
                urllib.quote(name),
                string.join(string.split(name,'-'),'<br />') )
        if phase == 1:
            f = self.phase1
        else:
            f = self.phase2
        waterfall_html = f(request, changeNames+builderNames, timestamps, eventGrid, sourceEvents)
        return waterfall_body_html % {
            "project_name" : self.status.getProjectName(),
            "project_url" : self.status.getProjectURL(),
            "last_activity" : last_activity_html,
            "current_activity" : current_activity_html,
            "builders" : builders_html,
            "waterfall" : waterfall_html,
            "version" : buildbot.version,
            "page_time" : self.page_time,
            "timezone" : time.timezone/60
            }
    def footer(self, request):
        return waterfall_footer_html % {
            "project_name" : self.status.getProjectName(),
            "project_url" : self.status.getProjectURL(),
            "version" : buildbot.version,
            "page_time" : self.page_time
            }
## Override some of the display elements to make them CSS friendly.
def td(text="", parms={}, **props):
    """Render a single <td> cell.

    `text` may be a string or a list of strings; each element is wrapped
    in its own <div>.  `parms` and keyword arguments supply cell
    attributes, of which only colspan/rowspan/class/style are emitted
    (in that order).  Returns the complete "<td ...>...</td>" markup,
    newline-terminated.  Note that entries in `parms` override
    same-named keyword arguments.
    """
    props.update(parms)
    td_props_html = ""
    for prop in ("colspan", "rowspan", "class", "style"):
        p = props.get(prop, None)
        if p is not None:  # identity test; was the unidiomatic `p != None`
            td_props_html += " %s=\"%s\"" % (prop, p)
    # isinstance() replaces the Python-2-only `type(text) == types.ListType`
    # (and also accepts list subclasses, which the exact-type test rejected).
    if isinstance(text, list):
        td_text_html = "<div>%s</div>" % "</div><div>".join(text)
    else:
        td_text_html = "<div>%s</div>" % text
    return "<td%s>%s</td>\n" % (td_props_html, td_text_html)
# Map raw buildbot box colors to the symbolic names used by buildbot.css.
color_map = {
    '#c000c0' : 'purple'
}

def c(a_color):
    """Translate a buildbot box color into a CSS-friendly token.

    None becomes 'none'; colors listed in color_map are replaced by
    their symbolic name; anything else passes through unchanged.
    """
    if a_color is None:  # identity test; was the unidiomatic `== None`
        return 'none'
    # dict.get replaces the Python-2-only color_map.has_key() lookup.
    return color_map.get(a_color, a_color)
class Boost_Box:
    """Wrap an upstream buildbot display box, merging extra cell
    attributes (typically a CSS class) into its parms before rendering
    through the module-level td() helper."""
    spacer = False
    def __init__(self, other_box, props={}):
        # Copy the display attributes of the wrapped box.
        self.text = other_box.text
        self.urlbase = other_box.urlbase
        self.show_idle = other_box.show_idle
        # NOTE: this aliases other_box.parms (and mutates it below)
        # rather than copying it -- matches the original behaviour.
        self.parms = other_box.parms
        self.parms.update(props)
    def td(self, **props):
        # Cell attributes from our parms win over caller keywords.
        props.update(self.parms)
        content = self.text
        if self.show_idle and not content:
            content = ["[idle]"]
        return td(content, props)
# Each Boost_*Box below wraps the box produced by its upstream buildbot
# counterpart in a Boost_Box carrying a CSS class, then registers itself
# in Twisted's adapter registry *in place of* the upstream adapter, so
# the waterfall picks these up transparently.
class Boost_CurrentBox(buildbot.status.html.CurrentBox):
    def getBox(self):
        state, ETA, build = self.original.getState()
        return Boost_Box( buildbot.status.html.CurrentBox.getBox(self),
            { 'class': "activity-%s" % state } )
twisted.python.components.theAdapterRegistry.adapterRegistry[
    (buildbot.status.builder.BuilderStatus, buildbot.status.html.ICurrentBox)] = Boost_CurrentBox

class Boost_ChangeBox(buildbot.status.html.ChangeBox):
    def getBox(self):
        return Boost_Box( buildbot.status.html.ChangeBox.getBox(self),
            { 'class': "commit" } )
twisted.python.components.theAdapterRegistry.adapterRegistry[
    (buildbot.changes.changes.Change, buildbot.status.html.IBox)] = Boost_ChangeBox

class Boost_BuildBox(buildbot.status.html.BuildBox):
    def getBox(self):
        return Boost_Box( buildbot.status.html.BuildBox.getBox(self),
            { 'class': "build" } )
twisted.python.components.theAdapterRegistry.adapterRegistry[
    (buildbot.status.builder.BuildStatus, buildbot.status.html.IBox)] = Boost_BuildBox

class Boost_StepBox(buildbot.status.html.StepBox):
    def getBox(self):
        # c() maps the raw color to a CSS-friendly token.
        return Boost_Box( buildbot.status.html.StepBox.getBox(self),
            { 'class': "step-%s" % c(self.original.getColor()) } )
twisted.python.components.theAdapterRegistry.adapterRegistry[
    (buildbot.status.builder.BuildStepStatus, buildbot.status.html.IBox)] = Boost_StepBox

class Boost_EventBox(buildbot.status.html.EventBox):
    def getBox(self):
        return Boost_Box( buildbot.status.html.EventBox.getBox(self),
            { 'class': "event-%s" % c(self.original.getColor()) } )
twisted.python.components.theAdapterRegistry.adapterRegistry[
    (buildbot.status.builder.Event, buildbot.status.html.IBox)] = Boost_EventBox

class Boost_BuildTopBox(buildbot.status.html.BuildTopBox):
    def getBox(self):
        box = buildbot.status.html.BuildTopBox.getBox(self)
        return Boost_Box( box,
            { 'class': "build-%s" % c(box.color) } )
twisted.python.components.theAdapterRegistry.adapterRegistry[
    (buildbot.status.builder.BuilderStatus, buildbot.status.html.ITopBox)] = Boost_BuildTopBox
##
class Boost_StatusResource(buildbot.status.html.StatusResource):
    """StatusResource that serves the Boost waterfall at the site root
    and exposes the stylesheet as /buildbot.css.

    `root` is the directory containing buildbot.css.
    """
    def __init__(self, status, control, changemaster, categories, root):
        buildbot.status.html.StatusResource.__init__(self,
            status, control, changemaster, categories,
            twisted.web.static.File(os.path.join(root,"buildbot.css")))
        # Replace the default waterfall page with the CSS-friendly one.
        self.putChild("",
            Boost_WaterfallStatusResource(self.status, self.changemaster,
                self.categories, self.css))
        self.putChild("buildbot.css",
            twisted.web.static.File(os.path.join(root,"buildbot.css")))
class Boost_Waterfall(buildbot.status.html.Waterfall):
    """Waterfall status target that swaps in Boost_StatusResource after
    the stock setup, so the Boost templates and stylesheet are used."""
    root = None  # directory containing buildbot.css; set in __init__
    def __init__(self, http_port=None, distrib_port=None, allowForce=True, root=None):
        buildbot.status.html.Waterfall.__init__(self,http_port,distrib_port,allowForce)
        self.root = root
    def setup(self):
        buildbot.status.html.Waterfall.setup(self)
        # Re-wrap the stock resource tree with our CSS-aware resource,
        # reusing the objects the base setup created.
        self.site.resource = Boost_StatusResource(
            self.site.resource.status,
            self.site.resource.control,
            self.site.resource.changemaster,
            self.site.resource.categories,
            self.root)
def Boost_parseSyncmail(self, fd, prefix=None, sep="/"):
    """Parse a CVS syncmail notification read from `fd` into a buildbot
    Change, or return None if the message is unusable.

    `prefix` restricts the change to files under that top-level
    directory (which is stripped from the file names); `sep` is the
    path separator used in the resulting names.
    """
    m = rfc822.Message(fd)
    # The mail is sent from the person doing the checkin. Assume that the
    # local username is enough to identify them (this assumes a one-server
    # cvs-over-rsh environment rather than the server-dirs-shared-over-NFS
    # model)
    name, addr = m.getaddr("from")
    if not addr:
        return None # no From means this message isn't from FreshCVS
    at = addr.find("@")
    if at == -1:
        who = addr # might still be useful
    else:
        who = addr[:at]
    # take the date of the email as the time of checkin, but fall back to
    # delivery time
    when = buildbot.util.now()
    email_time = m.getheader("date")
    if email_time:
        email_time = email.Utils.parsedate_tz(email_time)
        if email_time:
            when = email.Utils.mktime_tz(email_time)
    # syncmail puts the repository-relative directory in the subject:
    # "CVS: %(dir)s %(file)s,%(oldversion)s,%(newversion)s"
    # this is the only reasonable way to determine the directory name
    subject = m.getheader("subject")
    bits = subject.split(" ")
    while bits:
        bit = bits.pop(0)
        if bit == "CVS:":
            break;
    # NOTE(review): if the subject contains no "CVS:" token, `bits` is
    # empty here and the pop below raises IndexError -- confirm syncmail
    # always includes it.
    directory = bits.pop(0)
    files = []
    comments = ""
    isdir = 0
    lines = m.fp.readlines()
    # Skip ahead to the first Modified/Added/Removed Files: header.
    while lines:
        line = lines.pop(0)
        if (line.find("Modified Files:") == 0 or
            line.find("Added Files:") == 0 or
            line.find("Removed Files:") == 0):
            break
    # Collect file names up to a blank line or the Log Message: header
    # (which is pushed back for the loop below).
    while lines:
        line = lines.pop(0)
        if line == "\n" or line == "\r" or line == "\r\n" or line == "\n\r":
            break
        if line.find("Log Message:") == 0:
            lines.insert(0, line)
            break
        if (line.find("Modified Files:") == 0 or
            line.find("Added Files:") == 0 or
            line.find("Removed Files:") == 0):
            continue
        line = line.lstrip()
        line = line.rstrip()
        # note: syncmail will send one email per directory involved in a
        # commit, with multiple files if they were in the same directory.
        # Unlike freshCVS, it makes no attempt to collect all related
        # commits into a single message.
        thesefiles = line.split(" ")
        for file in thesefiles:
            file = sep.join([directory, file])
            # Normalize both separator styles to `sep`.
            file = file.replace("\\",sep)
            file = file.replace("/",sep)
            if prefix:
                # insist that the file start with the prefix: we may get
                # changes we don't care about too
                bits = file.split(sep)
                if bits[0] == prefix:
                    file = sep.join(bits[1:])
                else:
                    break
            # TODO: figure out how new directories are described, set .isdir
            files.append(file)
    if not files:
        return None
    # Skip to the log message proper.
    while lines:
        line = lines.pop(0)
        if line.find("Log Message:") == 0:
            break
    # message is terminated by "Index:..." (patch) or "--- NEW FILE.."
    # or "--- filename DELETED ---". Sigh.
    while lines:
        line = lines.pop(0)
        if line.find("Index: ") == 0:
            break
        if re.search(r"^--- NEW FILE", line):
            break
        if re.search(r" DELETED ---$", line):
            break
        comments += line
    comments = comments.rstrip() + "\n"
    change = buildbot.changes.changes.Change(who, files, comments, isdir, when=when)
    return change
class Boost_SyncmailMaildirSource(buildbot.changes.mail.SyncmailMaildirSource):
    """SyncmailMaildirSource that parses messages with Boost_parseSyncmail
    and logs each message file name as it arrives."""
    parser = Boost_parseSyncmail
    def messageReceived(self, filename):
        twisted.python.log.msg("Boost_SyncmailMaildirSource.messageReceived: "+filename)
        buildbot.changes.mail.SyncmailMaildirSource.messageReceived(self,filename)

View file

@ -0,0 +1,132 @@
# Copyright Redshift Software, Inc. 2005-2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
#~ import buildbot
#~ import buildbot.process.factory
import buildbot.process.step
#~ import os.path
import re
import string
#~ import time
import twisted.python
#~ import types
#~ import urllib
from buildbot.process.factory import s
class command_base(buildbot.process.step.ShellCommand):
    """Common base for the Boost slave build steps: a ShellCommand with a
    fixed step name and description (overridable via `name`/`description`
    keyword arguments)."""
    def __init__(self, _name, _description, **kwargs):
        # Explicit keywords win over the defaults the subclasses pass in.
        if kwargs.get('name'): _name = kwargs.get('name')
        if kwargs.get('description'): _description = kwargs.get('description')
        buildbot.process.step.ShellCommand.__init__(self,**kwargs)
        # NOTE(review): these deletions happen *after* kwargs was already
        # forwarded to ShellCommand.__init__ above; they only affect the
        # dict handed to LoggedRemoteCommand below -- confirm intended.
        if kwargs.has_key('name'): del kwargs['name']
        if kwargs.has_key('description'): del kwargs['description']
        if kwargs.has_key('build'): del kwargs['build']
        self.name = _name
        self.description = _description
        self.cmd = buildbot.process.step.LoggedRemoteCommand(_name,kwargs)
# Thin wrappers: each fixes the default step name and description for
# one remote command.
class SelfUpdate(command_base):
    def __init__(self, **kwargs):
        command_base.__init__(self, 'selfupdate', ["self","update"], **kwargs)

class Tarball(command_base):
    def __init__(self, **kwargs):
        command_base.__init__(self, 'tarball', ["tarball"], **kwargs)

class Boost_Jam_Build(command_base):
    def __init__(self, **kwargs):
        command_base.__init__(self, 'boost.jam.build', ["bjam","build"], **kwargs)

class Boost_Jam(command_base):
    def __init__(self, **kwargs):
        command_base.__init__(self, 'boost.jam', ["bjam"], **kwargs)
class Boost_Test(command_base):
    """Test-discovery step: runs bjam (with --dump-tests/--dump-test-targets
    in its options), parses the log for targets belonging to the requested
    tests, then schedules one Boost_Jam step per discovered target."""
    def __init__(self, **kwargs):
        # 'tests' is a list of regex patterns selecting which tests to run.
        self.tests = kwargs.get('tests');
        if kwargs.has_key('tests'): del kwargs['tests']
        # Keep a pristine copy to clone the per-target steps from later.
        self._kwargs = kwargs
        command_base.__init__(self, 'boost.jam', ["btest"], **kwargs)
    def commandComplete(self, cmd):
        def test_match(t,r):
            # reduce() accumulator: True once any pattern matched parts[1]
            # (parts is rebound by the enclosing loop below).
            return t or r.match(parts[1])
        #~ Get the log so we can parse it to find all the targets
        #~ we can test.
        out = cmd.log.getText()
        lines = string.split(out,"\n")
        test_targets = {}
        test_re = []
        for test in self.tests:
            test_re.append(re.compile(test))
        for line in lines:
            # Split a quoted 'boost-test(TARGET) "name" : "target" ...'
            # log line into its tokens.
            parts = re.split('(?:" ")|(?:" ")|(?: ")|(?:" )|(?: [[]")|(?:"[]] )|(?:")',line)
            if not parts: continue
            if parts[0] != 'boost-test(TARGET)': continue
            if not reduce(test_match,test_re,False): continue
            try:
                target_i = parts.index(':')+1
            except:
                continue
            twisted.python.log.msg("Boost_Test.commandComplete: TEST = %s -- TARGETS = %s" %
                (parts[1],string.join(parts[target_i:-1],' ')) )
            # Dict keys de-duplicate the targets.
            for t in parts[target_i:-1]:
                test_targets[t] = True
        test_targets = test_targets.keys()
        test_targets.sort()
        #~ Construct new steps for each of the targets we want to test. It would be much
        #~ better to tell bjam all targets to test in groups instead of one per invocation.
        #~ But there's no "easy" way to do that. Passing in args can blow the command line
        #~ limits. Setting an env can also blow that limit, but this may be a higher limit
        #~ and we could do them piecemeal.
        kwargs = self._kwargs.copy()
        kwargs.update({
            'flunkOnFailure': False,
            'appendToLog': True
            })
        # The dump options were only needed for this discovery pass.
        kwargs['options'].remove('--dump-tests')
        kwargs['options'].remove('--dump-test-targets')
        count = 0
        for test_target in test_targets:
            kwargs['target'] = test_target
            step = Boost_Jam(**kwargs)
            count += 1
            step.name = "%s.%d" % (step.name,count)
            #~ The steps up to our point have been eaten away already. So we
            #~ can add to the front so that the additional steps get executed
            #~ before the rest.
            self.build.steps.insert(count-1,step)
            self.build.build_status.addStep(step)
        #~ Rearrange the steps on the build_status to match the order in the
        #~ actual build.
        existing_count = len(self.build.steps)-count
        new_count = count
        a = self.build.build_status.steps[0:-new_count-existing_count]
        c = self.build.build_status.steps[-new_count-existing_count:-new_count]
        b = self.build.build_status.steps[-new_count:]
        self.build.build_status.steps = a+b+c
# More thin wrappers fixing the step name and description for the
# result-handling remote commands.
class Boost_Process_Jam_Log(command_base):
    def __init__(self, **kwargs):
        command_base.__init__(self, 'boost.process_jam_log', ["process log"], **kwargs)

class Boost_Collect_Results(command_base):
    def __init__(self, **kwargs):
        command_base.__init__(self, 'boost.collect_results', ["collect results"], **kwargs)

class Boost_Publish_Results(command_base):
    def __init__(self, **kwargs):
        command_base.__init__(self, 'boost.publish_results', ["publish results"], **kwargs)

View file

@ -0,0 +1,262 @@
#~ Copyright Redshift Software, Inc. 2006-2007
#~ Distributed under the Boost Software License, Version 1.0.
#~ (See accompanying file LICENSE_1_0.txt or copy at
#~ http://www.boost.org/LICENSE_1_0.txt)
import __builtin__
import sys
import os
import os.path
import compiler
import imp
import zipimport
import cStringIO
import zipfile
import re
import stat
import calendar
class patchwork_module:
    """A zip archive treated as a module bundle; scripts inside it can be
    executed directly via execute()."""
    def __init__(self,zip_path):
        self.zip_path = zip_path
        self.zip = zipfile.ZipFile(zip_path,'r')
    # NOTE: `scope` defaults to *this* module's globals (bound at def time).
    def execute(self, args, scope = globals(), scripts = None):
        # With a `scripts` regex: find the archive directory it matches
        # and run the script named by args[0] from there; otherwise
        # args[0] is itself the archive path of the script.
        if scripts:
            script = None
            files = self.zip.namelist()
            reScripts = re.compile(scripts)
            for zipPath in files:
                if reScripts.match(zipPath):
                    if not zipPath.endswith('/'):
                        zipPath = os.path.dirname(zipPath)
                    script = zipPath+os.path.basename(args[0])
                    break
            print "Running: %s" % (script)
            exec self.zip.read(script) in scope
        else:
            exec self.zip.read(args[0]) in scope
class patchwork_globals:
def __init__(self):
#~ The set of importers we need to look hook into.
self.importers = {}
#~ The packages we are patching into a cohesive single set.
self.packages = {}
#~ The packages to search for, in priority order.
self.packages_to_search = None
#~ The file info for each file in all the zip archives.
self.zipinfo = {}
_g_ = patchwork_globals()
#~ Define a module path, which can be a zip file, and its packages.
def def_modules(dir_and_file,packages):
    """Register the newest matching archive and the packages it provides.

    dir_and_file -- [directory, filename-regex]; the regex's numeric
        groups form a version key used to pick the highest version found.
    packages -- maps package name to either an existing source path or a
        regex locating the package directory inside the archive.
    Returns a patchwork_module for a .zip archive; raises ImportError
    otherwise (or if the archive is already registered).
    """
    #~ print "--- patchwork.def_modules(%s,{...})" % (dir_and_file)
    def _key_and_file_(file_match,file_entry):
        # Pair a sortable [version-number...] key with the file name,
        # or None when the entry doesn't match the regex.
        m = re.match(file_match,file_entry)
        if m:
            return [ map(lambda y: int(y), m.groups()), file_entry ]
        else:
            return None
    # Sort matching entries newest-version first.
    dir = filter(
        None,
        map(
            lambda x: _key_and_file_(dir_and_file[1],x),
            os.listdir(dir_and_file[0])
            )
        )
    dir.sort()
    dir.reverse()
    path = os.path.join(dir_and_file[0],dir[0][1])
    print "Using: %s" % (path)
    module = None
    if path.endswith('.zip') and not _g_.importers.has_key(path):
        module = patchwork_module(path)
        zip = module.zip
        files = zip.namelist()
        _g_.importers[path] = zipimport.zipimporter(path)
        # Record per-file metadata so _stat_() can answer without
        # touching the file system.
        for zipinfo in zip.infolist():
            _g_.zipinfo[os.path.join(path,zipinfo.filename)] = zipinfo
        for package in packages.keys():
            if os.path.exists(packages[package]):
                #~ print "--| SRC FILE: %s" % (packages[package]);
                _g_.packages[package] = { 'path' : packages[package], 'importer' : path }
            else:
                rePackage = re.compile(packages[package])
                for zipPath in files:
                    if rePackage.match(zipPath):
                        if not zipPath.endswith('/'):
                            zipPath = os.path.dirname(zipPath)
                        #~ print "--- patchwork.def_modules found zip path %s" % (zipPath)
                        #~ print "--| ZIP FILE: %s" % (zipPath);
                        _g_.packages[package] = { 'path' : zipPath, 'importer' : path }
                        break
        # Reverse-sorted so subpackages ('twisted.web') are searched
        # before their parents ('twisted').
        _g_.packages_to_search = _g_.packages.keys()
        _g_.packages_to_search.sort()
        _g_.packages_to_search.reverse()
        sys.path.insert(0,path)
    else:
        raise ImportError
    return module
def _open_(filename, mode = 'r', bufsize = -1):
    """open() replacement: files inside a registered zip archive are
    served from the archive as StringIO objects; everything else falls
    through to the builtin open()."""
    #~ print "--- patchwork.open(%s,%s,%d)\n" % (filename,mode,bufsize)
    for importer in _g_.importers.keys():
        if filename.startswith(importer):
            return cStringIO.StringIO(_g_.importers[importer].get_data(filename))
    return __builtin__.open(filename,mode,bufsize)
def _file_(filename, mode = 'r', bufsize = -1):
    """file() replacement; same semantics as _open_()."""
    #~ print "--- patchwork.file(%s,%s,%d)\n" % (filename,mode,bufsize)
    return _open_(filename,mode,bufsize)
def _stat_(filename):
    """os.stat() replacement: synthesize a stat tuple for files inside a
    registered zip archive (read-only regular file, size and times taken
    from the archive's ZipInfo); fall back to the real os.stat() for
    anything else."""
    #~ print "--- patchwork.stat(%s)\n" % (filename)
    if filename in _g_.zipinfo:
        st_size = _g_.zipinfo[filename].file_size
        # The archive only stores one timestamp; use it for atime/mtime/ctime.
        st_mtime = calendar.timegm(_g_.zipinfo[filename].date_time)
        return (
            #~ st_mode (Python 2 octal literal: regular file, mode 444)
            0100444,
            #~ st_ino
            0,
            #~ st_dev
            0,
            #~ st_nlink
            0,
            #~ st_uid
            0,
            #~ st_gid
            0,
            #~ st_size
            st_size,
            #~ st_atime
            st_mtime,
            #~ st_mtime
            st_mtime,
            #~ st_ctime
            st_mtime
            )
    return os.stat(filename)
#~ Direct loader of modules, and packages, from other importers.
class patchwork_loader:
    """PEP-302-style loader that compiles a module's source out of a zip
    archive (or from a plain source file) and redirects the module's
    open/file/stat to the patchwork replacements above."""
    def __init__(self,importer,module,path):
        #~ print "--- patchwork_loader.__init__(self,importer,\n\t%s,\n\t%s)" % (module,path)
        self.importer = importer
        self.module = module
        self.path = path
    def find_module(self,fullname,path=None):
        #~ print "--- patchwork_loader.find_module(self,\n\t%s,\n\t%s)" % (fullname,path)
        return self.importer.find_module(fullname,path=path)
    def load_module(self,fullname):
        #~ print "--- patchwork_loader.load_module(self,\n\t%s)" % (fullname)
        source = ""
        if os.path.exists(self.path):
            #~ print "\tRC FILE: %s" % (self.path);
            source += file(self.path,"rU").read()
        else:
            #~ print "\tZIP FILE: %s" % (self.path);
            # Normalize archive line endings so compile() accepts them.
            source += self.importer.get_data(self.path).replace("\r\n","\n").replace("\r","\n")
        # Append a stanza redirecting open/file/stat inside the loaded
        # module to the patchwork versions.
        source += '''
import __builtin__
if __builtin__.open == open:
    from boost.patchwork import _open_ as open
if isinstance(file,type):
    from boost.patchwork import _file_ as file
from boost.patchwork import _stat_ as stat
'''
        code = compiler.compile(source,self.path,'exec')
        mod = sys.modules.setdefault(fullname, imp.new_module(fullname))
        mod.__file__ = os.path.join(self.importer.archive,self.path)
        mod.__loader__ = self
        if self.path.endswith("__init__.py"):
            # Packages need a __path__ for submodule imports to work.
            mod.__path__ = [ os.path.join(self.importer.archive,os.path.dirname(self.path)) ]
        exec code in mod.__dict__
        #~ We return the sys.modules entry instead of the mod variable directly
        #~ because it's possible for the module itself to override the sys.modules
        #~ entry with a custom one. For example, this is what Twisted 2.5 does.
        return sys.modules[fullname]
#~ Python 2.3 style importer that searches through our package patchwork set
#~ and loads according to the location for the package.
class patchwork_importer:
    """sys.path_hooks importer: accepts only paths inside a registered
    archive, then resolves module names through _g_.packages."""
    def __init__(self,archivepath):
        #~ print "--- patchwork_importer.__init__(self,%s)" % (archivepath)
        found = None
        for importer in _g_.importers.keys():
            if archivepath.startswith(importer):
                found = 1
                break
        if not found:
            # Declining here lets Python try the next path hook.
            raise ImportError
    def find_module(self,fullname,path=None):
        #~ print "--- patchwork_importer.find_module(self,\n\t%s,\n\t%s)" % (fullname,path)
        loader = None
        for package in _g_.packages_to_search:
            if fullname.startswith(package):
                package_dirname = package.split('.')
                # Remaining name components below the registered package.
                fullname_base = fullname.split('.')[len(package_dirname):]
                importer = _g_.importers[_g_.packages[package]['importer']]
                if os.path.exists(_g_.packages[package]['path']):
                    path_base = _g_.packages[package]['path']
                else:
                    path_base = os.path.join(_g_.packages[package]['path'],*fullname_base)
                # Try, in order: package on disk, module on disk, then
                # package/module inside the zip archive (via the
                # zipimporter's _files index).
                if os.path.exists(os.path.join(path_base,"__init__")+".py"):
                    #~ Source package.
                    loader = patchwork_loader(importer,fullname,
                        os.path.join(path_base,"__init__")+".py")
                elif os.path.exists(path_base):
                    #~ Source module.
                    loader = patchwork_loader(importer,fullname,
                        path_base)
                elif os.path.exists(path_base+".py"):
                    #~ Source module.
                    loader = patchwork_loader(importer,fullname,
                        path_base+".py")
                elif importer._files.has_key(os.path.join(path_base,"__init__")+".py"):
                    #~ Source package.
                    loader = patchwork_loader(importer,fullname,
                        os.path.join(path_base,"__init__")+".py")
                elif importer._files.has_key(path_base+".py"):
                    #~ Source module.
                    loader = patchwork_loader(importer,fullname,
                        path_base+".py")
                if loader:
                    #~ print "--- patchwork_importer.find_module(self,%s,%s)" % (fullname,path)
                    #~ print "--- package = %s" % (package)
                    #~ print "--- %s.path = %s" % (loader,loader.path)
                    break;
        return loader
#~ Shove our special importer into the global importer hooks.
sys.path_hooks.insert(0,patchwork_importer)

View file

@ -0,0 +1,70 @@
#! /usr/bin/python
#~ Copyright Redshift Software, Inc. 2006-2007
#~ Distributed under the Boost Software License, Version 1.0.
#~ (See accompanying file LICENSE_1_0.txt or copy at
#~ http://www.boost.org/LICENSE_1_0.txt)
import sys
import os
import os.path
import urllib
from boost.patchwork import def_modules
#~ The directory this file is in.
root = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
#~ Download current packages.
#~ if not os.path.isdir(os.path.join(root,'_packages')):
#~     os.mkdir(os.path.join(root,'_packages'))
#~ if not os.path.isdir(os.path.join(root,'_packages')):
#~     sys.exit('No _packages directory found.')
#~ urllib.urlretrieve('',os.path.join(root,'_packages',''))
#~ urllib.urlretrieve('',os.path.join(root,'_packages',''))
#~ Bootstrap Boost package.
#~ sys.path.insert(0,os.path.join(root,'_packages','boost-0.0.1.zip'))
#~ The zip files we import from...
#~ BuildBot has a simple single package tree.
# Register the newest buildbot-*.zip found in _packages.
def_modules(
    [ os.path.join(root,'_packages'), 'buildbot-(\d+).(\d+).(\d+).zip' ],
    { 'buildbot' :
        '^buildbot-[^/]+/buildbot/__init__.py$' }
    )
#~ Twisted has a variety of split packages.
# The Twisted umbrella zip carries one tree per subproject, so each
# subpackage is located by its own regex.
def_modules(
    [ os.path.join(root,'_packages'), 'Twisted-(\d+).(\d+).(\d+).zip' ],
    { 'twisted' :
        '^Twisted[^/]+/TwistedCore-[^/]+/twisted/$' ,
      'twisted.conch' :
        '^Twisted[^/]+/TwistedConch-[^/]+/twisted/conch/$' ,
      'twisted.lore' :
        '^Twisted[^/]+/TwistedLore-[^/]+/twisted/lore/$' ,
      'twisted.mail' :
        '^Twisted[^/]+/TwistedMail-[^/]+/twisted/mail/$' ,
      'twisted.names' :
        '^Twisted[^/]+/TwistedNames-[^/]+/twisted/names/$' ,
      'twisted.news' :
        '^Twisted[^/]+/TwistedNews-[^/]+/twisted/news/$' ,
      'twisted.runner' :
        '^Twisted[^/]+/TwistedRunner-[^/]+/twisted/runner/$' ,
      'twisted.web' :
        '^Twisted[^/]+/TwistedWeb-[^/]+/twisted/web/$' ,
      'twisted.words' :
        '^Twisted[^/]+/TwistedWords-[^/]+/twisted/words/$' ,
      'zope' :
        '^Twisted[^/]+/zope.interface-[^/]+/src/zope/$' }
    )
#~ Since we have our own extra slave commands they have to be loaded before
#~ starting the slave daemon.
if len(sys.argv) > 1 and sys.argv[1] == 'start':
    import boost.bot.remote
#~ And run the buildbot frontend script.
from buildbot.scripts import runner
runner.run()

View file

@ -0,0 +1,53 @@
// ---- time_string: thin wrapper around std::strftime -------- //
//
// Copyright Gennaro Prota 2006
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// ------------------------------------------------------------------
//
// $Id$
#ifndef BOOST_TIME_STRING_HPP_GP_20060731
#define BOOST_TIME_STRING_HPP_GP_20060731

#include <string>
#include <ctime>

namespace boost {

// Many of the boost tools just need a quick way to obtain a formatted
// "run date" string or similar. This is one: it formats the current UTC
// time with std::strftime.
//
// Returns true on success; on failure it returns false and leaves
// `result` unchanged.
//
inline
bool time_string(std::string & result
               , const std::string & format = "%X UTC, %A %d %B %Y")
{
    // Names are used unqualified (no std::size_t etc.) on purpose, to
    // avoid having to include "config.hpp".
    using namespace std;

    const int buffer_size = 256;
    char buffer[buffer_size] = { 0 };

    const time_t invalid_time(-1);
    time_t now;

    // Both the time() call and the strftime() formatting must succeed
    // (strftime returns 0 when the result does not fit the buffer).
    bool ok = time(&now) != invalid_time;
    if (ok)
        ok = strftime(buffer, buffer_size, format.c_str(), gmtime(&now)) != 0;

    if (ok)
        result = buffer;
    return ok;
}

}

#endif // include guard

@ -1 +1 @@
Subproject commit 35f8b2e4de629fea0d962602c6cc009853007ce6
Subproject commit deeb8c598dfc087618586543232bf2a18acbe5c1

1
tools/litre Submodule

@ -0,0 +1 @@
Subproject commit 564d4d8d30b7e03ac5e25d78e14d2c19fa321c83

@ -1 +1 @@
Subproject commit 7e558d5fbda52eea6bd2309f06e0ecf49f8ba004
Subproject commit 848b1aacaca4ee03b38aabc87b09932383722f6f

View file

@ -1,41 +0,0 @@
# Regression test status reporting tools build Jamfile
exe process_jam_log
:
../process_jam_log.cpp ../detail/tiny_xml.cpp
/boost/filesystem//boost_filesystem
:
:
release
;
exe compiler_status
:
../compiler_status.cpp ../detail/tiny_xml.cpp
/boost/filesystem//boost_filesystem
:
:
release
;
install dist-bin
:
process_jam_log
compiler_status
:
<install-type>EXE
<location>../../../dist/bin
:
release
;
install dist-lib
:
process_jam_log
compiler_status
:
<install-type>LIB
<location>../../../dist/lib
:
release
;

View file

@ -0,0 +1,60 @@
# Regression test status reporting tools build Jamfile
# Locate the Boost root: either we are inside a full Boost tree
# (boost-build.jam three levels up) or the user passes --boost=<path>.
if [ glob ../../../boost-build.jam ]
{
    use-project /boost : ../../.. ;
}
else
{
    import modules ;
    use-project /boost : [ MATCH --boost=(.*) : [ modules.peek : ARGV ] ] ;
}
# Sources live directly in .. in the old flat layout, or in ../src in
# the newer layout.
if ! [ glob ../src/process_jam_log.cpp ]
{
    project boost/regression
        :
        source-location ..
        ;
}
else
{
    project boost/regression
        :
        source-location ../src
        ;
}
# Converts bjam test logs into XML test results.
exe process_jam_log
    :
    process_jam_log.cpp detail/tiny_xml.cpp
    /boost/filesystem//boost_filesystem/<link>static
    :
    <define>BOOST_ALL_NO_LIB=1
    <define>_CRT_SECURE_NO_WARNINGS
    :
    release
    ;
#~ explicit process_jam_log ;
# Generates the compiler status HTML tables.
exe compiler_status
    :
    compiler_status.cpp detail/tiny_xml.cpp
    /boost/filesystem//boost_filesystem/<link>static
    :
    <define>BOOST_ALL_NO_LIB=1
    :
    release
    ;
explicit compiler_status ;
# Generates per-library status HTML tables.
exe library_status
    :
    library_status.cpp detail/tiny_xml.cpp
    /boost/filesystem//boost_filesystem/<link>static
    :
    <define>BOOST_ALL_NO_LIB=1
    :
    release
    ;
explicit library_status ;

View file

@ -0,0 +1,59 @@
<?xml version="1.0" encoding="Windows-1252"?>
<VisualStudioProject
ProjectType="Visual C++"
Version="7.10"
Name="compiler_status"
ProjectGUID="{81F22EF9-A1B8-46CB-9C2D-56FD4327B942}"
Keyword="MakeFileProj">
<Platforms>
<Platform
Name="Win32"/>
</Platforms>
<Configurations>
<Configuration
Name="Debug|Win32"
OutputDirectory="Debug"
IntermediateDirectory="Debug"
ConfigurationType="0">
<Tool
Name="VCNMakeTool"
BuildCommandLine="cd ..\..\..\..\tools\regression\build
..\..\jam\src\bin.ntx86\bjam.exe compiler_status.exe variant=debug"
ReBuildCommandLine="cd ..\..\..\..\tools\regression\build
...\..\jam\src\bin.ntx86\bjam.exe -a compiler_status.exe variant=debug"
CleanCommandLine="cd ..\..\..\..\tools\regression\build
call bjam --v2 msvc-7.1 debug clean
"
Output="compiler_status.exe"/>
</Configuration>
<Configuration
Name="Release|Win32"
OutputDirectory="Release"
IntermediateDirectory="Release"
ConfigurationType="0">
<Tool
Name="VCNMakeTool"
BuildCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam compiler_status variant=release link=static"
ReBuildCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam -a compiler_status variant=release link=static"
CleanCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam clean"
Output="..\..\..\..\bin.v2\tools\regression\build\msvc-7.1\release\link-static\compiler_status.exe"/>
</Configuration>
</Configurations>
<References>
</References>
<Files>
<Filter
Name="Source Files"
Filter="cpp;c;cxx;def;odl;idl;hpj;bat;asm;asmx"
UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}">
<File
RelativePath="..\..\compiler_status.cpp">
</File>
</Filter>
</Files>
<Globals>
</Globals>
</VisualStudioProject>

View file

@ -0,0 +1,64 @@
<?xml version="1.0" encoding="Windows-1252"?>
<VisualStudioProject
ProjectType="Visual C++"
Version="7.10"
Name="library_status"
ProjectGUID="{465BDB84-92B5-4C60-AF26-8BD1A61A089E}"
Keyword="MakeFileProj">
<Platforms>
<Platform
Name="Win32"/>
</Platforms>
<Configurations>
<Configuration
Name="Debug|Win32"
OutputDirectory="Debug"
IntermediateDirectory="Debug"
ConfigurationType="0">
<Tool
Name="VCNMakeTool"
BuildCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam library_status variant=debug link=static"
ReBuildCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam -a library_status variant=debug link=static"
CleanCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam clean"
Output="..\..\..\..\bin.v2\tools\regression\build\msvc-7.1\debug\link-static\library_status.exe"/>
</Configuration>
<Configuration
Name="Release|Win32"
OutputDirectory="Release"
IntermediateDirectory="Release"
ConfigurationType="0">
<Tool
Name="VCNMakeTool"
BuildCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam library_status variant=release link=static"
ReBuildCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam -a library_status variant=release link=static"
CleanCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam clean"
Output="..\..\..\..\bin.v2\tools\regression\build\msvc-7.1\release\link-static\library_status.exe"/>
</Configuration>
</Configurations>
<References>
</References>
<Files>
<Filter
Name="Source Files"
Filter="cpp;c;cxx;def;odl;idl;hpj;bat;asm;asmx"
UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}">
<File
RelativePath="..\..\library_status.cpp">
</File>
<File
RelativePath="..\..\detail\tiny_xml.cpp">
</File>
<File
RelativePath="..\..\detail\tiny_xml.hpp">
</File>
</Filter>
</Files>
<Globals>
</Globals>
</VisualStudioProject>

View file

@ -4,6 +4,7 @@
Version="7.10"
Name="process_jam_log"
ProjectGUID="{9A751791-929F-496A-8DE7-B61020619BFA}"
RootNamespace="process_jam_log"
Keyword="MakeFileProj">
<Platforms>
<Platform
@ -17,14 +18,16 @@
ConfigurationType="0">
<Tool
Name="VCNMakeTool"
BuildCommandLine="cd ..\..\..\..\tools\regression\build
..\..\jam\src\bin.ntx86\bjam.exe variant=debug
BuildCommandLine="cd ..\..\..\..\tools\regression\build
..\..\jam\src\bin.ntx86\bjam.exe process_jam_log variant=debug
"
ReBuildCommandLine="cd ..\..\..\..\tools\regression\build
..\..\jam\src\bin.ntx86\bjam.exe -a variant=debug
ReBuildCommandLine="cd ..\..\..\..\tools\regression\build
call bjam --v2 msvc-7.1 debug clean
call bjam --v2 msvc-7.1 debug"
CleanCommandLine="cd ..\..\..\..\tools\regression\build
call bjam --v2 msvc-7.1 debug clean
"
Output="process_jam_log.exe"/>
Output="../../../../bin.v2/tools/regression/build/msvc-7.1/debug/link-static/process_jam_log.exe"/>
</Configuration>
<Configuration
Name="Release|Win32"
@ -33,9 +36,13 @@
ConfigurationType="0">
<Tool
Name="VCNMakeTool"
BuildCommandLine="cd C:\users\misha\Stuff\boost\boost\tools\regression\build"
ReBuildCommandLine="cd C:\users\misha\Stuff\boost\boost\tools\regression\build"
Output="process_jam_log.exe"/>
BuildCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam process_jam_log variant=release link=static"
ReBuildCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam -a process_jam_log variant=release link=static"
CleanCommandLine="cd ..
..\..\jam\src\bin.ntx86\bjam clean"
Output="..\..\..\..\bin.v2\tools\regression\build\msvc-7.1\release\link-static\process_jam_log.exe"/>
</Configuration>
</Configurations>
<References>
@ -45,9 +52,6 @@
Name="Source Files"
Filter="cpp;c;cxx;def;odl;idl;hpj;bat;asm;asmx"
UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}">
<File
RelativePath="..\..\process_jam_log.cpp">
</File>
</Filter>
<Filter
Name="Header Files"

View file

@ -0,0 +1,37 @@
Microsoft Visual Studio Solution File, Format Version 8.00
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "compiler_status", "compiler_status.vcproj", "{81F22EF9-A1B8-46CB-9C2D-56FD4327B942}"
ProjectSection(ProjectDependencies) = postProject
EndProjectSection
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "process_jam_log", "process_jam_log.vcproj", "{9A751791-929F-496A-8DE7-B61020619BFA}"
ProjectSection(ProjectDependencies) = postProject
EndProjectSection
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "library_status", "library_status.vcproj", "{465BDB84-92B5-4C60-AF26-8BD1A61A089E}"
ProjectSection(ProjectDependencies) = postProject
EndProjectSection
EndProject
Global
GlobalSection(SolutionConfiguration) = preSolution
Debug = Debug
Release = Release
EndGlobalSection
GlobalSection(ProjectConfiguration) = postSolution
{81F22EF9-A1B8-46CB-9C2D-56FD4327B942}.Debug.ActiveCfg = Debug|Win32
{81F22EF9-A1B8-46CB-9C2D-56FD4327B942}.Debug.Build.0 = Debug|Win32
{81F22EF9-A1B8-46CB-9C2D-56FD4327B942}.Release.ActiveCfg = Release|Win32
{81F22EF9-A1B8-46CB-9C2D-56FD4327B942}.Release.Build.0 = Release|Win32
{9A751791-929F-496A-8DE7-B61020619BFA}.Debug.ActiveCfg = Debug|Win32
{9A751791-929F-496A-8DE7-B61020619BFA}.Debug.Build.0 = Debug|Win32
{9A751791-929F-496A-8DE7-B61020619BFA}.Release.ActiveCfg = Release|Win32
{9A751791-929F-496A-8DE7-B61020619BFA}.Release.Build.0 = Release|Win32
{465BDB84-92B5-4C60-AF26-8BD1A61A089E}.Debug.ActiveCfg = Debug|Win32
{465BDB84-92B5-4C60-AF26-8BD1A61A089E}.Debug.Build.0 = Debug|Win32
{465BDB84-92B5-4C60-AF26-8BD1A61A089E}.Release.ActiveCfg = Release|Win32
{465BDB84-92B5-4C60-AF26-8BD1A61A089E}.Release.Build.0 = Release|Win32
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
EndGlobalSection
GlobalSection(ExtensibilityAddIns) = postSolution
EndGlobalSection
EndGlobal

View file

@ -5,7 +5,7 @@
<meta http-equiv="Content-Type"
content="text/html; charset=iso-8859-1">
<meta name="ProgId" content="FrontPage.Editor.Document">
<meta name="GENERATOR" content="Microsoft FrontPage Express 2.0">
<meta name="GENERATOR" content="Microsoft FrontPage 5.0">
<title>Regression Test Reporting Tools</title>
</head>
@ -32,12 +32,22 @@ used to generate the actual status reports.</p>
files.</li>
<li><a href="build/Jamfile.v2">Jamfile.v2</a> - Builds
process_jam_log and compiler_status executables.</li>
<li><a href="library_status.html">Library Status</a> - Runs test
programs for one or all boost libraries on
your local installation and generates complete tables
to show which combinations of libraries, compilers,
compiler settings pass and fail at your local installation.</li>
</ul>
<hr>
<p>Revised <!--webbot bot="Timestamp" startspan s-type="EDITED"
s-format="%d %B, %Y" -->09 January, 2003<!--webbot bot="Timestamp"
i-checksum="38582" endspan --></p>
s-format="%d %B, %Y" -->06 November, 2007<!--webbot bot="Timestamp"
i-checksum="39367" endspan --></p>
<p>© Copyright Beman Dawes 2003</p>
<p>Distributed under the Boost Software License, Version 1.0. See
<a href="http://www.boost.org/LICENSE_1_0.txt">www.boost.org/LICENSE_1_0.txt</a></p>
</body>
</html>
</html>

View file

@ -0,0 +1,166 @@
<html>
<head>
<meta http-equiv="Content-Language" content="en-us">
<meta http-equiv="Content-Type"
content="text/html; charset=iso-8859-1">
<title>Library Status</title>
</head>
<body bgcolor="#FFFFFF">
<table border="0">
<tr>
<td><img border="0" src="../../boost.png" width="277" height="86" alt="boost.png (6897 bytes)"></td>
<td><h1>Generating Library Status Tables</h1></td>
</tr>
</table>
<h3>Purpose</h3>
Any time one considers using a library as large and complex
as the Boost libraries, he must have a way of validating
that the library functions in his environment. This should
be done when the library is installed and anytime questions
are raised regarding its applicability and/or its usage.
<p>
The procedures described here permit a user to run any
combination of tests on any or all libraries and generate
a set of convenient tables which show which libraries
pass which tests under what conditions.
<h3>Preliminaries</h3>
Generating these tables requires a couple of utility programs:
<code>process_jam_log</code> and <code>library_status</code>.
These can be built by moving to the directory <code>tools/regression/build</code>
and invoking bjam. If all goes well these utility programs
will be found in the directory <code>dist/bin</code>. From
there they should be moved to a place in the current
path.
<p>
<h3>Running Tests for One Library</h3>
<ol>
<li>Start from your command line environment.
<li>set the current directory to:../libs/&lt;library name&gt;/test
<li>Invoke one of the following:
<ul>
<li><code>../../../tools/regression/library_test (*nix)</code>.
<li><code>..\..\..\tools\regression\library_test (windows)</code>.
</ul>
<li>This will display short help message describing the how to set
the command line arguments for the compilers and variants you want to
appear in the final table.
<li>Setting these arguments requires rudimentary knowledge of bjam
usage. Hopefully, if you've arrived at this page you've gained the
required knowledge during the installation and library build process.
<li>Rerun the above command with the arguments set accordingly.
<li>When the command terminates, there should be a file named
"library_status.html" in the current directory.
<li>Display this file with any web browser.
</ol>
There should appear a table similar to the following for the regex
library.
<p>
<table border="1" cellspacing="0" cellpadding="5">
<tr>
<td rowspan="4">Test Name</td>
<td align="center" colspan="4" >msvc-7.1</td>
</tr><tr>
<td align="center" colspan="2" >debug</td>
<td align="center" colspan="2" >release</td>
</tr><tr>
<td align="center" >link-static</td>
<td align="center" rowspan="2" >threading-multi</td>
<td align="center" >link-static</td>
<td align="center" rowspan="2" >threading-multi</td>
</tr><tr>
<td align="center" >threading-multi</td>
<td align="center" >threading-multi</td>
</tr><tr><td>bad_expression_test</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-bad_expression_test.test-msvc-7.1-debug-threading-multi"><i>Warn</i></a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-bad_expression_test.test-msvc-7.1-release-threading-multi"><i>Warn</i></a></td></tr>
<tr><td>captures</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-captures-msvc-7.1-debug-threading-multi"><font color="#FF0000"><i>Fail</i></font></a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-captures-msvc-7.1-release-threading-multi"><font color="#FF0000"><i>Fail</i></font></a></td></tr>
<tr><td>captures_test</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-captures_test.test-msvc-7.1-debug-threading-multi"><i>Warn</i></a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-captures_test.test-msvc-7.1-release-threading-multi"><i>Warn</i></a></td></tr>
<tr><td>concept_check</a></td><td align="right"><i>Missing</i></td><td align="right">Pass</td><td align="right"><i>Missing</i></td><td align="right">Pass</td></tr>
<tr><td>icu_concept_check</a></td><td align="right"><i>Missing</i></td><td align="right">Pass</td><td align="right"><i>Missing</i></td><td align="right">Pass</td></tr>
<tr><td>object_cache_test</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-object_cache_test.test-msvc-7.1-debug-threading-multi"><i>Warn</i></a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-object_cache_test.test-msvc-7.1-release-threading-multi"><i>Warn</i></a></td></tr>
<tr><td>posix_api_check</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-posix_api_check.test-msvc-7.1-debug-threading-multi"><i>Warn</i></a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-posix_api_check.test-msvc-7.1-release-threading-multi"><i>Warn</i></a></td></tr>
<tr><td>posix_api_check_cpp</a></td><td align="right"><i>Missing</i></td><td align="right">Pass</td><td align="right"><i>Missing</i></td><td align="right">Pass</td></tr>
<tr><td>recursion_test</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-recursion_test.test-msvc-7.1-debug-threading-multi"><i>Warn</i></a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-recursion_test.test-msvc-7.1-release-threading-multi"><i>Warn</i></a></td></tr>
<tr><td>regex_config_info</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-regex_config_info.test-msvc-7.1-debug-threading-multi">Pass</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-regex_config_info.test-msvc-7.1-release-threading-multi">Pass</a></td></tr>
<tr><td>regex_dll_config_info</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-regex_dll_config_info.test-msvc-7.1-debug-threading-multi">Pass</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-regex_dll_config_info.test-msvc-7.1-release-threading-multi">Pass</a></td></tr>
<tr><td>regex_regress</a></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-regex_regress.test-msvc-7.1-debug-link-static-threading-multi">Pass</a><sup>*</sup></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-regex_regress.test-msvc-7.1-release-link-static-threading-multi">Pass</a><sup>*</sup></td><td align="right"><i>Missing</i></td></tr>
<tr><td>regex_regress_dll</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-regex_regress_dll.test-msvc-7.1-debug-threading-multi">Pass</a><sup>*</sup></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-regex_regress_dll.test-msvc-7.1-release-threading-multi">Pass</a><sup>*</sup></td></tr>
<tr><td>regex_regress_threaded</a></td><td align="right"><i>Missing</i></td><td align="right">Pass</td><td align="right"><i>Missing</i></td><td align="right">Pass</td></tr>
<tr><td>static_mutex_test</a></td><td align="right"><i>Missing</i></td><td align="right">Pass</td><td align="right"><i>Missing</i></td><td align="right">Pass</td></tr>
<tr><td>test_collate_info</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-test_collate_info.test-msvc-7.1-debug-threading-multi"><i>Warn</i></a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-test_collate_info.test-msvc-7.1-release-threading-multi"><i>Warn</i></a></td></tr>
<tr><td>unicode_iterator_test</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-unicode_iterator_test.test-msvc-7.1-debug-threading-multi"><i>Warn</i></a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-unicode_iterator_test.test-msvc-7.1-release-threading-multi"><i>Warn</i></a></td></tr>
<tr><td>wide_posix_api_check_c</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-wide_posix_api_check_c.test-msvc-7.1-debug-threading-multi"><i>Warn</i></a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-wide_posix_api_check_c.test-msvc-7.1-release-threading-multi"><i>Warn</i></a></td></tr>
<tr><td>wide_posix_api_check_cpp</a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-wide_posix_api_check_cpp.test-msvc-7.1-debug-threading-multi"><i>Warn</i></a></td><td align="right"><i>Missing</i></td><td align="right"><a href="links.html#C:-Boost134-bin.v2-libs-regex-test-wide_posix_api_check_cpp.test-msvc-7.1-release-threading-multi"><i>Warn</i></a></td></tr>
</table>
<p>
This table was generated by invoking the following command line:
<p>
<code>
../../../tools/regression/library_test --toolset=msvc-7.1 variant=debug,release
</code>
<p>
from within the .../libs/regex/test directory.
<p>
This table shows the regex test results for both debug and release
versions of the library. Also it displays the fact that one of the
tests is run specifically with the static linking/multi-threading
versions of the runtime libraries. The cells marked "Missing" correspond
to tests that were not run for some reason or another. This is usually
because the corresponding <code>Jamfile.v2</code> excludes this test
for the given combination of compiler and build attributes. In this
example, all tests were run with the same compiler. If additional
compilers were used, they would appear as more columns in the table.
<p>
The table above is just an illustration so the links don't actually
point to anything. In the table you generated, the links will
display a page describing any errors, warnings or other available
information about the tests. If the test passes, usually, there
is no additional information and hence no link.
<p>
The tables are cumulative. That is, if you run one set of tests
now and tests with different attributes later, the table will
contain all the results to date. The test results are stored
in <code>../bin.v2/libs/test/&lt;library&gt;/...</code>.
To reinitialize the test results to empty, delete the corresponding
files in this directory.
<p>
The procedure above assumes that the table are generated within
the directory <code>../libs/&lt;library&gt;/test</code>. This is the
most common case since this directory contains the
<code>Jamfile.v2</code> as well as the source code that is
used by official boost testers. However, this is just a convention.
The table can be generated for other directories within the
library. One possibility would be to generate the table for
all the examples in <code>../libs/&lt;library&gt;/example</code>. Or
one might have a special directory of performance tests which
take a long time to run and hence are not suitable for running
by official boost testers. Just remember that library
status table is generated in the directory from which the
<code>library_test</code> command is invoked.
<p>
<h3>Running Tests for All Libraries</h3>
For those with *nix or cygwin command line shells, there is shell
script that can be run from the boost root directory:
<p>
<code> tools/regression/library_test_all</code>
<p>
The command line arguments are the same as for running the test
for one library. This script creates all the html files in all
the test directories as well as an html page in the <code>status</code>
directory named <code>library_status_summary.html</code>. This
can be used to browse through all test results for all test in
all libraries.
<hr>
<p>
Copyright 2007 Robert Ramey. Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
<p>
Revised <!--webbot bot="Timestamp" startspan s-type="EDITED"
s-format="%d %B, %Y" -->14 August, 2007<!--webbot bot="Timestamp"
i-checksum="38582" endspan --></p>
</body>
</html>

View file

@ -0,0 +1,70 @@
#!/bin/bash
#~ Copyright Redshift Software, Inc. 2007
#~ Distributed under the Boost Software License, Version 1.0.
#~ (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)

# Stream a bzip2-compressed archive of a Boost SVN branch export to
# stdout, deleting each exported file as soon as it has been archived so
# disk usage in the scratch directory stays small.
#
# FIX: shebang changed from /bin/sh to /bin/bash — the script uses the
# bash-only pattern-substitution expansion ${1/\/*} below, which fails
# on strictly POSIX shells (e.g. dash).

export PATH=/bin:/usr/bin:${PATH}

timestamp=`date +%F-%H-%M-%S-%Z`
branch=$1                          # e.g. "trunk" or "branches/release"
revision=`svn info file:///home/subversion/boost/${branch} | grep '^Revision:' | cut --byte=11-`
tag=boost-${1/\/*}-${timestamp}    # first path component of the branch + timestamp
export_dir=boost-$$                # per-process scratch directory name

# Remove files as listed in stdin, the assumption is that processing
# of the file is complete and can be removed.
rm_c()
{
    while read f; do
        rm -f ${f}
    done
}

# Generate the export file tree, and incrementally output the files
# created.
svn_export()
{
    svn export -r ${revision} file:///home/subversion/boost/${branch} ${tag}
    echo "Revision: ${revision}" > ${tag}/svn_info.txt
    echo "---- ${tag}/svn_info.txt"
}

# Create the archive incrementally, deleting files as we are done
# adding them to the archive.
make_archive()
{
    svn_export \
        | cut --bytes=6- \
        | star -c -D -to-stdout -d artype=pax list=- 2>/dev/null \
        | bzip2 -6 -c \
        | tee $1 \
        | tar -jtf - \
        | rm_c
}

# Stream the archive to stdout through a fifo in a fresh scratch dir,
# then clean up.
run()
{
    cd /tmp
    rm -rf ${export_dir}
    mkdir ${export_dir}
    cd ${export_dir}
    mkfifo out.tbz2
    make_archive out.tbz2 &
    cat out.tbz2
    cd /tmp
    rm -rf ${export_dir}
}

# Debug variant: write the archive to a file next to the scratch dir
# instead of streaming it to stdout.
run_debug()
{
    rm -rf ${export_dir}
    mkdir ${export_dir}
    cd ${export_dir}
    mkfifo out.tbz2
    make_archive out.tbz2 &
    cat out.tbz2 > ../${tag}.tar.bz2
    cd ..
    rm -rf ${export_dir}
}

run
#run_debug

View file

@ -0,0 +1,500 @@
# Copyright (c) MetaCommunications, Inc. 2003-2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import xml.sax.saxutils
import zipfile
import ftplib
import time
import stat
import xml.dom.minidom
import xmlrpclib
import httplib
import os.path
import string
import sys
def process_xml_file( input_file, output_file ):
    """Append the contents of one test_log.xml file to *output_file*,
    translating each line through utils.char_translation_table (which
    presumably strips characters illegal in XML -- TODO confirm in utils)."""
    utils.log( 'Processing test log "%s"' % input_file )
    f = open( input_file, 'r' )
    xml = f.readlines()
    f.close()
    for i in range( 0, len(xml)):
        # sanitize the line before it is merged into the combined report
        xml[i] = string.translate( xml[i], utils.char_translation_table )
    output_file.writelines( xml )
def process_test_log_files( output_file, dir, names ):
    """os.path.walk visitor: feed every 'test_log.xml' found among
    *names* (in directory *dir*) through process_xml_file, appending
    the sanitized content to *output_file*."""
    logs = [ name for name in names
             if os.path.basename( name ) == 'test_log.xml' ]
    for name in logs:
        process_xml_file( os.path.join( dir, name ), output_file )
def collect_test_logs( input_dirs, test_results_writer ):
    """Walk each directory in *input_dirs* and stream every
    test_log.xml found into *test_results_writer* (an open,
    writable file-like object)."""
    __log__ = 1
    utils.log( 'Collecting test logs ...' )
    for input_dir in input_dirs:
        utils.log( 'Walking directory "%s" ...' % input_dir )
        # os.path.walk invokes process_test_log_files( writer, dir, names )
        os.path.walk( input_dir, process_test_log_files, test_results_writer )
# Map a test_log result attribute onto a Dart status value.
dart_status_from_result = {
    'succeed': 'passed',
    'fail': 'failed',
    'note': 'passed',
    '': 'notrun'
    }

# Map a results tag onto the Dart project name.
dart_project = {
    'trunk': 'Boost_HEAD',
    '': 'Boost_HEAD'
    }

# Map a regression run type onto the Dart submission track.
dart_track = {
    'full': 'Nightly',
    'incremental': 'Continuous',
    '': 'Experimental'
    }

def _build_ascii_only_table():
    """Build a 256-entry str.translate table that keeps newlines and
    printable ASCII and maps every other byte to '?'."""
    cells = []
    for code in range(256):
        ch = chr(code)
        if ch == '\n' or ch == '\r':
            cells.append(ch)
        elif 32 <= code < 0x80:
            cells.append(ch)
        else:
            cells.append('?')
    return "".join(cells)

# Used to sanitize raw log XML before feeding it to the XML parser.
ascii_only_table = _build_ascii_only_table()
class xmlrpcProxyTransport(xmlrpclib.Transport):
    """xmlrpclib transport that routes each request through an HTTP proxy."""
    def __init__(self, proxy):
        # proxy: "host:port" of the HTTP proxy server
        self.proxy = proxy
    def make_connection(self, host):
        # remember the real target host, but open the TCP connection
        # to the proxy instead
        self.realhost = host
        return httplib.HTTP(self.proxy)
    def send_request(self, connection, handler, request_body):
        # proxies require the absolute-URI request form
        connection.putrequest('POST','http://%s%s' % (self.realhost,handler))
    def send_host(self, connection, host):
        # Host header must name the real server, not the proxy
        connection.putheader('Host',self.realhost)
def publish_test_logs(
    input_dirs,
    runner_id, tag, platform, comment_file, timestamp, user, source, run_type,
    dart_server = None,
    http_proxy = None,
    **unused
    ):
    """Walk *input_dirs* and submit every test_log.xml found to a Dart
    server (no-op when *dart_server* is None), batching one submission
    per toolset.  *http_proxy*, when set, routes the XML-RPC upload
    through xmlrpcProxyTransport.  The remaining arguments identify the
    run and are folded into the submission metadata.  Server errors are
    logged, not raised."""
    __log__ = 1
    utils.log( 'Publishing test logs ...' )
    dart_rpc = None
    dart_dom = {}   # toolset name -> accumulated DartSubmission DOM

    # os.path.walk visitor: parse one test_log.xml and append its result
    # elements to the submission DOM for that file's toolset.
    def _publish_test_log_files_ ( unused, dir, names ):
        for file in names:
            if os.path.basename( file ) == 'test_log.xml':
                utils.log( 'Publishing test log "%s"' % os.path.join(dir,file) )
                if dart_server:
                    # strip non-ASCII bytes that would break the XML parser
                    log_xml = open(os.path.join(dir,file)).read().translate(ascii_only_table)
                    #~ utils.log( '--- XML:\n%s' % log_xml)
                    #~ It seems possible to get an empty XML result file :-(
                    if log_xml == "": continue
                    log_dom = xml.dom.minidom.parseString(log_xml)
                    test = {
                        'library': log_dom.documentElement.getAttribute('library'),
                        'test-name': log_dom.documentElement.getAttribute('test-name'),
                        'toolset': log_dom.documentElement.getAttribute('toolset')
                        }
                    if not test['test-name'] or test['test-name'] == '':
                        test['test-name'] = 'unknown'
                    if not test['toolset'] or test['toolset'] == '':
                        test['toolset'] = 'unknown'
                    # first log seen for this toolset: start a new submission
                    if not dart_dom.has_key(test['toolset']):
                        dart_dom[test['toolset']] = xml.dom.minidom.parseString(
'''<?xml version="1.0" encoding="UTF-8"?>
<DartSubmission version="2.0" createdby="collect_and_upload_logs.py">
<Site>%(site)s</Site>
<BuildName>%(buildname)s</BuildName>
<Track>%(track)s</Track>
<DateTimeStamp>%(datetimestamp)s</DateTimeStamp>
</DartSubmission>
''' % {
                            'site': runner_id,
                            'buildname': "%s -- %s (%s)" % (platform,test['toolset'],run_type),
                            'track': dart_track[run_type],
                            'datetimestamp' : timestamp
                            } )
                    submission_dom = dart_dom[test['toolset']]
                    # one <Test> element per child element (compile/link/run/...)
                    for node in log_dom.documentElement.childNodes:
                        if node.nodeType == xml.dom.Node.ELEMENT_NODE:
                            if node.firstChild:
                                log_data = xml.sax.saxutils.escape(node.firstChild.data)
                            else:
                                log_data = ''
                            test_dom = xml.dom.minidom.parseString('''<?xml version="1.0" encoding="UTF-8"?>
<Test>
<Name>.Test.Boost.%(tag)s.%(library)s.%(test-name)s.%(type)s</Name>
<Status>%(result)s</Status>
<Measurement name="Toolset" type="text/string">%(toolset)s</Measurement>
<Measurement name="Timestamp" type="text/string">%(timestamp)s</Measurement>
<Measurement name="Log" type="text/text">%(log)s</Measurement>
</Test>
''' % {
                                'tag': tag,
                                'library': test['library'],
                                'test-name': test['test-name'],
                                'toolset': test['toolset'],
                                'type': node.nodeName,
                                'result': dart_status_from_result[node.getAttribute('result')],
                                'timestamp': node.getAttribute('timestamp'),
                                'log': log_data
                                })
                            submission_dom.documentElement.appendChild(
                                test_dom.documentElement.cloneNode(1) )

    for input_dir in input_dirs:
        utils.log( 'Walking directory "%s" ...' % input_dir )
        os.path.walk( input_dir, _publish_test_log_files_, None )
    if dart_server:
        try:
            rpc_transport = None
            if http_proxy:
                rpc_transport = xmlrpcProxyTransport(http_proxy)
            dart_rpc = xmlrpclib.ServerProxy(
                'http://%s/%s/Command/' % (dart_server,dart_project[tag]),
                rpc_transport )
            for dom in dart_dom.values():
                #~ utils.log('Dart XML: %s' % dom.toxml('utf-8'))
                dart_rpc.Submit.put(xmlrpclib.Binary(dom.toxml('utf-8')))
        except Exception, e:
            # best-effort publishing: log server failures, do not abort the run
            utils.log('Dart server error: %s' % e)
def upload_to_ftp( tag, results_file, ftp_proxy, debug_level ):
    """Upload *results_file* to the boost-regression FTP area under *tag*.

    tag          -- remote subdirectory; slash-separated components are
                    created one by one if missing
    results_file -- path of the local archive to upload
    ftp_proxy    -- optional FTP proxy host; None for a direct connection
    debug_level  -- ftplib debug verbosity (0 = quiet)
    """
    ftp_site = 'fx.meta-comm.com'
    site_path = '/boost-regression'

    utils.log( 'Uploading log archive "%s" to ftp://%s%s/%s' % ( results_file, ftp_site, site_path, tag ) )

    if not ftp_proxy:
        ftp = ftplib.FTP( ftp_site )
        ftp.set_debuglevel( debug_level )
        ftp.login()
    else:
        utils.log( '    Connecting through FTP proxy server "%s"' % ftp_proxy )
        ftp = ftplib.FTP( ftp_proxy )
        ftp.set_debuglevel( debug_level )
        ftp.set_pasv (0) # turn off PASV mode
        ftp.login( 'anonymous@%s' % ftp_site, 'anonymous@' )

    ftp.cwd( site_path )
    try:
        ftp.cwd( tag )
    except ftplib.error_perm:
        # remote directory does not exist yet: create each path component
        for dir in tag.split( '/' ):
            ftp.mkd( dir )
            ftp.cwd( dir )

    f = open( results_file, 'rb' )
    try:
        ftp.storbinary( 'STOR %s' % os.path.basename( results_file ), f )
    finally:
        # FIX: the file handle was previously leaked (never closed)
        f.close()
    ftp.quit()
def copy_comments( results_xml, comment_file ):
    """Emit a <comment> element into *results_xml* (an XMLGenerator)
    holding the text of *comment_file*; when the file is missing, log a
    warning and emit an empty element instead."""
    results_xml.startElement( 'comment', {} )
    if not os.path.exists( comment_file ):
        utils.log( 'Warning: comment file "%s" is not found.' % comment_file )
    else:
        utils.log( 'Reading comments file "%s"...' % comment_file )
        f = open( comment_file, 'r' )
        try:
            results_xml.characters( f.read() )
        finally:
            f.close()
    results_xml.endElement( 'comment' )
def compress_file( file_path, archive_path ):
utils.log( 'Compressing "%s"...' % file_path )
try:
z = zipfile.ZipFile( archive_path, 'w', zipfile.ZIP_DEFLATED )
z.write( file_path, os.path.basename( file_path ) )
z.close()
utils.log( 'Done writing "%s".'% archive_path )
except Exception, msg:
utils.log( 'Warning: Compressing falied (%s)' % msg )
utils.log( ' Trying to compress using a platform-specific tool...' )
try: import zip_cmd
except ImportError:
script_dir = os.path.dirname( os.path.abspath( sys.argv[0] ) )
utils.log( 'Could not find \'zip_cmd\' module in the script directory (%s).' % script_dir )
raise Exception( 'Compressing failed!' )
else:
if os.path.exists( archive_path ):
os.unlink( archive_path )
utils.log( 'Removing stale "%s".' % archive_path )
zip_cmd.main( file_path, archive_path )
utils.log( 'Done compressing "%s".' % archive_path )
def read_timestamp( file ):
    """Return the run timestamp as a UTC struct_time: the modification
    time of *file*, or the current time (with a logged warning) when
    *file* does not exist."""
    if os.path.exists( file ):
        return time.gmtime( os.stat( file ).st_mtime )
    fallback = time.gmtime()
    utils.log( 'Warning: timestamp file "%s" does not exist'% file )
    utils.log( 'Using current UTC time (%s)' % fallback )
    return fallback
def collect_logs(
      results_dir
    , runner_id
    , tag
    , platform
    , comment_file
    , timestamp_file
    , user
    , source
    , run_type
    , dart_server = None
    , http_proxy = None
    , revision = ''
    , **unused
    ):
    """Merge all test_log.xml files under *results_dir* into a single
    <runner_id>.xml report (wrapped in a <test-run> element carrying the
    run metadata), then zip it to <runner_id>.zip.  When *dart_server*
    is set, the individual logs are also published to Dart first."""
    # timestamp of the run, taken from timestamp_file's mtime
    timestamp = time.strftime( '%Y-%m-%dT%H:%M:%SZ', read_timestamp( timestamp_file ) )

    if dart_server:
        publish_test_logs( [ results_dir ],
            runner_id, tag, platform, comment_file, timestamp, user, source, run_type,
            dart_server = dart_server,
            http_proxy = http_proxy )

    results_file = os.path.join( results_dir, '%s.xml' % runner_id )
    results_writer = open( results_file, 'w' )
    utils.log( 'Collecting test logs into "%s"...' % results_file )

    results_xml = xml.sax.saxutils.XMLGenerator( results_writer )
    results_xml.startDocument()
    results_xml.startElement(
        'test-run'
        , {
            'tag': tag
            , 'platform': platform
            , 'runner': runner_id
            , 'timestamp': timestamp
            , 'source': source
            , 'run-type': run_type
            , 'revision': revision
            }
        )

    # the user comment goes first, then the raw per-test logs
    copy_comments( results_xml, comment_file )
    collect_test_logs( [ results_dir ], results_writer )

    results_xml.endElement( "test-run" )
    results_xml.endDocument()
    results_writer.close()
    utils.log( 'Done writing "%s".' % results_file )

    compress_file(
        results_file
        , os.path.join( results_dir,'%s.zip' % runner_id )
        )
def upload_logs(
      results_dir
    , runner_id
    , tag
    , user
    , ftp_proxy
    , debug_level
    , send_bjam_log = False
    , timestamp_file = None
    , dart_server = None
    , **unused
    ):
    """Upload the collected <runner_id>.zip archive to the regression
    FTP site under *tag*; when *send_bjam_log* is set, also zip and
    upload bjam.log under <tag>/logs."""
    logs_archive = os.path.join( results_dir, '%s.zip' % runner_id )
    upload_to_ftp( tag, logs_archive, ftp_proxy, debug_level )
    if send_bjam_log:
        bjam_log_path = os.path.join( results_dir, 'bjam.log' )
        if not timestamp_file:
            # default to the log's own mtime when no timestamp file given
            timestamp_file = bjam_log_path
        # embed the timestamp in the name so successive runs don't collide
        timestamp = time.strftime( '%Y-%m-%d-%H-%M-%S', read_timestamp( timestamp_file ) )
        logs_archive = os.path.join( results_dir, '%s.%s.log.zip' % ( runner_id, timestamp ) )
        compress_file( bjam_log_path, logs_archive )
        upload_to_ftp( '%s/logs' % tag, logs_archive, ftp_proxy, debug_level )
def collect_and_upload_logs(
      results_dir
    , runner_id
    , tag
    , platform
    , comment_file
    , timestamp_file
    , user
    , source
    , run_type
    , revision = None
    , ftp_proxy = None
    , debug_level = 0
    , send_bjam_log = False
    , dart_server = None
    , http_proxy = None
    , **unused
    ):
    """Convenience driver: collect_logs() followed by upload_logs().
    This is the default command run by main()."""
    collect_logs(
          results_dir
        , runner_id
        , tag
        , platform
        , comment_file
        , timestamp_file
        , user
        , source
        , run_type
        , revision = revision
        , dart_server = dart_server
        , http_proxy = http_proxy
        )
    upload_logs(
          results_dir
        , runner_id
        , tag
        , user
        , ftp_proxy
        , debug_level
        , send_bjam_log
        , timestamp_file
        , dart_server = dart_server
        )
def accept_args( args ):
    """Parse command-line *args* into the keyword-argument dict that the
    command functions (collect_logs, upload_logs, ...) accept.

    FIX: the returned dict used the key 'revision ' (note the trailing
    space), so the --revision value was silently swallowed by the
    commands' **unused parameter and never reached collect_logs; the
    report's revision attribute was always the default.
    """
    args_spec = [
          'locate-root='
        , 'runner='
        , 'tag='
        , 'platform='
        , 'comment='
        , 'timestamp='
        , 'source='
        , 'run-type='
        , 'user='
        , 'ftp-proxy='
        , 'proxy='
        , 'debug-level='
        , 'send-bjam-log'
        , 'help'
        , 'dart-server='
        , 'revision='
        ]

    options = {
          '--tag'           : 'trunk'
        , '--platform'      : sys.platform
        , '--comment'       : 'comment.html'
        , '--timestamp'     : 'timestamp'
        , '--user'          : None
        , '--source'        : 'SVN'
        , '--run-type'      : 'full'
        , '--ftp-proxy'     : None
        , '--proxy'         : None
        , '--debug-level'   : 0
        , '--dart-server'   : 'beta.boost.org:8081'
        , '--revision'      : None
        }

    utils.accept_args( args_spec, args, options, usage )

    return {
          'results_dir'     : options[ '--locate-root' ]
        , 'runner_id'       : options[ '--runner' ]
        , 'tag'             : options[ '--tag' ]
        , 'platform'        : options[ '--platform']
        , 'comment_file'    : options[ '--comment' ]
        , 'timestamp_file'  : options[ '--timestamp' ]
        , 'user'            : options[ '--user' ]
        , 'source'          : options[ '--source' ]
        , 'run_type'        : options[ '--run-type' ]
        , 'ftp_proxy'       : options[ '--ftp-proxy' ]
        , 'http_proxy'      : options[ '--proxy' ]
        , 'debug_level'     : int(options[ '--debug-level' ])
        , 'send_bjam_log'   : options.has_key( '--send-bjam-log' )
        , 'dart_server'     : options[ '--dart-server' ]
        , 'revision'        : options[ '--revision' ]  # was 'revision ' (trailing space)
        }
# Command-line verb -> implementation; consulted by main() and listed
# in the help text printed by usage().
commands = {
      'collect-and-upload'  : collect_and_upload_logs
    , 'collect-logs'        : collect_logs
    , 'upload-logs'         : upload_logs
    }
def usage():
print 'Usage: %s [command] [options]' % os.path.basename( sys.argv[0] )
print '''
Commands:
\t%s
Options:
\t--locate-root directory to to scan for "test_log.xml" files
\t--runner runner ID (e.g. "Metacomm")
\t--timestamp path to a file which modification time will be used
\t as a timestamp of the run ("timestamp" by default)
\t--comment an HTML comment file to be inserted in the reports
\t ("comment.html" by default)
\t--tag the tag for the results ("trunk" by default)
\t--user SourceForge user name for a shell account (optional)
\t--source where Boost sources came from ("SVN" or "tarball";
\t "SVN" by default)
\t--run-type "incremental" or "full" ("full" by default)
\t--send-bjam-log in addition to regular XML results, send in full bjam
\t log of the regression run
\t--proxy HTTP proxy server address and port (e.g.
\t 'http://www.someproxy.com:3128', optional)
\t--ftp-proxy FTP proxy server (e.g. 'ftpproxy', optional)
\t--debug-level debugging level; controls the amount of debugging
\t output printed; 0 by default (no debug output)
\t--dart-server The dart server to send results to.
''' % '\n\t'.join( commands.keys() )
def main():
    """Dispatch to the command named by argv[1]; default is
    'collect-and-upload' when no known command is given."""
    if len(sys.argv) > 1 and sys.argv[1] in commands:
        command = sys.argv[1]
        args = sys.argv[ 2: ]
    else:
        # no recognized command: treat all arguments as options
        command = 'collect-and-upload'
        args = sys.argv[ 1: ]
    commands[ command ]( **accept_args( args ) )
# When imported as a module, 'utils' is assumed to already be importable.
if __name__ != '__main__': import utils
else:
    # in absence of relative import: when run as a script, walk up from
    # the script location to the 'xsl_reports' directory and put it on
    # sys.path so 'import utils' resolves, then run the dispatcher.
    xsl_path = os.path.abspath( os.path.dirname( sys.argv[ 0 ] ) )
    while os.path.basename( xsl_path ) != 'xsl_reports': xsl_path = os.path.dirname( xsl_path )
    sys.path.append( xsl_path )

    import utils

    main()

View file

@ -19,6 +19,7 @@
*******************************************************************************/
#include "boost/config.hpp"
#include "boost/filesystem/operations.hpp"
#include "boost/filesystem/fstream.hpp"
#include "detail/tiny_xml.hpp"
@ -26,6 +27,7 @@ namespace fs = boost::filesystem;
namespace xml = boost::tiny_xml;
#include <cstdlib> // for abort, exit
#include <cctype> // for toupper
#include <string>
#include <vector>
#include <set>
@ -52,10 +54,12 @@ namespace
fs::path boost_root; // boost-root complete path
fs::path locate_root; // locate-root (AKA ALL_LOCATE_TARGET) complete path
bool compile_time;
bool run_time;
bool ignore_pass;
bool no_warn;
bool no_links;
bool boost_build_v2;
bool boost_build_v2 = true;
fs::path jamfile_path;
@ -83,12 +87,32 @@ namespace
const string empty_string;
std::vector<int> error_count;
// prefix for library and test hyperlink prefix
string cvs_root ( "http://boost.cvs.sourceforge.net/" );
string url_prefix_dir_view( cvs_root + "boost/boost" );
string url_prefix_checkout_view( cvs_root + "*checkout*/boost/boost" );
string url_suffix_text_view( "?view=markup&rev=HEAD" );
// get revision number (as a string) if boost_root is svn working copy -----//
string revision( const fs::path & boost_root )
{
string rev;
fs::path entries( boost_root / ".svn" / "entries" );
fs::ifstream entries_file( entries );
if ( entries_file )
{
std::getline( entries_file, rev );
std::getline( entries_file, rev );
std::getline( entries_file, rev );
std::getline( entries_file, rev ); // revision number as a string
}
return rev;
}
// build notes_bookmarks from notes HTML -----------------------------------//
void build_notes_bookmarks()
@ -180,38 +204,11 @@ namespace
}
// platform_desc -----------------------------------------------------------//
// from locate_root/status/bin/config_info.test/xxx/.../config_info.output
string platform_desc()
{
string result;
fs::path dot_output_path;
// the gcc config_info "Detected Platform" sometimes reports "cygwin", so
// prefer any of the other compilers.
if ( find_file( locate_root / "bin/boost/status/config_info.test",
"config_info.output", dot_output_path, "gcc" )
|| find_file( locate_root / "bin/boost/status/config_info.test",
"config_info.output", dot_output_path )
|| find_file( locate_root / "status/bin/config_info.test",
"config_info.output", dot_output_path, "gcc" )
|| find_file( locate_root / "status/bin/config_info.test",
"config_info.output", dot_output_path ) )
{
fs::ifstream file( dot_output_path );
if ( file )
{
while( std::getline( file, result ) )
{
if ( result.find( "Detected Platform: " ) == 0 )
{
result.erase( 0, 19 );
return result;
}
}
result.clear();
}
}
string result = BOOST_PLATFORM;
result[0] = std::toupper( result[0] );
return result;
}
@ -531,14 +528,16 @@ const fs::path find_bin_path(const string& relative)
// do_cell ---------------------------------------------------------------//
bool do_cell( const string & lib_name,
bool do_cell(
int compiler,
const string & lib_name,
const fs::path & test_dir,
const string & test_type,
const string & test_name,
const string & toolset,
string & target,
bool always_show_run_output )
// return true if any results except pass_msg
// return true if any results except simple pass_msg
{
fs::path target_dir( target_directory( test_dir / toolset ) );
bool pass = false;
@ -568,7 +567,8 @@ const fs::path find_bin_path(const string& relative)
const xml::element & db( *dbp );
std::string test_type_base( test_type );
if ( test_type_base.size() > 5 )
if ( test_type_base == "run_pyd" ) test_type_base = "run";
else if ( test_type_base.size() > 5 )
{
const string::size_type trailer = test_type_base.size() - 5;
if ( test_type_base.substr( trailer ) == "_fail" )
@ -576,14 +576,6 @@ const fs::path find_bin_path(const string& relative)
test_type_base.erase( trailer );
}
}
if ( test_type_base.size() > 4 )
{
const string::size_type trailer = test_type_base.size() - 4;
if ( test_type_base.substr( trailer ) == "_pyd" )
{
test_type_base.erase( trailer );
}
}
const xml::element & test_type_element( find_element( db, test_type_base ) );
pass = !test_type_element.name.empty()
@ -599,8 +591,9 @@ const fs::path find_bin_path(const string& relative)
always_show_run_output || note );
}
// generate the status table cell pass/warn/fail HTML
target += "<td>";
// generate the status table cell pass/warn/fail HTML
if ( anything_generated != 0 )
{
target += "<a href=\"";
@ -624,6 +617,40 @@ const fs::path find_bin_path(const string& relative)
if ( !notes.empty() )
target += get_notes( toolset, lib_name, test_name, !pass );
// generate compile-time if requested
if ( compile_time )
{
const xml::element & compile_element( find_element( db, "compile" ) );
if ( !compile_element.name.empty() )
{
string times = attribute_value( compile_element, "timings" );
if ( !times.empty() )
{
target += "<br>";
target += times.substr( 0, times.find( " " ) );
}
}
}
// generate run-time if requested
if ( run_time )
{
const xml::element & run_element( find_element( db, "run" ) );
if ( !run_element.name.empty() )
{
string times = attribute_value( run_element, "timings" );
if ( !times.empty() )
{
target += "<br>";
target += times.substr( 0, times.find( " " ) );
}
}
}
if ( !pass ) ++error_count[compiler];
target += "</td>";
return (anything_generated != 0) || !pass;
}
@ -661,7 +688,12 @@ const fs::path find_bin_path(const string& relative)
target += "<tr><td><a href=\"" + url_prefix_dir_view + "/libs/" + lib_name
+ "\">" + lib_name + "</a></td>";
target += "<td><a href=\"" + url_prefix_checkout_view + "/" + test_path
+ url_suffix_text_view + "\">" + test_name + "</a></td>";
+ url_suffix_text_view + "\">" + test_name + "</a>";
if ( compile_time ) target += "<br> Compile time:";
if ( run_time ) target += "<br> Run time:";
target += "</td>";
target += "<td>" + test_type + "</td>";
bool no_warn_save = no_warn;
@ -669,10 +701,11 @@ const fs::path find_bin_path(const string& relative)
// for each compiler, generate <td>...</td> html
bool anything_to_report = false;
int compiler = 0;
for ( std::vector<string>::const_iterator itr=toolsets.begin();
itr != toolsets.end(); ++itr )
itr != toolsets.end(); ++itr, ++compiler )
{
anything_to_report |= do_cell( lib_name, test_dir, test_type, test_name, *itr, target,
anything_to_report |= do_cell( compiler, lib_name, test_dir, test_type, test_name, *itr, target,
always_show_run_output );
}
@ -790,6 +823,7 @@ const fs::path find_bin_path(const string& relative)
<< (desc.size() ? desc : compiler_itr->leaf())
<< (vers.size() ? (string( "<br>" ) + vers ) : string( "" ))
<< "</td>\n";
error_count.push_back( 0 );
}
}
}
@ -800,7 +834,19 @@ const fs::path find_bin_path(const string& relative)
do_table_body( bin_path );
report << "</table>\n";
// error total row
report << "<tr> <td> &nbsp;</td><td>Number of Failures</td><td> &nbsp;</td>\n";
// for each compiler, generate <td>...</td> html
int compiler = 0;
for ( std::vector<string>::const_iterator itr=toolsets.begin();
itr != toolsets.end(); ++itr, ++compiler )
{
report << "<td align=\"center\">" << error_count[compiler] << "</td>\n";
}
report << "</tr>\n</table>\n";
}
} // unnamed namespace
@ -827,9 +873,12 @@ int cpp_main( int argc, char * argv[] ) // note name!
{ notes_map_path = fs::path( argv[2], fs::native ); --argc; ++argv; }
else if ( std::strcmp( argv[1], "--ignore-pass" ) == 0 ) ignore_pass = true;
else if ( std::strcmp( argv[1], "--no-warn" ) == 0 ) no_warn = true;
else if ( std::strcmp( argv[1], "--v1" ) == 0 ) boost_build_v2 = false;
else if ( std::strcmp( argv[1], "--v2" ) == 0 ) boost_build_v2 = true;
else if ( argc > 2 && std::strcmp( argv[1], "--jamfile" ) == 0)
{ jamfile_path = fs::path( argv[2], fs::native ); --argc; ++argv; }
else if ( std::strcmp( argv[1], "--compile-time" ) == 0 ) compile_time = true;
else if ( std::strcmp( argv[1], "--run-time" ) == 0 ) run_time = true;
else { std::cerr << "Unknown option: " << argv[1] << "\n"; argc = 1; }
--argc;
++argv;
@ -854,6 +903,12 @@ int cpp_main( int argc, char * argv[] ) // note name!
" --notes-map path Path to file of toolset/test,n lines, where\n"
" n is number of note bookmark in --notes file.\n"
" --jamfile path Path to Jamfile. By default \"Jamfile\".\n"
" --v1 Assume Boost.Build version 1.\n"
" --v2 Assume Boost.Build version 2. (default)\n"
" --ignore-pass Ignore passing tests.\n"
" --no-warn Do not report warnings.\n"
" --compile-time Show compile time.\n"
" --run-time Show run time.\n"
"Example: compiler_status --compiler gcc /boost-root cs.html cs-links.html\n"
"Note: Only the leaf of the links-file path and --notes file string are\n"
"used in status-file HTML links. Thus for browsing, status-file,\n"
@ -906,21 +961,28 @@ int cpp_main( int argc, char * argv[] ) // note name!
std::strftime( run_date, sizeof(run_date),
"%X UTC, %A %d %B %Y", std::gmtime( &tod ) );
std::string rev = revision( boost_root );
report << "<html>\n"
"<head>\n"
"<title>Boost Compiler Status Automatic Test</title>\n"
"<title>Boost Test Results</title>\n"
"</head>\n"
"<body bgcolor=\"#ffffff\" text=\"#000000\">\n"
"<table border=\"0\">\n"
"<tr>\n"
"<td><img border=\"0\" src=\"../boost.png\" width=\"277\" "
"<td><img border=\"0\" src=\"http://www.boost.org/boost.png\" width=\"277\" "
"height=\"86\"></td>\n"
"<td>\n"
"<h1>Compiler Status: " + platform_desc() + "</h1>\n"
"<b>Run Date:</b> "
<< run_date
<< "\n"
;
"<h1>Boost Test Results - " + platform_desc() + "</h1>\n"
"<b>Run</b> "
<< run_date;
if ( !rev.empty() ) report << ", <b>Revision</b> " << rev;
report << "\n";
if ( compile_time )
report << "<p>Times reported are elapsed wall clock time in seconds.</p>\n";
if ( !comment_path.empty() )
{
@ -941,19 +1003,19 @@ int cpp_main( int argc, char * argv[] ) // note name!
links_file
<< "<html>\n"
"<head>\n"
"<title>Boost Compiler Status Error Log</title>\n"
"<title>Boost Test Details</title>\n"
"</head>\n"
"<body bgcolor=\"#ffffff\" text=\"#000000\">\n"
"<table border=\"0\">\n"
"<tr>\n"
"<td><img border=\"0\" src=\"../boost.png\" width=\"277\" "
"<td><img border=\"0\" src=\"http://www.boost.org/boost.png\" width=\"277\" "
"height=\"86\"></td>\n"
"<td>\n"
"<h1>Compiler Status: " + platform_desc() + "</h1>\n"
"<h1>Boost Test Details - " + platform_desc() + "</h1>\n"
"<b>Run Date:</b> "
<< run_date
<< "\n</td>\n</table>\n<br>\n"
;
<< run_date;
if ( !rev.empty() ) links_file << ", <b>Revision</b> " << rev;
links_file << "\n</td>\n</table>\n<br>\n";
}
do_table();

View file

@ -0,0 +1,983 @@
// Generate Compiler Status HTML from jam regression test output -----------//
// Copyright Beman Dawes 2002. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// See http://www.boost.org/tools/regression/ for documentation.
// Note: This version of the original program builds a large table
// which includes all build variations such as debug/release, static/dynamic, etc.
/*******************************************************************************
This program was designed to work unchanged on all platforms and
configurations. All output which is platform or configuration dependent
is obtained from external sources such as the .xml file from
process_jam_log execution, the tools/build/xxx-tools.jam files, or the
output of the config_info tests.
Please avoid adding platform or configuration dependencies during
program maintenance.
*******************************************************************************/
#include "boost/filesystem/operations.hpp"
#include "boost/filesystem/fstream.hpp"
namespace fs = boost::filesystem;
#include "detail/tiny_xml.hpp"
namespace xml = boost::tiny_xml;
#include "boost/iterator/transform_iterator.hpp"

#include <cstdlib>  // for abort, exit
#include <cstring>  // for strcmp (used by cpp_main's option parsing)
#include <string>
#include <vector>
#include <set>
#include <utility>  // for make_pair on STLPort
#include <map>
#include <algorithm>
#include <iostream>
#include <fstream>
#include <ctime>
#include <stdexcept>
#include <cassert>
#include <utility>
using std::string;
// Canned HTML fragments used when rendering individual result cells.
const string pass_msg( "Pass" );
const string warn_msg( "<i>Warn</i>" );
const string fail_msg( "<font color=\"#FF0000\"><i>Fail</i></font>" );
const string note_msg( "<sup>*</sup>" );
const string missing_residue_msg( "<i>Missing</i>" );

// Compiler output longer than this is truncated in the links file.
const std::size_t max_compile_msg_size = 10000;
namespace
{
fs::path boost_root; // boost-root complete path
fs::path locate_root; // locate-root (AKA ALL_LOCATE_TARGET) complete path
bool ignore_pass = false; // --ignore-pass: drop rows where every cell passed
bool no_warn = false; // --no-warn: suppress warnings for passing tests
bool no_links = false; // true when no links-file argument was supplied
fs::directory_iterator end_itr; // default-constructed == end-of-directory
// transform pathname to something html can accept:
// '/' cannot appear inside an HTML bookmark name, so map it to '-'.
struct char_xlate {
    typedef char result_type;
    result_type operator()(char c) const {
        return c == '/' ? '-' : c;
    }
};
typedef boost::transform_iterator<char_xlate, std::string::const_iterator> html_from_path;
// Stream every element of the iterator range [p.first, p.second) to os.
// Used with html_from_path pairs to emit a translated path.
template<class I1, class I2>
std::ostream & operator<<(
    std::ostream &os,
    std::pair<I1, I2> p
){
    for ( ; p.first != p.second; ++p.first )
        os << *p.first;
    return os;
}
// One node in the tree of build-variant subdirectories (toolset /
// variant / link / threading / ...).  A node may itself hold a test
// result ("leaf") and/or further subdirectories.
struct col_node {
    int rows, cols;               // header spans, filled in by get_spans()
    bool has_leaf;                // true if a test_log.xml exists at this level
    typedef std::map<std::string, col_node> subcolumns_t;
    subcolumns_t m_subcolumns;    // child directories, keyed by name
    // NOTE(review): declared but no definition is visible in this file;
    // any odr-use would fail to link — confirm before relying on it.
    bool operator<(const col_node &cn) const;
    col_node() :
        has_leaf(false)
    {}
    std::pair<int, int> get_spans();
};
std::pair<int, int> col_node::get_spans(){
rows = 1;
cols = 0;
if(has_leaf){
cols = 1;
}
if(! m_subcolumns.empty()){
subcolumns_t::iterator itr;
for(itr = m_subcolumns.begin(); itr != m_subcolumns.end(); ++itr){
std::pair<int, int> spans;
spans = itr->second.get_spans();
rows = std::max(rows, spans.first);
cols += spans.second;
}
++rows;
}
return std::make_pair(rows, cols);
}
// Recursively record in `node` which subdirectories of dir_root exist
// and which of them contain a test_log.xml ("leaf" results).
void build_node_tree(const fs::path & dir_root, col_node & node){
    fs::path xml_file_path( dir_root / "test_log.xml" );
    if (fs::exists( xml_file_path ) )
    {
        node.has_leaf = true;
    }
    fs::directory_iterator itr(dir_root);
    while(itr != end_itr){
        if(fs::is_directory(*itr)){
            // insert() leaves an existing entry untouched, so the trees of
            // several tests merge into one shared set of columns
            std::pair<col_node::subcolumns_t::iterator, bool> result
                = node.m_subcolumns.insert(
                    std::make_pair(itr->leaf(), col_node())
                );
            build_node_tree(*itr, result.first->second);
        }
        ++itr;
    }
}
fs::ofstream report;      // main status-file output stream
fs::ofstream links_file;  // optional detailed-output ("links") stream
string links_name;        // leaf name of the links file, used inside hrefs
fs::path notes_path;      // --notes: HTML file appended to the report
string notes_html;        // body text loaded from notes_path
fs::path notes_map_path;  // --notes-map: note bookmark mapping file
typedef std::multimap< string, string > notes_map; // key is test_name-toolset,
// value is note bookmark
notes_map notes;
string specific_compiler; // if running on one toolset only
const string empty_string;
// build notes_bookmarks from notes HTML -----------------------------------//
void build_notes_bookmarks()
{
if ( notes_map_path.empty() ) return;
fs::ifstream notes_map_file( notes_map_path );
if ( !notes_map_file )
{
std::cerr << "Could not open --notes-map input file: " << notes_map_path.string() << std::endl;
std::exit( 1 );
}
string line;
while( std::getline( notes_map_file, line ) )
{
string::size_type pos = 0;
if ( (pos = line.find( ',', pos )) == string::npos ) continue;
string key(line.substr( 0, pos ) );
string bookmark( line.substr( pos+1 ) );
// std::cout << "inserting \"" << key << "\",\"" << bookmark << "\"\n";
notes.insert( notes_map::value_type( key, bookmark ) );
}
}
// load_notes_html ---------------------------------------------------------//
// Read the --notes HTML file and capture everything strictly between the
// <body> and </body> tags into the global notes_html string.  Returns
// false when no --notes path was given; exits on open failure.
bool load_notes_html()
{
    if ( notes_path.empty() ) return false;
    fs::ifstream notes_file( notes_path );
    if ( !notes_file )
    {
        std::cerr << "Could not open --notes input file: " << notes_path.string() << std::endl;
        std::exit( 1 );
    }
    string line;
    bool in_body( false );
    while( std::getline( notes_file, line ) )
    {
        if ( in_body && line.find( "</body>" ) != string::npos ) in_body = false;
        if ( in_body ) notes_html += line;
        // Bug fix: string::find returns a position, not a bool.  The
        // original "if ( line.find( \"<body>\" ) )" was true for every line
        // that did not *start* with <body> (npos is non-zero), so capture
        // began on the first line of the file.  Test for "found anywhere".
        else if ( line.find( "<body>" ) != string::npos ) in_body = true;
    }
    return true;
}
// extract object library name from target directory string ----------------//
// The name is the single path component immediately following "/build/"
// (preferred) or "/test/"; returns "" when neither marker is present.
std::string extract_object_library_name( const std::string & s )
{
    std::string::size_type start = s.find( "/build/" );
    if ( start != std::string::npos )
        start += 7;
    else
    {
        start = s.find( "/test/" );
        if ( start == std::string::npos ) return "";
        start += 6;
    }
    return s.substr( start, s.find( "/", start ) - start );
}
// element_content ---------------------------------------------------------//
// Return the content of the first child element of `root` named `name`,
// or a reference to a shared empty string when there is none.
const string & element_content(
    const xml::element & root, const string & name )
{
    const static string empty_string;
    xml::element_list::const_iterator itr = root.elements.begin();
    while ( itr != root.elements.end() && (*itr)->name != name )
        ++itr;
    if ( itr == root.elements.end() )
        return empty_string;
    return (*itr)->content;
}
// find_element ------------------------------------------------------------//
// Return the first child element of `root` named `name`, or a reference
// to a shared default-constructed element when there is none (callers
// detect the miss via an empty element name).
const xml::element & find_element(
    const xml::element & root, const string & name )
{
    static const xml::element empty_element;
    xml::element_list::const_iterator itr = root.elements.begin();
    while ( itr != root.elements.end() && (*itr)->name != name )
        ++itr;
    if ( itr == root.elements.end() )
        return empty_element;
    return *((*itr).get());
}
// attribute_value ----------------------------------------------------------//
// Return the value of the named attribute of `element`, or a reference
// to a shared empty string when the attribute is absent.
const string & attribute_value(
    const xml::element & element,
    const string & attribute_name
){
    static const string empty_string;
    for ( xml::attribute_list::const_iterator atr = element.attributes.begin();
          atr != element.attributes.end();
          ++atr )
    {
        if ( atr->name == attribute_name )
            return atr->value;
    }
    return empty_string;
}
// generate_report ---------------------------------------------------------//
// Append the detailed output for one test (compiler, linker and run text,
// plus any object-library build failure) to links_file, under an anchor
// named after the target directory ('/' mapped to '-').
// Returns 0 if nothing generated, 1 otherwise, except 2 if compiler msgs.
int generate_report(
    const xml::element & db,
    const std::string source_library_name,
    const string & test_type,
    const fs::path & target_dir,
    bool pass,
    bool always_show_run_output
)
{
    // compile msgs sometimes modified, so make a local copy
    string compile( ((pass && no_warn)
        ? empty_string : element_content( db, "compile" )) );
    const string & link( pass ? empty_string : element_content( db, "link" ) );
    const string & run( (pass && !always_show_run_output)
        ? empty_string : element_content( db, "run" ) );
    string lib( (pass ? empty_string : element_content( db, "lib" )) );

    // some compilers output the filename even if there are no errors or
    // warnings; detect this if one line of output and it contains no space.
    string::size_type pos = compile.find( '\n', 1 );
    if ( pos != string::npos && compile.size()-pos <= 2
        && compile.find( ' ' ) == string::npos ) compile.clear();

    if ( lib.empty()
        && (
            compile.empty() || test_type == "compile_fail"
        )
        && link.empty()
        && run.empty()
    )
        return 0;

    int result = 1; // some kind of msg for sure

    // limit compile message length
    if ( compile.size() > max_compile_msg_size )
    {
        compile.erase( max_compile_msg_size );
        compile += "...\n (remainder deleted because of excessive size)\n";
    }

    // anchor named after the target directory ('/' -> '-') so result
    // cells can link straight to this section
    links_file << "<h2><a name=\"";
    links_file << std::make_pair(
        html_from_path(target_dir.string().begin()),
        html_from_path(target_dir.string().end())
    )
    << "\">"
    << std::make_pair(
        html_from_path(target_dir.string().begin()),
        html_from_path(target_dir.string().end())
    )
    ;
    links_file << "</a></h2>\n";

    if ( !compile.empty() )
    {
        ++result; // bump return value to 2: compiler messages present
        links_file << "<h3>Compiler output:</h3><pre>"
            << compile << "</pre>\n";
    }
    if ( !link.empty() )
        links_file << "<h3>Linker output:</h3><pre>" << link << "</pre>\n";
    if ( !run.empty() )
        links_file << "<h3>Run output:</h3><pre>" << run << "</pre>\n";

    // for an object library failure, generate a reference to the object
    // library failure message, and (once only) generate the object
    // library failure message itself
    static std::set< string > failed_lib_target_dirs; // only generate once
    if ( !lib.empty() )
    {
        if ( lib[0] == '\n' ) lib.erase( 0, 1 );
        string object_library_name( extract_object_library_name( lib ) );

        // changing the target directory naming scheme breaks
        // extract_object_library_name()
        assert( !object_library_name.empty() );
        if ( object_library_name.empty() )
            std::cerr << "Failed to extract object library name from " << lib << "\n";

        links_file << "<h3>Library build failure: </h3>\n"
            "See <a href=\"#"
            << source_library_name << "-"
            << object_library_name << "-"
            << std::make_pair(
                html_from_path(target_dir.string().begin()),
                html_from_path(target_dir.string().end())
            )
            // Bug fix: the href attribute was never closed before the link
            // text, producing malformed HTML; close it here.
            << "\">"
            << source_library_name << " - "
            << object_library_name << " - "
            << std::make_pair(
                html_from_path(target_dir.string().begin()),
                html_from_path(target_dir.string().end())
            )
            << "</a>";

        if ( failed_lib_target_dirs.find( lib ) == failed_lib_target_dirs.end() )
        {
            failed_lib_target_dirs.insert( lib );
            fs::path pth( locate_root / lib / "test_log.xml" );
            fs::ifstream file( pth );
            if ( file )
            {
                // emit the full failure text for the object library itself
                xml::element_ptr db = xml::parse( file, pth.string() );
                generate_report(
                    *db,
                    source_library_name,
                    test_type,
                    target_dir,
                    false,
                    false
                );
            }
            else
            {
                links_file << "<h2><a name=\""
                    << object_library_name << "-"
                    << std::make_pair(
                        html_from_path(target_dir.string().begin()),
                        html_from_path(target_dir.string().end())
                    )
                    << "\">"
                    << object_library_name << " - "
                    << std::make_pair(
                        html_from_path(target_dir.string().begin()),
                        html_from_path(target_dir.string().end())
                    )
                    << "</a></h2>\n"
                    << "test_log.xml not found\n";
            }
        }
    }
    return result;
}
// add_notes --------------------------------------------------------------//
// Append HTML bookmark links for every note registered under `key`.
// Notes whose text begins with '-' are shown even for passing tests;
// all others appear only when `fail` is true.  `sep` is emitted before
// each link and becomes "," after the first one.
void add_notes( const string & key, bool fail, string & sep, string & target )
{
    for ( notes_map::const_iterator itr = notes.lower_bound( key );
          itr != notes.end() && itr->first == key;
          ++itr )
    {
        const bool always_show = itr->second[0] == '-';
        if ( !fail && !always_show ) continue;
        const string note_desc( always_show
            ? itr->second.substr( 1 ) : itr->second );
        target += sep;
        sep = ",";
        target += "<a href=\"";
        target += "#";
        target += note_desc;
        target += "\">";
        target += note_desc;
        target += "</a>";
    }
}
// do_cell ---------------------------------------------------------------//
// Emit one <td> result cell for the test whose results live in
// target_dir, appending the HTML to `target`.  When `profile` is true
// and the test passed, a link to the profile output is added.
// Returns true if there is anything to report (any result except a
// simple pass), so callers can drop all-pass rows under --ignore-pass.
bool do_cell(
    const fs::path & target_dir,
    const string & lib_name,
    string & target,
    bool profile
){
    // return true if any results except pass_msg
    bool pass = false;

    fs::path xml_file_path( target_dir / "test_log.xml" );
    if ( !fs::exists( xml_file_path ) )
    {
        // suppress message because there are too many of them.
        // "missing" is a legitimate result, as it's not a requirement
        // that every test be run in every configuration.
        //std::cerr << "Missing jam_log.xml in target:\n "
        // << target_dir.string() << "\n";
        target += "<td align=\"right\">" + missing_residue_msg + "</td>";
        return true;
    }

    int anything_generated = 0;
    bool note = false;

    fs::ifstream file( xml_file_path );
    if ( !file ) // could not open jam_log.xml
    {
        std::cerr << "Can't open jam_log.xml in target:\n "
            << target_dir.string() << "\n";
        target += "<td>" + missing_residue_msg + "</td>";
        return false;
    }

    string test_type( "unknown" );
    bool always_show_run_output( false );

    xml::element_ptr dbp = xml::parse( file, xml_file_path.string() );
    const xml::element & db( *dbp );

    test_type = attribute_value( db, "test-type" );
    always_show_run_output
        = attribute_value( db, "show-run-output" ) == "true";

    // the result element is named after the test type with any
    // "_fail" / "_pyd" suffix stripped ("run_fail" -> "run", ...)
    std::string test_type_base( test_type );
    if ( test_type_base.size() > 5 )
    {
        const string::size_type trailer = test_type_base.size() - 5;
        if ( test_type_base.substr( trailer ) == "_fail" )
        {
            test_type_base.erase( trailer );
        }
    }
    if ( test_type_base.size() > 4 )
    {
        const string::size_type trailer = test_type_base.size() - 4;
        if ( test_type_base.substr( trailer ) == "_pyd" )
        {
            test_type_base.erase( trailer );
        }
    }
    const xml::element & test_type_element( find_element( db, test_type_base ) );

    pass = !test_type_element.name.empty()
        && attribute_value( test_type_element, "result" ) != "fail";

    if (!no_links){
        if(!test_type_element.name.empty())
            note = attribute_value( test_type_element, "result" ) == "note";

        // emit the detailed output into the links file; remembers whether
        // anything (and whether compiler messages, value 2) was written
        anything_generated =
            generate_report(
                db,
                lib_name,
                test_type,
                target_dir,
                pass,
                always_show_run_output || note
            );
    }

    // generate the status table cell pass/warn/fail HTML
    target += "<td align=\"right\">";
    if ( anything_generated != 0 )
    {
        // link the cell text to the detailed-output anchor
        target += "<a href=\"";
        target += links_name;
        target += "#";
        std::copy(
            html_from_path(target_dir.string().begin()),
            html_from_path(target_dir.string().end()),
            std::back_inserter(target)
        );
        target += "\">";
        target += pass
            ? (anything_generated < 2 ? pass_msg : warn_msg)
            : fail_msg;
        target += "</a>";
        if ( pass && note ) target += note_msg;
    }
    else target += pass ? pass_msg : fail_msg;

    // if profiling
    if(profile && pass){
        // add link to profile
        target += " <a href=\"";
        target += (target_dir / "profile.txt").string();
        target += "\"><i>Profile</i></a>";
    }

    // if notes, generate the superscript HTML
    // if ( !notes.empty() )
    // target += get_notes( toolset, lib_name, test_name, !pass );

    target += "</td>";
    return (anything_generated != 0) || !pass;
}
bool visit_node_tree(
const col_node & node,
fs::path dir_root,
const string & lib_name,
string & target,
bool profile
){
bool retval = false;
if(node.has_leaf){
retval = do_cell(
dir_root,
lib_name,
target,
profile
);
}
col_node::subcolumns_t::const_iterator col_itr;
for(
col_itr = node.m_subcolumns.begin();
col_itr != node.m_subcolumns.end();
++col_itr
){
fs::path subdir = dir_root / col_itr->first;
retval |= visit_node_tree(
col_itr->second,
subdir,
lib_name,
target,
col_itr->first == "profile"
);
}
return retval;
}
// emit results for each test ----------------------------------------------//
// Emit one <tr> row for a single test directory.  If --ignore-pass was
// given and every cell was a plain pass, the whole row is erased again.
void do_row(
    col_node test_node,
    const fs::path & test_dir,
    const string & lib_name,
    const string & test_name,
    string & target
){
    string::size_type row_start_pos = target.size();
    target += "<tr>";
    target += "<td>";
    //target += "<a href=\"" + url_prefix_dir_view + "/libs/" + lib_name + "\">";
    target += test_name;
    // Bug fix: the matching "<a ...>" above is commented out, so the
    // "</a>" that used to be emitted here produced an unbalanced close
    // tag; it has been removed.
    target += "</td>";
    // target += "<td>" + test_type + "</td>";
    bool no_warn_save = no_warn;
    // if ( test_type.find( "fail" ) != string::npos ) no_warn = true;

    // emit cells on this row
    bool anything_to_report = visit_node_tree(
        test_node,
        test_dir,
        lib_name,
        target,
        false
    );
    target += "</tr>";

    // drop the whole row when there is nothing to show
    if ( ignore_pass
        && ! anything_to_report )
        target.erase( row_start_pos );
    no_warn = no_warn_save;
}
// do_table_body -----------------------------------------------------------//
// Generate one row per test directory under test_lib_dir.  Rows are
// collected in a vector, sorted, then streamed to `report`.
void do_table_body(
    col_node root_node,
    const string & lib_name,
    const fs::path & test_lib_dir
){
    // rows are held in a vector so they can be sorted, if desired.
    std::vector<string> results;
    for ( fs::directory_iterator itr( test_lib_dir ); itr != end_itr; ++itr )
    {
        if ( !fs::is_directory( *itr ) )
            continue;

        // strip a trailing ".test" from the directory name, if present
        string test_name = itr->leaf();
        const string::size_type suffix = test_name.find( ".test" );
        if ( suffix != string::npos )
            test_name.resize( suffix );

        results.push_back( std::string() );
        do_row(
            root_node,
            *itr, // test dir
            lib_name,
            test_name,
            results.back()
        );
    }
    std::sort( results.begin(), results.end() );
    for ( std::vector<string>::const_iterator row = results.begin();
          row != results.end();
          ++row )
    {
        report << *row << "\n";
    }
}
// column header-----------------------------------------------------------//
int header_depth(const col_node & root){
col_node::subcolumns_t::const_iterator itr;
int max_depth = 1;
for(itr = root.m_subcolumns.begin(); itr != root.m_subcolumns.end(); ++itr){
max_depth = std::max(max_depth, itr->second.rows);
}
return max_depth;
}
// Emit a single header <td> to `report`, adding colspan/rowspan
// attributes only when they exceed one.
void header_cell(int rows, int cols, const std::string & name){
    report << "<td align=\"center\" " ;
    if(cols > 1)
        report << "colspan=\"" << cols << "\" " ;
    if(rows > 1)
        // span rows to the end the header
        report << "rowspan=\"" << rows << "\" " ;
    report << ">" << name << "</td>\n";
}
// Emit the header cells for one visual row of the column-header area.
// The column tree is walked depth-first: nodes above `display_row` only
// recurse; nodes at `display_row` emit one cell per subcolumn.
// `row_count` is the total number of header rows (header_depth), used to
// compute row spans that stretch to the bottom of the header.
void emit_column_headers(
    const col_node & node,
    int display_row,
    int current_row,
    int row_count
){
    if(current_row < display_row){
        if(! node.m_subcolumns.empty()){
            col_node::subcolumns_t::const_iterator itr;
            for(itr = node.m_subcolumns.begin(); itr != node.m_subcolumns.end(); ++itr){
                emit_column_headers(itr->second, display_row, current_row + 1, row_count);
            }
        }
        return;
    }
    // a node that both holds its own result and has subcolumns gets an
    // unnamed filler cell for its own result column
    if(node.has_leaf && ! node.m_subcolumns.empty()){
        header_cell(row_count - current_row, 1, std::string(""));
    }
    col_node::subcolumns_t::const_iterator itr;
    for(itr = node.m_subcolumns.begin(); itr != node.m_subcolumns.end(); ++itr){
        if(1 == itr->second.rows)
            // bottom-level header: stretch down to the end of the header
            header_cell(row_count - current_row, itr->second.cols, itr->first);
        else
            header_cell(1, itr->second.cols, itr->first);
    }
}
// Locate the directory holding the test binaries: walk up from the
// current directory until "bin.v2" (Boost.Build v2) or "bin/boost" (v1)
// is found, then re-append the path components from "libs" downward.
// Throws std::string messages on failure.
fs::path find_lib_test_dir(){
    // walk up from the path where we started until we find
    // bin or bin.v2
    // NOTE(review): leaf_itr starts at end() and *leaf_itr is evaluated
    // below before it has been decremented onto a valid component — this
    // looks like undefined behavior on the first iteration; confirm
    // against Boost.Filesystem v2 path-iterator semantics.
    fs::path::const_iterator leaf_itr = fs::initial_path().end();
    fs::path test_lib_dir = fs::initial_path();
    for(;;){
        if(fs::is_directory( test_lib_dir / "bin.v2")){
            test_lib_dir /= "bin.v2";
            break;
        }
        if(fs::is_directory( test_lib_dir / "bin")){
            // v1 includes the word boost
            test_lib_dir /= "bin";
            test_lib_dir /= "boost";
            break;
        }
        // reached the filesystem root without finding a binary directory
        if(test_lib_dir.empty())
            throw std::string("binary path not found");
        if(*leaf_itr != "libs")
            --leaf_itr;
        test_lib_dir.remove_leaf();
    }
    if(leaf_itr == fs::initial_path().end())
        throw std::string("must be run from within a library directory");
    // re-append everything from "libs" down to the starting directory
    while(leaf_itr != fs::initial_path().end()){
        test_lib_dir /= *leaf_itr++; // append "libs" and the rest
    }
    return test_lib_dir;
}
// Derive the library name (e.g. "serialization" or
// "math/special_functions") from a path: everything after the last
// "libs" component, joined with '/'.
string find_lib_name(fs::path lib_test_dir){
    unsigned int count;
    fs::path::iterator e_itr = lib_test_dir.end();
    // scan backwards, counting components, until "libs" is found
    // NOTE(review): if "libs" is absent, *--e_itr walks past begin();
    // lib_test_dir never changes inside this loop, so the empty() guard
    // below cannot fire — confirm callers always pass a path that
    // contains "libs".
    for(count = 0;; ++count){
        if(*--e_itr == "libs")
            break;
        if(lib_test_dir.empty())
            throw std::string("must be run from within a library directory");
    }
    // rebuild the name from the components after "libs", '/'-separated
    string library_name;
    for(;;){
        library_name.append(*++e_itr);
        if(1 == --count)
            break;
        library_name.append("/");
    }
    return library_name;
}
// Walk up from the current directory until a directory containing a
// "boost" subdirectory is found; throws a std::string message if the
// search exhausts the path.
fs::path find_boost_root(){
    fs::path root = fs::initial_path();
    while( !fs::is_directory( root / "boost" ) ){
        if( root.empty() )
            throw std::string("boost root not found");
        root.remove_leaf();
    }
    return root;
}
// do_table ----------------------------------------------------------------//
// Top-level driver for the result table: build the merged column tree
// from every test's build directories, emit the multi-row column
// headers, then the table body.
void do_table(const string & lib_name)
{
    col_node root_node;

    // merge every test's build-variant subtree into one column tree
    fs::path lib_test_dir = find_lib_test_dir();
    for ( fs::directory_iterator itr(lib_test_dir); itr != end_itr; ++itr )
    {
        if(! fs::is_directory(*itr))
            continue;
        build_node_tree(*itr, root_node);
    }

    // visit directory nodes and record nodetree
    report << "<table border=\"1\" cellspacing=\"0\" cellpadding=\"5\">\n";

    // emit
    root_node.get_spans(); // compute row/col spans before rendering
    int row_count = header_depth(root_node);
    report << "<tr>\n";
    report << "<td rowspan=\"" << row_count << "\">Test Name</td>\n";

    // emit column headers
    int row_index = 0;
    for(;;){
        emit_column_headers(root_node, row_index, 0, row_count);
        report << "</tr>" ;
        if(++row_index == row_count)
            break;
        report << "<tr>\n";
    }

    // now the rest of the table body
    do_table_body(root_node, lib_name, lib_test_dir);

    report << "</table>\n";
}
}// unnamed namespace
// main --------------------------------------------------------------------//
#define BOOST_NO_CPP_MAIN_SUCCESS_MESSAGE
#include <boost/test/included/prg_exec_monitor.hpp>
// Program entry point (invoked through the Boost program-execution
// monitor; hence the name).  Parses command-line options, opens the
// report (and optional links) HTML files, emits the headers, generates
// the result table, and closes both documents.
int cpp_main( int argc, char * argv[] ) // note name!
{
    fs::path comment_path;
    while ( argc > 1 && *argv[1] == '-' )
    {
        if ( argc > 2 && std::strcmp( argv[1], "--compiler" ) == 0 )
            { specific_compiler = argv[2]; --argc; ++argv; }
        else if ( argc > 2 && std::strcmp( argv[1], "--locate-root" ) == 0 )
            { locate_root = fs::path( argv[2], fs::native ); --argc; ++argv; }
        else if ( argc > 2 && std::strcmp( argv[1], "--boost-root" ) == 0 )
            { boost_root = fs::path( argv[2], fs::native ); --argc; ++argv; }
        else if ( argc > 2 && std::strcmp( argv[1], "--comment" ) == 0 )
            { comment_path = fs::path( argv[2], fs::native ); --argc; ++argv; }
        else if ( argc > 2 && std::strcmp( argv[1], "--notes" ) == 0 )
            { notes_path = fs::path( argv[2], fs::native ); --argc; ++argv; }
        else if ( argc > 2 && std::strcmp( argv[1], "--notes-map" ) == 0 )
            { notes_map_path = fs::path( argv[2], fs::native ); --argc; ++argv; }
        else if ( std::strcmp( argv[1], "--ignore-pass" ) == 0 ) ignore_pass = true;
        else if ( std::strcmp( argv[1], "--no-warn" ) == 0 ) no_warn = true;
        // Bug fix: "--v2" is a bare flag; the extra {--argc; ++argv;} the
        // original executed here made it also swallow the argument that
        // followed it.  The common --argc/++argv at the bottom of the loop
        // already consumes the flag itself.
        else if ( std::strcmp( argv[1], "--v2" ) == 0 )
            {} // accepted for compatibility; nothing to do
        else if ( argc > 2 && std::strcmp( argv[1], "--jamfile" ) == 0)
            {--argc; ++argv;} // skip the ignored path argument
        else { std::cerr << "Unknown option: " << argv[1] << "\n"; argc = 1; }
        --argc;
        ++argv;
    }

    if ( argc != 2 && argc != 3 )
    {
        std::cerr <<
            "Usage: library_status [options...] status-file [links-file]\n"
            " boost-root is the path to the boost tree root directory.\n"
            " status-file and links-file are paths to the output files.\n"
            " options: --compiler name Run for named compiler only\n"
            " --ignore-pass Do not report tests which pass all compilers\n"
            " --no-warn Warnings not reported if test passes\n"
            " --boost-root path default derived from current path.\n"
            " --locate-root path Path to ALL_LOCATE_TARGET for bjam;\n"
            " default boost-root.\n"
            " --comment path Path to file containing HTML\n"
            " to be copied into status-file.\n"
            " --notes path Path to file containing HTML\n"
            " to be copied into status-file.\n"
            " --notes-map path Path to file of toolset/test,n lines, where\n"
            " n is number of note bookmark in --notes file.\n"
            // Bug fix: the example previously named "compiler_status",
            // which is a different program.
            "Example: library_status --compiler gcc /boost-root cs.html cs-links.html\n"
            "Note: Only the leaf of the links-file path and --notes file string are\n"
            "used in status-file HTML links. Thus for browsing, status-file,\n"
            "links-file, and --notes file must all be in the same directory.\n"
            ;
        return 1;
    }

    if(boost_root.empty())
        boost_root = find_boost_root();
    if ( locate_root.empty() )
        locate_root = boost_root;

    report.open( fs::path( argv[1], fs::native ) );
    if ( !report )
    {
        // Bug fix: the message previously printed argv[2], one past the
        // path that actually failed to open.
        std::cerr << "Could not open report output file: " << argv[1] << std::endl;
        return 1;
    }

    if ( argc == 3 )
    {
        fs::path links_path( argv[2], fs::native );
        links_name = links_path.leaf();
        links_file.open( links_path );
        if ( !links_file )
        {
            // Bug fix: the message previously printed argv[3] (one past
            // the end of the accepted argument list).
            std::cerr << "Could not open links output file: " << argv[2] << std::endl;
            return 1;
        }
    }
    else no_links = true;

    build_notes_bookmarks();

    const string library_name = find_lib_name(fs::initial_path());

    char run_date[128];
    std::time_t tod;
    std::time( &tod );
    std::strftime( run_date, sizeof(run_date),
        "%X UTC, %A %d %B %Y", std::gmtime( &tod ) );

    // report header
    report
        << "<html>\n"
        << "<head>\n"
        << "<title>Boost Library Status Automatic Test</title>\n"
        << "</head>\n"
        << "<body bgcolor=\"#ffffff\" text=\"#000000\">\n"
        << "<table border=\"0\">\n"
        << "<tr>\n"
        << "<td><img border=\"0\" "
        << "src=\""
        << boost_root / "boost.png"
        << "\" width=\"277\" "
        << "height=\"86\"></td>\n"
        << "<td>\n"
        << "<h1>Library Status: " + library_name + "</h1>\n"
        << "<b>Run Date:</b> "
        << run_date
        << "\n"
        ;

    // copy the optional --comment file verbatim into the report
    if ( !comment_path.empty() )
    {
        fs::ifstream comment_file( comment_path );
        if ( !comment_file )
        {
            std::cerr << "Could not open \"--comment\" input file: " << comment_path.string() << std::endl;
            return 1;
        }
        char c;
        while ( comment_file.get( c ) ) { report.put( c ); }
    }

    report << "</td>\n</table>\n<br>\n";

    // links-file header
    if ( !no_links )
    {
        links_file
            << "<html>\n"
            << "<head>\n"
            << "<title>Boost Library Status Error Log</title>\n"
            << "</head>\n"
            << "<body bgcolor=\"#ffffff\" text=\"#000000\">\n"
            << "<table border=\"0\">\n"
            << "<tr>\n"
            << "<td><img border=\"0\" src=\""
            << boost_root / "boost.png"
            << "\" width=\"277\" "
            << "height=\"86\"></td>\n"
            << "<td>\n"
            << "<h1>Library Status: " + library_name + "</h1>\n"
            << "<b>Run Date:</b> "
            << run_date
            << "\n</td>\n</table>\n<br>\n"
            ;
    }

    do_table(library_name);

    if ( load_notes_html() ) report << notes_html << "\n";

    report << "</body>\n"
        "</html>\n"
        ;

    if ( !no_links )
    {
        links_file
            << "</body>\n"
            "</html>\n"
            ;
    }
    return 0;
}

View file

@ -0,0 +1,15 @@
@echo off
rem Run a library's test suite via bjam, convert the log with
rem process_jam_log, and produce library_status.html / links.html.
rem All arguments are forwarded to bjam; at least one is required.
if not "%1" == "" goto bjam
echo Usage: %0 "<bjam arguments>"
echo where typical bjam arguements are:
echo --toolset=msvc-7.1,gcc
echo variant=debug,release,profile
echo link=static,shared
echo threading=single,multi
echo -sBOOST_ARCHIVE_LIST="<archive name>"
goto end
:bjam
rem capture all bjam output (including stderr) for post-processing
bjam --dump-tests %* >bjam.log 2>&1
process_jam_log --v2 <bjam.log
library_status library_status.html links.html
:end

View file

@ -0,0 +1,14 @@
# Run bjam for one Boost library, post-process its log, and build the
# library_status / links HTML pages in the current directory.
if test $# -eq 0
then
    echo "Usage: $0 <bjam arguments>"
    echo "Typical bjam arguments are:"
    echo "  --toolset=msvc-7.1,gcc"
    echo "  variant=debug,release,profile"
    echo "  link=static,shared"
    echo "  threading=single,multi"
    echo "  -sBOOST_ARCHIVE_LIST=<archive name>"
else
    # "$@" (quoted) forwards each argument intact; the unquoted $@ used
    # before re-split arguments containing spaces.
    bjam --dump-tests "$@" >bjam.log 2>&1
    process_jam_log --v2 <bjam.log
    library_status library_status.html links.html
fi

View file

@ -0,0 +1,85 @@
# Run the library-status suite for every Boost library (and sub-library)
# and build status/library_status_contents.html.  Must be run from the
# Boost root directory.
if test $# -eq 0
then
    echo "Usage: $0 <bjam arguments>"
    echo "Typical bjam arguments are:"
    echo "  --toolset=msvc-7.1,gcc"
    echo "  variant=debug,release,profile"
    echo "  link=static,shared"
    echo "  threading=single,multi"
    echo
    echo "note: make sure this script is run from boost root directory !!!"
    exit 1
fi
if ! test -e libs
then
    echo No libs directory found. Run from boost root directory !!!
    exit 1
fi

# html header
cat <<end >status/library_status_contents.html
<!doctype HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html>
<!--
(C) Copyright 2007 Robert Ramey - http://www.rrsd.com .
Use, modification and distribution is subject to the Boost Software
License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
-->
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<link rel="stylesheet" type="text/css" href="../boost.css">
<title>Library Status Contents</title>
<body>
end

# NOTE: the original used 'cd >nul dir' -- a DOS-ism; on a POSIX shell the
# redirection creates a literal file named 'nul' in every directory visited.
# 'cd' prints nothing on success, so no redirection is needed at all.
cd libs
# run tests, create library pages, and body of summary page
for lib_name in *
do
    if test -d "$lib_name"
    then
        cd "$lib_name"
        if test -e "test/Jamfile.v2"
        then
            cd test
            echo $lib_name
            echo >>../../../status/library_status_contents.html "<a target=\"detail\" href=\"../libs/$lib_name/test/library_status.html\">$lib_name</a><br>"
            ../../../tools/regression/library_test "$@"
            cd ..
        fi
        for sublib_name in *
        do
            if test -d "$sublib_name"
            then
                cd "$sublib_name"
                if test -e "test/Jamfile.v2"
                then
                    cd test
                    echo $lib_name/$sublib_name
                    echo >>../../../../status/library_status_contents.html "<a target=\"detail\" href=\"../libs/$lib_name/$sublib_name/test/library_status.html\">$lib_name/$sublib_name</a><br>"
                    ../../../../tools/regression/library_test "$@"
                    cd ..
                fi
                cd ..
            fi
        done
        cd ..
    fi
done
cd ..

# html trailer
cat <<end >>status/library_status_contents.html
</body>
</html>
end

View file

@ -16,22 +16,20 @@
#include <string>
#include <cstring>
#include <map>
#include <utility> // for make_pair
#include <utility> // for make_pair
#include <ctime>
#include <cctype> // for tolower
#include <cstdlib> // for exit
using std::string;
namespace xml = boost::tiny_xml;
namespace fs = boost::filesystem;
#define BOOST_NO_CPP_MAIN_SUCCESS_MESSAGE
#include <boost/test/included/prg_exec_monitor.hpp>
// options
static bool echo = false;
static bool create_dirs = false;
static bool boost_build_v2 = false;
static bool boost_build_v2 = true;
namespace
{
@ -47,7 +45,33 @@ namespace
fs::path boost_root;
fs::path locate_root; // ALL_LOCATE_TARGET (or boost_root if none)
// append_html -------------------------------------------------------------//
// set_boost_root --------------------------------------------------------//
// Locate the boost root by walking up from the initial directory until a
// directory containing "libs" is found, and store it in the global
// boost_root.  If the filesystem root is reached first, print a message
// and exit(1).  The process working directory is restored before return.
void set_boost_root()
{
  boost_root = fs::initial_path();
  for(;;)
  {
    if ( fs::exists( boost_root / "libs" ) )
    {
      fs::current_path( fs::initial_path() ); // restore initial path
      return;
    }
    fs::current_path( ".." );
    // "cd .." changed nothing, so we are already at the filesystem root
    if ( boost_root == fs::current_path() )
    {
      fs::current_path( fs::initial_path() ); // restore initial path
      std::cout <<
        "Abort: process_jam_log must be run from within a boost directory tree\n";
      std::exit(1);
    }
    boost_root = fs::current_path();
  }
}
// append_html -------------------------------------------------------------//
void append_html( const string & src, string & target )
{
@ -523,33 +547,26 @@ namespace
// main --------------------------------------------------------------------//
int cpp_main( int argc, char ** argv )
int main( int argc, char ** argv )
{
// Turn off synchronization with corresponding C standard library files. This
// gives a significant speed improvement on platforms where the standard C++
// streams are implemented using standard C files.
std::ios::sync_with_stdio(false);
fs::initial_path();
if ( argc <= 1 )
std::cout << "Usage: bjam [bjam-args] | process_jam_log [--echo] [--create-directories] [--v2] [locate-root]\n"
std::cout << "Usage: bjam [bjam-args] | process_jam_log [--echo] [--create-directories] [--v1|v2] [locate-root]\n"
"locate-root - the same as the bjam ALL_LOCATE_TARGET\n"
" parameter, if any. Default is boost-root.\n"
"create-directories - if the directory for xml file doesn't exists - creates it.\n"
" usually used for processing logfile on different machine\n";
" usually used for processing logfile on different machine\n"
"v2 - bjam version 2 used (default).\n"
"v1 - bjam version 1 used.\n"
;
boost_root = fs::initial_path();
while ( !boost_root.empty()
&& !fs::exists( boost_root / "libs" ) )
{
boost_root /= "..";
}
if ( boost_root.empty() )
{
std::cout << "must be run from within the boost-root directory tree\n";
return 1;
}
set_boost_root();
boost_root.normalize();
@ -572,6 +589,11 @@ int cpp_main( int argc, char ** argv )
--argc; ++argv;
}
if ( argc > 1 && std::strcmp( argv[1], "--v1" ) == 0 )
{
boost_build_v2 = false;
--argc; ++argv;
}
if (argc > 1)
{
@ -681,6 +703,14 @@ int cpp_main( int argc, char ** argv )
line_start.find( ".linkonce" ) == string::npos )
)
{
if ( !test2info.size() )
{
std::cout << "*****Error - No \"boost-test\" lines encountered.\n"
" (Usually occurs when bjam was envoked without the --dump-tests option\n"
" or bjam was envoked in the wrong directory)\n";
return 1;
}
string action( ( line_start.find( "Link-action" ) != string::npos
|| line_start.find( "vc-Link" ) != string::npos
|| line_start.find( "Archive-action" ) != string::npos

View file

@ -0,0 +1,774 @@
#!/usr/bin/python
# Copyright MetaCommunications, Inc. 2003-2007
# Copyright Redshift Software, Inc. 2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import glob
import optparse
import os
import os.path
import platform
import sys
import time
#~ Place holder for xsl_reports/util module
utils = None
repo_root = {
'anon' : 'http://svn.boost.org/svn/boost/',
'user' : 'https://svn.boost.org/svn/boost/'
}
repo_path = {
'trunk' : 'trunk',
'release' : 'branches/release',
'build' : 'trunk/tools/build/v2',
'jam' : 'tags/tools/jam/Boost_Jam_3_1_15/src',
'regression' : 'trunk/tools/regression'
}
class runner:
def __init__(self,root):
    """Parse command-line options directly onto self, derive all working
    paths from *root*, and immediately run the requested actions (main())."""
    # Build the "commands: ..." help text from the command_* method names.
    commands = map(
        lambda m: m[8:].replace('_','-'),
        filter(
            lambda m: m.startswith('command_'),
            runner.__dict__.keys())
        )
    commands.sort()
    commands = "commands: %s" % ', '.join(commands)
    opt = optparse.OptionParser(
        usage="%prog [options] [commands]",
        description=commands)
    #~ Base Options:
    opt.add_option( '--runner',
        help="runner ID (e.g. 'Metacomm')" )
    opt.add_option( '--comment',
        help="an HTML comment file to be inserted in the reports" )
    opt.add_option( '--tag',
        help="the tag for the results" )
    opt.add_option( '--toolsets',
        help="comma-separated list of toolsets to test with" )
    opt.add_option( '--incremental',
        help="do incremental run (do not remove previous binaries)",
        action='store_true' )
    opt.add_option( '--timeout',
        help="specifies the timeout, in minutes, for a single test run/compilation",
        type='int' )
    opt.add_option( '--bjam-options',
        help="options to pass to the regression test" )
    opt.add_option( '--bjam-toolset',
        help="bootstrap toolset for 'bjam' executable" )
    opt.add_option( '--pjl-toolset',
        help="bootstrap toolset for 'process_jam_log' executable" )
    opt.add_option( '--platform' )
    #~ Source Options:
    opt.add_option( '--user',
        help="Boost SVN user ID" )
    opt.add_option( '--local',
        help="the name of the boost tarball" )
    opt.add_option( '--force-update',
        help="do an SVN update (if applicable) instead of a clean checkout, even when performing a full run",
        action='store_true' )
    opt.add_option( '--have-source',
        help="do neither a tarball download nor an SVN update; used primarily for testing script changes",
        action='store_true' )
    #~ Connection Options:
    opt.add_option( '--proxy',
        help="HTTP proxy server address and port (e.g.'http://www.someproxy.com:3128')" )
    opt.add_option( '--ftp-proxy',
        help="FTP proxy server (e.g. 'ftpproxy')" )
    opt.add_option( '--dart-server',
        help="the dart server to send results to" )
    #~ Debug Options:
    opt.add_option( '--debug-level',
        help="debugging level; controls the amount of debugging output printed",
        type='int' )
    opt.add_option( '--send-bjam-log',
        help="send full bjam log of the regression run",
        action='store_true' )
    opt.add_option( '--mail',
        help="email address to send run notification to" )
    opt.add_option( '--smtp-login',
        help="STMP server address/login information, in the following form: <user>:<password>@<host>[:<port>]" )
    opt.add_option( '--skip-tests',
        help="do not run bjam; used for testing script changes",
        action='store_true' )
    #~ Defaults -- overwritten by parse_args() storing options onto self.
    self.runner = None
    self.comment='comment.html'
    self.tag='trunk'
    self.toolsets=None
    self.incremental=False
    self.timeout=5
    self.bjam_options=''
    self.bjam_toolset=''
    self.pjl_toolset=''
    self.platform=self.platform_name()
    self.user='anonymous'
    self.local=None
    self.force_update=False
    self.have_source=False
    self.proxy=None
    self.ftp_proxy=None
    self.dart_server=None
    self.debug_level=0
    self.send_bjam_log=False
    self.mail=None
    self.smtp_login=None
    self.skip_tests=False
    # parse_args(values=self): option values become attributes of self;
    # leftover positional arguments become the list of actions to run.
    ( _opt_, self.actions ) = opt.parse_args(None,self)
    if not self.actions or self.actions == []:
        self.actions = [ 'regression' ]
    #~ Initialize option dependent values.
    self.regression_root = root
    self.boost_root = os.path.join( self.regression_root, 'boost' )
    self.regression_results = os.path.join( self.regression_root, 'results' )
    self.regression_log = os.path.join( self.regression_results, 'bjam.log' )
    self.tools_bb_root = os.path.join( self.regression_root,'tools_bb' )
    self.tools_bjam_root = os.path.join( self.regression_root,'tools_bjam' )
    self.tools_regression_root = os.path.join( self.regression_root,'tools_regression' )
    self.xsl_reports_dir = os.path.join( self.tools_regression_root, 'xsl_reports' )
    self.timestamp_path = os.path.join( self.regression_root, 'timestamp' )
    if sys.platform == 'win32':
        self.patch_boost = 'patch_boost.bat'
        self.bjam = { 'name' : 'bjam.exe' }
        self.process_jam_log = { 'name' : 'process_jam_log.exe' }
    else:
        self.patch_boost = 'patch_boost'
        self.bjam = { 'name' : 'bjam' }
        self.process_jam_log = { 'name' : 'process_jam_log' }
    # Expand the platform-specific name stubs above into full tool
    # specifications understood by build_if_needed()/tool_path().
    self.bjam = {
        'name' : self.bjam['name'],
        'build_cmd' : self.bjam_build_cmd,
        'path' : os.path.join(self.regression_root,self.bjam['name']),
        'source_dir' : self.tools_bjam_root,
        'build_dir' : self.tools_bjam_root,
        'build_args' : ''
        }
    self.process_jam_log = {
        'name' : self.process_jam_log['name'],
        'build_cmd' : self.bjam_cmd,
        'path' : os.path.join(self.regression_root,self.process_jam_log['name']),
        'source_dir' : os.path.join(self.tools_regression_root,'build'),
        'build_dir' : os.path.join(self.tools_regression_root,'build'),
        'build_args' : 'process_jam_log -d2'
        }
    if self.debug_level > 0:
        self.log('Regression root = %s'%self.regression_root)
        self.log('Boost root = %s'%self.boost_root)
        self.log('Regression results = %s'%self.regression_results)
        self.log('Regression log = %s'%self.regression_log)
        self.log('BB root = %s'%self.tools_bb_root)
        self.log('Bjam root = %s'%self.tools_bjam_root)
        self.log('Tools root = %s'%self.tools_regression_root)
        self.log('XSL reports dir = %s'%self.xsl_reports_dir)
        self.log('Timestamp = %s'%self.timestamp_path)
        self.log('Patch Boost script = %s'%self.patch_boost)
    self.main()
#~ The various commands that make up the testing sequence...
def command_cleanup(self,*args):
    """Delete checked-out sources ('source') and/or build output ('bin').

    With no arguments both are cleaned."""
    targets = args if args else [ 'source', 'bin' ]
    if 'source' in targets:
        self.log( 'Cleaning up "%s" directory ...' % self.boost_root )
        self.rmtree( self.boost_root )
    if 'bin' in targets:
        for victim in ( os.path.join( self.boost_root, 'bin' ),
                        os.path.join( self.boost_root, 'bin.v2' ),
                        self.regression_results ):
            self.log( 'Cleaning up "%s" directory ...' % victim )
            self.rmtree( victim )
def command_get_tools(self):
    """Fetch Boost.Build v2, Boost.Jam and the regression tools.

    Each tool is checked out via SVN when a user is configured, otherwise
    downloaded and unpacked as a snapshot tarball.  The three copies of
    that logic in the original are folded into one local helper."""
    def fetch(message, repo_key, tool_root):
        # Fetch one tool tree into tool_root.
        self.log( message )
        if self.user and self.user != '':
            os.chdir( os.path.dirname(tool_root) )
            self.svn_command( 'co %s %s' % (
                self.svn_repository_url(repo_path[repo_key]),
                os.path.basename(tool_root) ) )
        else:
            self.retry( lambda: self.download_tarball(
                os.path.basename(tool_root)+".tar.bz2",
                self.tarball_url(repo_path[repo_key]) ) )
            self.unpack_tarball(
                tool_root+".tar.bz2",
                os.path.basename(tool_root) )
    fetch( 'Getting Boost.Build v2...', 'build', self.tools_bb_root )
    fetch( 'Getting Boost.Jam...', 'jam', self.tools_bjam_root )
    # was: '...tools an utilities...' (typo in the log message)
    fetch( 'Getting regression tools and utilities...', 'regression',
        self.tools_regression_root )
def command_get_source(self):
    """Fetch a fresh copy of the Boost sources (SVN checkout or tarball)."""
    self.refresh_timestamp()
    self.log( 'Getting sources (%s)...' % self.timestamp() )
    fetcher = self.get_tarball
    if self.user and self.user != '':
        fetcher = self.svn_checkout
    self.retry( fetcher )
def command_update_source(self):
    """Update the SVN working copy if we have one (or a user); otherwise
    fall back to a full source fetch."""
    have_user = bool( self.user and self.user != '' )
    have_wc = os.path.exists( os.path.join( self.boost_root, '.svn' ) )
    if not ( have_user or have_wc ):
        self.command_get_source()
        return
    open( self.timestamp_path, 'w' ).close()
    self.log( 'Updating sources from SVN (%s)...' % self.timestamp() )
    self.retry( self.svn_update )
def command_patch(self):
    """Execute the optional user-supplied patch script, when present."""
    self.import_utils()
    patch_boost_path = os.path.join( self.regression_root, self.patch_boost )
    if not os.path.exists( patch_boost_path ):
        return
    self.log( 'Found patch file "%s". Executing it.' % patch_boost_path )
    os.chdir( self.regression_root )
    utils.system( [ patch_boost_path ] )
def command_setup(self):
    """Apply any local patch, then ensure bjam and process_jam_log exist."""
    self.command_patch()
    for tool, toolset in ( (self.bjam, self.bjam_toolset),
                           (self.process_jam_log, self.pjl_toolset) ):
        self.build_if_needed( tool, toolset )
def command_test(self, *args):
    """Driver for the test phase; *args selects sub-steps out of
    'clean', 'test' and 'process' (default: test + process)."""
    if not args:
        args = [ "test", "process" ]
    self.import_utils()
    self.log( 'Making "%s" directory...' % self.regression_results )
    utils.makedirs( self.regression_results )
    # The results_libs / results_status locals computed here in the
    # original were never used (command_test_clean recomputes them).
    if "clean" in args:
        self.command_test_clean()
    if "test" in args:
        self.command_test_run()
    if "process" in args:
        self.command_test_process()
def command_test_clean(self):
    """Delete per-library and status results left by a previous run."""
    for subdir in ( 'libs', 'status' ):
        self.rmtree( os.path.join( self.regression_results, subdir ) )
def command_test_run(self):
    """Run bjam over the whole test suite, appending all output to the
    regression log."""
    self.import_utils()
    test_cmd = '%s -d2 --dump-tests %s "--build-dir=%s" >>"%s" 2>&1' % (
        self.bjam_cmd( self.toolsets ),
        self.bjam_options,
        self.regression_results,
        self.regression_log )
    self.log( 'Starting tests (%s)...' % test_cmd )
    cd = os.getcwd()
    os.chdir( os.path.join( self.boost_root, 'status' ) )
    try:
        utils.system( [ test_cmd ] )
    finally:
        # Restore the working directory even if the test run raises.
        os.chdir( cd )
def command_test_process(self):
    """Feed the bjam log through process_jam_log to produce test results."""
    self.import_utils()
    self.log( 'Getting test case results out of "%s"...' % self.regression_log )
    cd = os.getcwd()
    os.chdir( os.path.join( self.boost_root, 'status' ) )
    try:
        utils.checked_system( [
            '"%s" "%s" <"%s"' % (
                self.tool_path(self.process_jam_log),
                self.regression_results,
                self.regression_log )
            ] )
    finally:
        # Restore the working directory even if log processing raises.
        os.chdir( cd )
def command_collect_logs(self):
    """Gather the run's results, comment file and SVN revision, and hand
    them to collect_and_upload_logs.collect_logs()."""
    self.import_utils()
    comment_path = os.path.join( self.regression_root, self.comment )
    if not os.path.exists( comment_path ):
        self.log( 'Comment file "%s" not found; creating default comment.' % comment_path )
        f = open( comment_path, 'w' )
        f.write( '<p>Tests are run on %s platform.</p>' % self.platform_name() )
        f.close()
    if self.incremental:
        run_type = 'incremental'
    else:
        run_type = 'full'
    source = 'tarball'
    revision = ''
    svn_root_file = os.path.join( self.boost_root, '.svn' )
    svn_info_file = os.path.join( self.boost_root, 'svn_info.txt' )
    # A .svn directory means the sources came from SVN, so we can ask
    # 'svn info' for the revision being tested.
    if os.path.exists( svn_root_file ):
        source = 'SVN'
        self.svn_command( 'info --xml "%s" >%s' % (self.boost_root,svn_info_file) )
    if os.path.exists( svn_info_file ):
        f = open( svn_info_file, 'r' )
        svn_info = f.read()
        f.close()
        i = svn_info.find( 'Revision:' )
        if i < 0: i = svn_info.find( 'revision=' ) # --xml format
        if i >= 0:
            # skip past the marker (and the following space/quote), then
            # copy consecutive digits into the revision string
            i += 10
            while svn_info[i] >= '0' and svn_info[i] <= '9':
                revision += svn_info[i]
                i += 1
    from collect_and_upload_logs import collect_logs
    collect_logs(
        self.regression_results,
        self.runner, self.tag, self.platform, comment_path,
        self.timestamp_path,
        self.user,
        source, run_type,
        self.dart_server, self.proxy,
        revision )
def command_upload_logs(self):
    """Upload the collected logs, retrying on transient failures."""
    self.import_utils()
    from collect_and_upload_logs import upload_logs
    def do_upload():
        upload_logs(
            self.regression_results,
            self.runner, self.tag,
            self.user,
            self.ftp_proxy,
            self.debug_level, self.send_bjam_log,
            self.timestamp_path,
            self.dart_server )
    self.retry( do_upload )
def command_regression(self):
    """Run the complete regression sequence: fetch tools and sources,
    build, test, collect/upload logs, with start/end mail notifications.

    NOTE(review): format_time() is not defined anywhere in this file;
    presumably provided by a runtime import -- verify."""
    import socket
    import string
    import traceback  # was missing: the failure path below uses it
    try:
        mail_subject = 'Boost regression for %s on %s' % ( self.tag,
            string.split(socket.gethostname(), '.')[0] )
        start_time = time.localtime()
        if self.mail:
            self.log( 'Sending start notification to "%s"' % self.mail )
            self.send_mail(
                '%s started at %s.' % ( mail_subject, format_time( start_time ) )
                )
        self.command_get_tools()
        if self.local is not None:
            self.log( 'Using local file "%s"' % self.local )
            b = os.path.basename( self.local )
            tag = b[ 0: b.find( '.' ) ]
            self.log( 'Tag: "%s"' % tag )
            # was: self.unpack_tarball( local, ... ) -- NameError; the
            # option value lives on self.
            self.unpack_tarball( self.local, self.boost_root )
        elif self.have_source:
            # was: self.command_cleanup( [ 'bin' ] ) -- through *args that
            # becomes a 1-tuple holding a list, so "'bin' in args" was
            # never true and the cleanup silently did nothing.
            if not self.incremental: self.command_cleanup( 'bin' )
        else:
            if self.incremental or self.force_update:
                if not self.incremental: self.command_cleanup( 'bin' )
            else:
                self.command_cleanup()
            self.command_get_source()
        self.command_setup()
        # Not specifying --toolset in command line is not enough
        # that would mean to use Boost.Build default ones
        # We can skip test only we were explictly
        # told to have no toolsets in command line "--toolset="
        if self.toolsets != '': # --toolset=,
            if not self.skip_tests:
                self.command_test()
            self.command_collect_logs()
            self.command_upload_logs()
        if self.mail:
            self.log( 'Sending report to "%s"' % self.mail )
            end_time = time.localtime()
            self.send_mail(
                '%s completed successfully at %s.' % ( mail_subject, format_time( end_time ) )
                )
    except:
        # Bare except is deliberate: report *any* failure by mail, then
        # re-raise so the caller still sees the original error.
        if self.mail:
            self.log( 'Sending report to "%s"' % self.mail )
            traceback_ = '\n'.join( apply( traceback.format_exception, sys.exc_info() ) )
            end_time = time.localtime()
            self.send_mail(
                '%s failed at %s.' % ( mail_subject, format_time( end_time ) ),
                traceback_ )
        raise
def command_show_revision(self):
    """Print the SVN revision and last-modified date of this script.

    The values come from the $Date$/$Revision$ keyword strings below,
    which Subversion expands on checkout.  NOTE(review): if keyword
    expansion is disabled, match() returns None and this raises."""
    modified = '$Date$'
    revision = '$Revision$'
    import re
    re_keyword_value = re.compile( r'^\$\w+:\s+(.*)\s+\$$' )
    print '\n\tRevision: %s' % re_keyword_value.match( revision ).group( 1 )
    print '\tLast modified on: %s\n' % re_keyword_value.match( modified ).group( 1 )
#~ Utilities...
def main(self):
    """Dispatch each requested action to its command_* method; unknown
    action names are silently ignored."""
    for action in self.actions:
        handler = getattr( self, "command_" + action.replace('-','_'), None )
        if handler is not None:
            handler()
def platform_name(self):
    """Host platform label used in the reports."""
    # See http://article.gmane.org/gmane.comp.lib.boost.testing/933
    special = { 'win32' : 'Windows', 'cygwin' : 'Windows/Cygwin' }
    return special.get( sys.platform, platform.system() )
def log(self,message):
    """Write a '# '-prefixed progress message to stderr, flushing both
    standard streams first so it interleaves correctly with tool output."""
    for stream in ( sys.stdout, sys.stderr ):
        stream.flush()
    sys.stderr.write( '# %s\n' % message )
    sys.stderr.flush()
def rmtree(self,path):
    # Recursively delete *path* if it exists; a no-op otherwise.
    if os.path.exists( path ):
        import shutil
        #~ shutil.rmtree( unicode( path ) )
        if sys.platform == 'win32':
            # 'del' first, then rmtree -- presumably to clear files that
            # shutil.rmtree chokes on (read-only/long paths); TODO confirm.
            os.system( 'del /f /s /q "%s" >nul 2>&1' % path )
            shutil.rmtree( unicode( path ) )
        else:
            os.system( 'rm -f -r "%s"' % path )
def refresh_timestamp( self ):
    """Recreate the timestamp file so its mtime is 'now'."""
    try:
        os.unlink( self.timestamp_path )
    except OSError:
        pass  # did not exist yet
    open( self.timestamp_path, 'w' ).close()
def timestamp( self ):
    """Timestamp-file mtime formatted as an ISO-8601 UTC string."""
    mtime = os.stat( self.timestamp_path ).st_mtime
    return time.strftime( '%Y-%m-%dT%H:%M:%SZ', time.gmtime( mtime ) )
def retry( self, f, max_attempts=5, sleep_secs=10 ):
    # Call f(), retrying on any exception with sleep_secs between tries.
    # Note: range(max_attempts, -1, -1) gives max_attempts+1 total tries;
    # the last failure is re-raised.
    for attempts in range( max_attempts, -1, -1 ):
        try:
            return f()
        except Exception, msg:
            self.log( '%s failed with message "%s"' % ( f.__name__, msg ) )
            if attempts == 0:
                self.log( 'Giving up.' )
                raise
            self.log( 'Retrying (%d more attempts).' % attempts )
            time.sleep( sleep_secs )
def http_get( self, source_url, destination_file ):
    """Download source_url to destination_file in 16 KiB chunks,
    honouring self.proxy when set.  Both handles are now closed even if
    the transfer raises (the original leaked them on error)."""
    import urllib
    proxies = None
    if hasattr(self,'proxy') and self.proxy is not None:
        proxies = { 'http' : self.proxy }
    src = urllib.urlopen( source_url, proxies = proxies )
    try:
        f = open( destination_file, 'wb' )
        try:
            while True:
                data = src.read( 16*1024 )
                if not data: break
                f.write( data )
        finally:
            f.close()
    finally:
        src.close()
def import_utils(self):
    """Lazily bind the xsl_reports 'utils' module to the module-level
    global; later calls are no-ops."""
    global utils
    if utils is not None:
        return
    sys.path.append( self.xsl_reports_dir )
    import utils as utils_module
    utils = utils_module
def build_if_needed( self, tool, toolset ):
    """Build *tool* (a spec dict) with *toolset* unless tool['path']
    already exists.  Records the built executable in tool['build_path']."""
    self.import_utils()
    if os.path.exists( tool[ 'path' ] ):
        self.log( 'Found preinstalled "%s"; will use it.' % tool[ 'path' ] )
        return
    self.log( 'Preinstalled "%s" is not found; building one...' % tool[ 'path' ] )
    if toolset is None:
        if self.toolsets is not None:
            # was: string.split(...) -- NameError, the 'string' module is
            # never imported in this method
            toolset = self.toolsets.split( ',' )[0]
        else:
            toolset = tool[ 'default_toolset' ]
            self.log( 'Warning: No bootstrap toolset for "%s" was specified.' % tool[ 'name' ] )
            self.log( ' Using default toolset for the platform (%s).' % toolset )
    if os.path.exists( tool[ 'source_dir' ] ):
        self.log( 'Found "%s" source directory "%s"' % ( tool[ 'name' ], tool[ 'source_dir' ] ) )
        build_cmd = tool[ 'build_cmd' ]( toolset, tool['build_args'] )
        self.log( 'Building "%s" (%s)...' % ( tool[ 'name'], build_cmd ) )
        utils.system( [ 'cd "%s"' % tool[ 'source_dir' ], build_cmd ] )
    else:
        # was: raise '...' -- string exceptions are invalid; raise a real one
        raise Exception( 'Could not find "%s" source directory "%s"' % ( tool[ 'name' ], tool[ 'source_dir' ] ) )
    if 'build_path' not in tool:
        tool[ 'build_path' ] = self.tool_path( tool )
    if not os.path.exists( tool[ 'build_path' ] ):
        raise Exception( 'Failed to find "%s" after build.' % tool[ 'build_path' ] )
    # was: 'succesfully' (typo in the log message)
    self.log( '%s successfully built in "%s" location' % ( tool[ 'name' ], tool[ 'build_path' ] ) )
def tool_path( self, name_or_spec ):
    """Resolve a tool given as a bare name or a spec dict to an
    executable path, searching the build directory as a last resort."""
    if isinstance( name_or_spec, basestring ):
        return os.path.join( self.regression_root, name_or_spec )
    spec = name_or_spec
    if os.path.exists( spec[ 'path' ] ):
        return spec[ 'path' ]
    if 'build_path' in spec:
        return spec[ 'build_path' ]
    build_dir = spec[ 'build_dir' ]
    self.log( 'Searching for "%s" in "%s"...' % ( spec[ 'name' ], build_dir ) )
    for root, dirs, files in os.walk( build_dir ):
        if spec[ 'name' ] in files:
            return os.path.join( root, spec[ 'name' ] )
    raise Exception( 'Cannot find "%s" in any of the following locations:\n%s' % (
        spec[ 'name' ]
        , '\n'.join( [ spec[ 'path' ], build_dir ] )
        ) )
def bjam_build_cmd( self, *rest ):
    """Command line that bootstraps the bjam executable itself,
    prefixed by BJAM_ENVIRONMENT_SETUP when that variable is set."""
    if sys.platform == 'win32':
        cmd = 'build.bat %s' % self.bjam_toolset
    else:
        cmd = './build.sh %s' % self.bjam_toolset
    setup = os.environ.get( 'BJAM_ENVIRONMENT_SETUP' )
    if setup is not None:
        return '%s & %s' % ( setup, cmd )
    return cmd
def bjam_cmd( self, toolsets, args = '', *rest ):
    """Full bjam invocation command line for the given comma-separated
    toolsets and extra arguments; applies the per-action timeout."""
    build_path = self.regression_root
    if build_path[-1] == '\\': build_path += '\\'
    if self.timeout > 0:
        # -l: bjam per-action time limit, in seconds
        args += ' -l%s' % (self.timeout*60)
    cmd = '"%(bjam)s" "-sBOOST_BUILD_PATH=%(bb)s" "-sBOOST_ROOT=%(boost)s" "--boost=%(boost)s" %(arg)s' % {
        'bjam' : self.tool_path( self.bjam ),
        'bb' : os.pathsep.join([build_path,self.tools_bb_root]),
        'boost' : self.boost_root,
        'arg' : args }
    if toolsets:
        # was: string.join/string.split via a function-local import of the
        # deprecated 'string' module; str methods do the same
        cmd += ' ' + ' '.join( toolsets.split( ',' ) )
    return cmd
def send_mail( self, subject, msg = '' ):
    """Send a notification mail to self.mail, optionally authenticating
    with self.smtp_login ('<user>:<password>@<host>[:<port>]')."""
    import smtplib
    if not self.smtp_login:
        # was: mail.split(...) -- NameError; the address is self.mail
        server_name = 'mail.%s' % self.mail.split( '@' )[-1]
        user_name = None
        password = None
    else:
        server_name = self.smtp_login.split( '@' )[-1]
        # was: string.split(...) -- the 'string' module is never imported here
        ( user_name, password ) = self.smtp_login.split( '@' )[0].split( ':' )
    # was: log(...) -- unqualified; must go through self.log
    self.log( ' Sending mail through "%s"...' % server_name )
    smtp_server = smtplib.SMTP( server_name )
    smtp_server.set_debuglevel( self.debug_level )
    if user_name:
        smtp_server.login( user_name, password )
    smtp_server.sendmail( self.mail, [ self.mail ],
        'Subject: %s\nTo: %s\n\n%s' % ( subject, self.mail, msg ) )
#~ Downloading source, from SVN...
def svn_checkout( self ):
    # Fresh checkout of the tagged Boost tree into <regression_root>/boost.
    os.chdir( self.regression_root )
    self.svn_command( 'co %s %s' % (self.svn_repository_url(self.tag),'boost') )
def svn_update( self ):
    # 'svn update' the existing working copy in boost_root (changes cwd).
    os.chdir( self.boost_root )
    self.svn_command( 'update' )
def svn_command( self, command ):
    """Run an svn command line (adding --username for real users) and
    raise if it exits with a non-zero status."""
    anonymous = ( not hasattr(self,'user')
        or self.user is None
        or self.user == 'anonymous' )
    if anonymous:
        cmd = 'svn --non-interactive %s' % command
    else:
        cmd = 'svn --non-interactive --username=%s %s' % ( self.user, command )
    self.log( 'Executing SVN command "%s"' % cmd )
    rc = os.system( cmd )
    if rc != 0:
        raise Exception( 'SVN command "%s" failed with code %d' % ( cmd, rc ) )
def svn_repository_url( self, path ):
    """Repository URL for *path*: the authenticated (https) root for real
    users, the anonymous (http) root otherwise."""
    authenticated = ( hasattr(self,'user')
        and self.user is not None
        and self.user != 'anonymous' )
    if authenticated:
        root = repo_root['user']
    else:
        root = repo_root['anon']
    return '%s%s' % ( root, path )
#~ Downloading and extracting source archives, from tarballs or zipballs...
def get_tarball( self, *args ):
    """Fetch ('download') and/or extract ('unpack') the Boost snapshot
    tarball into boost_root; defaults to doing both."""
    if not args:
        args = [ 'download', 'unpack' ]
    tarball_path = None
    if hasattr(self,'local') and self.local is not None:
        tarball_path = self.local
    elif 'download' in args:
        tarball_path = self.download_tarball(self.boost_tarball_name(),self.boost_tarball_url())
    if not tarball_path:
        # was: os.path.join( ..., self.boost_tarball_url() ) -- joined a
        # full URL onto a local path; the local file is named after the
        # tarball, not its URL
        tarball_path = os.path.join( self.regression_root, self.boost_tarball_name() )
    if 'unpack' in args:
        self.unpack_tarball( tarball_path, self.boost_root )
def download_tarball( self, tarball_name, tarball_url ):
    """Download tarball_url into the regression root, replacing any
    previous copy; returns the local path."""
    destination = os.path.join( self.regression_root, tarball_name )
    self.log( 'Downloading "%s" to "%s"...' % ( tarball_url, os.path.dirname( destination ) ) )
    if os.path.exists( destination ):
        os.unlink( destination )
    self.http_get( tarball_url, destination )
    return destination
def tarball_url( self, path ):
    """Snapshot-tarball download URL for an SVN repository path."""
    return 'http://beta.boost.org/development/snapshot.php/' + path
def boost_tarball_name( self ):
    """File name of the Boost snapshot tarball for the current tag."""
    leaf = self.tag.split( '/' )[-1]
    return 'boost-%s.tar.bz2' % leaf
def boost_tarball_url( self ):
    # Download URL of the Boost snapshot matching the current tag.
    return self.tarball_url( self.tag )
def unpack_tarball( self, tarball_path, target_path ):
    """Unpack a .tar.gz/.tar.bz2/.zip archive into the regression root,
    deleting stale boost-* directories first, then rename the unpacked
    directory to *target_path*."""
    self.log( 'Looking for old unpacked archives...' )
    old_boost_dirs = self.find_boost_dirs( )
    for old_boost_dir in old_boost_dirs:
        if old_boost_dir != tarball_path:
            self.log( 'Deleting old directory %s.' % old_boost_dir )
            self.rmtree( old_boost_dir )
    self.log( 'Unpacking boost tarball ("%s")...' % tarball_path )
    tarball_name = os.path.basename( tarball_path )
    extension = tarball_name[ tarball_name.find( '.' ) : ]
    if extension in ( ".tar.gz", ".tar.bz2" ):
        import tarfile
        import stat
        mode = os.path.splitext( extension )[1][1:]
        tar = tarfile.open( tarball_path, 'r:%s' % mode )
        for tarinfo in tar:
            tar.extract( tarinfo, self.regression_root )
            if sys.platform == 'win32' and not tarinfo.isdir():
                # workaround what appears to be a Win32-specific bug in 'tarfile'
                # (modification times for extracted files are not set properly)
                f = os.path.join( self.regression_root, tarinfo.name )
                os.chmod( f, stat.S_IWRITE )
                os.utime( f, ( tarinfo.mtime, tarinfo.mtime ) )
        tar.close()
    elif extension == ".zip":
        # was: extension in ( ".zip" ) -- parenthesized string, not a
        # tuple, so that was a substring test (e.g. "." also matched)
        import zipfile
        z = zipfile.ZipFile( tarball_path, 'r', zipfile.ZIP_DEFLATED )
        for f in z.infolist():
            destination_file_path = os.path.join( self.regression_root, f.filename )
            if destination_file_path[-1] == "/": # directory
                if not os.path.exists( destination_file_path ):
                    os.makedirs( destination_file_path )
            else: # file
                result = open( destination_file_path, 'wb' )
                result.write( z.read( f.filename ) )
                result.close()
        z.close()
    else:
        # was: raise '...' -- string exceptions are invalid
        raise Exception( 'Do not know how to unpack archives with extension "%s"' % extension )
    boost_dir = self.find_boost_dirs()[0]
    self.log( ' Unpacked into directory "%s"' % boost_dir )
    if os.path.exists( target_path ):
        self.log( 'Deleting "%s" directory...' % target_path )
        self.rmtree( target_path )
    self.log( 'Renaming "%s" into "%s"' % ( boost_dir, target_path ) )
    os.rename( boost_dir, target_path )
def find_boost_dirs( self ):
    """All boost-* / boost_* directories directly under the regression root."""
    pattern = os.path.join( self.regression_root, 'boost[-_]*' )
    return [ path for path in glob.glob( pattern ) if os.path.isdir( path ) ]

View file

@ -0,0 +1,51 @@
#!/usr/bin/python
# Copyright Redshift Software, Inc. 2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os
import os.path
import shutil
import sys
import urllib
#~ The directory this file is in.
root = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
print '# Running regressions in %s...' % root
script_sources = [ 'collect_and_upload_logs.py', 'regression.py' ]
script_local = os.path.join(root,'tools','regression','src')
script_remote = 'http://svn.boost.org/svn/boost/trunk/tools/regression/src'
script_dir = os.path.join(root,'tools_regression_src')
#~ Bootstrap.
#~ * Clear out any old versions of the scripts
print '# Creating regression scripts at %s...' % script_dir
if os.path.exists(script_dir):
shutil.rmtree(script_dir)
os.mkdir(script_dir)
#~ * Get new scripts, either from local working copy, or from svn
if os.path.exists(script_local):
print '# Copying regression scripts from %s...' % script_local
for src in script_sources:
shutil.copyfile( os.path.join(script_local,src), os.path.join(script_dir,src) )
else:
print '# Dowloading regression scripts from %s...' % script_remote
proxy = None
for a in sys.argv[1:]:
if a.startswith('--proxy='):
proxy = {'http' : a.split('=')[1] }
print '--- %s' %(proxy['http'])
break
for src in script_sources:
urllib.FancyURLopener(proxy).retrieve(
'%s/%s' % (script_remote,src), os.path.join(script_dir,src) )
#~ * Make the scripts available to Python
sys.path.insert(0,os.path.join(root,'tools_regression_src'))
#~ Launch runner.
from regression import runner
runner(root)

197
tools/regression/src/smoke.py Executable file
View file

@ -0,0 +1,197 @@
# smoke test - every so many minutes, check svn revision, and if changed:
# update working copy, run tests, upload results
# Copyright Beman Dawes 2007
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# ---------------------------------------------------------------------------- #
import os
import sys
import platform
import time
import ftplib
# invoke the system command line processor
def cmd(command):
    # Echo the command line, then run it through the system shell.
    # NOTE(review): the exit status of os.system is ignored by all callers.
    print "command:", command
    os.system(command)
# update SVN working copy
def update_working_copy(boost_path):
    # 'svn update' the working copy rooted at boost_path (changes cwd).
    os.chdir(boost_path)
    cmd("svn update")
# get repository url
def repository_url(path, results_path):
    """Return the svn.boost.org URL of *path*, parsed out of the
    'svn info --xml' output; empty string when none is found."""
    svn_info_file = results_path + "/svn_info.xml"
    cmd("svn info --xml " + path + " >" + svn_info_file)
    f = open( svn_info_file, 'r' )
    try:
        svn_info = f.read()
    finally:
        f.close()
    start = svn_info.find('//svn.boost.org')
    if start < 0:
        return ""
    return svn_info[start:svn_info.find("</url>")]
# get revision number of a path, which may be a filesystem path or URL
def revision(path, results_path, test_name):
    """Return the svn revision number of path (filesystem path or URL).

    Parses the 'revision="NNN"' attribute out of 'svn info --xml' output,
    which is written to results_path/<test_name>-svn_info.xml.
    Returns 0 when no revision attribute is found.
    """
    rev = 0
    svn_info_file = results_path + "/" + test_name + "-svn_info.xml"
    cmd("svn info --xml " + path + " >" + svn_info_file)
    f = open( svn_info_file, 'r' )
    svn_info = f.read()
    f.close()
    pos = svn_info.find( 'revision=' )
    if pos >= 0:
        pos += 10  # step over 'revision="' to the first digit
        while '0' <= svn_info[pos] <= '9':
            rev = rev * 10 + int(svn_info[pos])
            pos += 1
    return rev
# run bjam in current directory
def bjam(boost_path, args, output_path, test_name):
# bjam seems to need BOOST_BUILD_PATH
#os.environ["BOOST_BUILD_PATH"]=boost_path + "/tools/build/v2"
print "Begin bjam..."
command = "bjam --v2 --dump-tests -l180"
if args != "": command += " " + args
command += " >" + output_path + "/" + test_name +"-bjam.log 2>&1"
cmd(command)
# run process_jam_log in current directory
def process_jam_log(boost_path, output_path, test_name):
print "Begin log processing..."
command = "process_jam_log " + boost_path + " <" +\
output_path + "/" + test_name +"-bjam.log"
cmd(command)
# run compiler_status in current directory
def compiler_status(boost_path, output_path, test_name):
print "Begin compiler status html creation... "
command = "compiler_status --v2 --ignore-pass --no-warn --locate-root " + boost_path + " " +\
boost_path + " " + output_path + "/" + test_name + "-results.html " +\
output_path + "/" + test_name + "-details.html "
cmd(command)
# upload results via ftp
def upload_to_ftp(results_path, test_name, ftp_url, user, psw, debug_level):
# to minimize the time web pages are not available, upload with temporary
# names and then rename to the permanent names
i = 0 # dummy variable
os.chdir(results_path)
tmp_results = "temp-" + test_name + "-results.html"
results = test_name + "-results.html"
tmp_details = "temp-" + test_name + "-details.html"
details = test_name + "-details.html"
print "Uploading results via ftp..."
ftp = ftplib.FTP( ftp_url, user, psw )
ftp.set_debuglevel( debug_level )
# ftp.cwd( site_path )
try: ftp.delete(tmp_results)
except: ++i
f = open( results, 'rb' )
ftp.storbinary( 'STOR %s' % tmp_results, f )
f.close()
try: ftp.delete(tmp_details)
except: ++i
f = open( details, 'rb' )
ftp.storbinary( 'STOR %s' % tmp_details, f )
f.close()
try: ftp.delete(results)
except: ++i
try: ftp.delete(details)
except: ++i
ftp.rename(tmp_results, results)
ftp.rename(tmp_details, details)
ftp.dir()
ftp.quit()
def commit_results(results_path, test_name, rev):
print "Commit results..."
cwd = os.getcwd()
os.chdir(results_path)
command = "svn commit --non-interactive -m "+'"'+str(rev)+'" '+test_name+"-results.html"
cmd(command)
os.chdir(cwd)
# ---------------------------------------------------------------------------- #
if len(sys.argv) < 7:
print "Invoke with: minutes boost-path test-name results-path ftp-url user psw [bjam-args]"
print " boost-path must be path for a boost svn working directory."
print " results-path must be path for a svn working directory where an"
print " svn commit test-name+'-results.html' is valid."
print "Warning: This program hangs or crashes on network failures."
exit()
minutes = int(sys.argv[1])
boost_path = sys.argv[2]
test_name = sys.argv[3]
results_path = sys.argv[4]
ftp_url = sys.argv[5]
user = sys.argv[6]
psw = sys.argv[7]
if len(sys.argv) > 8: bjam_args = sys.argv[8]
else: bjam_args = ""
os.chdir(boost_path) # convert possible relative path
boost_path = os.getcwd() # to absolute path
print "minutes is ", minutes
print "boost_path is ", boost_path
print "test_name is ", test_name
print "results_path is ", results_path
print "ftp_url is ", ftp_url
print "user is ", user
print "psw is ", psw
print 'bjam args are "' + bjam_args + '"'
url = repository_url(boost_path, results_path)
print "respository url is ", url
first = 1
while 1:
working_rev = revision(boost_path, results_path, test_name)
repos_rev = revision("http:" + url, results_path, test_name)
print "Working copy revision: ", working_rev, " repository revision: ", repos_rev
if first or working_rev != repos_rev:
first = 0
start_time = time.time()
print
print "start at", time.strftime("%H:%M:%S", time.localtime())
update_working_copy(boost_path)
os.chdir(boost_path+"/status")
bjam(boost_path, bjam_args, results_path, test_name)
process_jam_log(boost_path, results_path, test_name)
compiler_status(boost_path, results_path, test_name)
upload_to_ftp(results_path, test_name, ftp_url, user, psw, 0)
commit_results(results_path, test_name,revision(boost_path, results_path, test_name))
elapsed_time = time.time() - start_time
print elapsed_time/60.0, "minutes elapsed time"
print
print "sleep ", minutes, "minutes..."
time.sleep(60 * minutes)

View file

@ -0,0 +1,21 @@
test-suite testlib :
[ compile-fail compile-fail~fail.cpp ]
[ compile-fail compile-fail~pass.cpp ]
[ compile compile~fail.cpp ]
[ compile compile~pass.cpp ]
[ compile compile~warn.cpp ]
[ link link~fail.cpp ]
[ link link~pass.cpp ]
[ link-fail link-fail~fail.cpp ]
[ link-fail link-fail~pass.cpp ]
[ run-fail run-fail~compile-fail.cpp ]
[ run-fail run-fail~fail-warn.cpp ]
[ run-fail run-fail~fail.cpp ]
[ run-fail run-fail~pass.cpp ]
[ run run~fail.cpp ]
[ run run~note.cpp ]
[ run run~pass.cpp ]
[ run run~warn-note.cpp ]
[ run run~warn.cpp ]
;

View file

@ -0,0 +1,11 @@
rule failure
{
}
actions failure
{
dir _
echo a
}
failure f ;

View file

@ -0,0 +1,12 @@
rule failure
{
}
actions failure
{
dir _
if errorlevel 1 exit %errorlevel%
echo a
}
failure f ;

View file

@ -0,0 +1,9 @@
project
: requirements
<library>/boost/filesystem//boost_filesystem
<define>BOOST_ALL_NO_LIB
;
test-suite "missing_dependencies" :
[ run test.cpp lib//<link>static ]
;

View file

@ -0,0 +1,7 @@
SOURCES =
lib ;
lib lib
:
$(SOURCES).cpp
;

View file

@ -0,0 +1 @@
int main() { return 0; }

View file

@ -0,0 +1 @@
actual

View file

@ -0,0 +1,36 @@
boost-test(RUN) "statechart/DllTestNative" : "libs/statechart/test/TuTestMain.cpp"
boost-test(RUN) "statechart/DllTestNormal" : "libs/statechart/test/TuTestMain.cpp"
compile-c-c++ ..\..\..\bin.v2\libs\statechart\test\DllTestNormal.test\msvc-7.1\debug\threading-multi\TuTestMain.obj
TuTestMain.cpp
c:\Users\Misha\Stuff\boost\HEAD\boost\libs\statechart\test\TuTest.hpp(36) : warning C4275: non dll-interface class 'boost::statechart::event_base' used as base for dll-interface class 'boost::statechart::detail::rtti_policy::rtti_derived_type<MostDerived,Base>'
with
[
MostDerived=EvX,
Base=boost::statechart::event_base
]
..\..\..\boost\statechart\event_base.hpp(49) : see declaration of 'boost::statechart::event_base'
compile-c-c++ ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLibTuTest.obj
TuTest.cpp
c:\Users\Misha\Stuff\boost\HEAD\boost\libs\statechart\test\TuTest.hpp(36) : warning C4275: non dll-interface class 'boost::statechart::event_base' used as base for dll-interface class 'boost::statechart::detail::rtti_policy::rtti_derived_type<MostDerived,Base>'
with
[
MostDerived=EvX,
Base=boost::statechart::event_base
]
..\..\..\boost\statechart\event_base.hpp(49) : see declaration of 'boost::statechart::event_base'
msvc.link.dll ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.lib
Creating library ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.lib and object ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.exp
call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat" >nul
link /NOLOGO /INCREMENTAL:NO /DLL /DEBUG /subsystem:console /out:"..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll" /IMPLIB:"..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.lib" @"..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll.rsp"
if %errorlevel% 1 exit %errorlevel%
if exist "..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll.manifest" (
mt -nologo -manifest "..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll.manifest" "-outputresource:..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll;2"
)
...failed msvc.link.dll ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.lib...
...removing ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll
...removing ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.lib
...skipped <p..\..\..\bin.v2\libs\statechart\test\DllTestNormal.test\msvc-7.1\debug\threading-multi>DllTestNormal.exe for lack of <p..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi>DllTestNormalLib-vc71-mt-gd-1_35.lib...
...skipped <p..\..\..\bin.v2\libs\statechart\test\DllTestNormal.test\msvc-7.1\debug\threading-multi>DllTestNormal.run for lack of <p..\..\..\bin.v2\libs\statechart\test\DllTestNormal.test\msvc-7.1\debug\threading-multi>DllTestNormal.exe...

View file

@ -0,0 +1,27 @@
<test-log library="statechart" test-name="DllTestNormal" test-type="run" test-program="libs/statechart/test/TuTestMain.cpp" target-directory="bin.v2/libs/statechart/test/DllTestNormal.test/msvc-7.1/debug/threading-multi" toolset="msvc-7.1" show-run-output="false">
<lib result="fail" timestamp="">../../bin.v2/libs/statechart/test/msvc-7.1/debug/threading-multi</lib>
</test-log>
<test-log library="statechart" test-name="" test-type="" test-program="" target-directory="bin.v2/libs/statechart/test/msvc-7.1/debug/threading-multi" toolset="" show-run-output="true">
<compile result="succeed" timestamp="">
TuTest.cpp
c:\Users\Misha\Stuff\boost\HEAD\boost\libs\statechart\test\TuTest.hpp(36) : warning C4275: non dll-interface class 'boost::statechart::event_base' used as base for dll-interface class 'boost::statechart::detail::rtti_policy::rtti_derived_type&lt;MostDerived,Base&gt;'
with
[
MostDerived=EvX,
Base=boost::statechart::event_base
]
..\..\..\boost\statechart\event_base.hpp(49) : see declaration of 'boost::statechart::event_base'
</compile>
<link result="fail" timestamp="">
Creating library ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.lib and object ..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.exp
call "C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\vcvars32.bat" &gt;nul
link /NOLOGO /INCREMENTAL:NO /DLL /DEBUG /subsystem:console /out:"..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll" /IMPLIB:"..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.lib" @"..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll.rsp"
if %errorlevel% 1 exit %errorlevel%
if exist "..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll.manifest" (
mt -nologo -manifest "..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll.manifest" "-outputresource:..\..\..\bin.v2\libs\statechart\test\msvc-7.1\debug\threading-multi\DllTestNormalLib-vc71-mt-gd-1_35.dll;2"
)
</link>
</test-log>

View file

@ -1,98 +0,0 @@
locate-root "g:\boost\rc-1-34\results\boost\bin.v2"
searching for python.exe in c:/local/python25/bin
boost-test(RUN) "config_link_test" : "libs/config/test/link/main.cpp"
...patience...
...patience...
...patience...
...patience...
...patience...
...found 84395 targets...
...updating 64629 targets...
MkDir1 g:\boost\rc-1-34\results\boost\bin.v2\status\config_link_test.test\borland-5.8.2
mkdir "g:\boost\rc-1-34\results\boost\bin.v2\status\config_link_test.test\borland-5.8.2"
MkDir1 g:\boost\rc-1-34\results\boost\bin.v2\status\config_link_test.test\borland-5.8.2\debug
mkdir "g:\boost\rc-1-34\results\boost\bin.v2\status\config_link_test.test\borland-5.8.2\debug"
borland.compile.c++ g:\boost\rc-1-34\results\boost\bin.v2\status\config_link_test.test\borland-5.8.2\debug\main.obj
"c:/progra~1/borland/bds/4.0/bin/bcc32.exe" -j5 -g255 -q -c -P -Ve -Vx -a8 -b- -v -Od -tWC -tWR -tWC -WM- -DBOOST_ALL_NO_LIB=1 -DBOOST_CONFIG_NO_LIB=1 -DBOOST_DYN_LINK=1 -I".." -I"c:/progra~1/borland/bds/4.0/include/" -o"g:\boost\rc-1-34\results\boost\bin.v2\status\config_link_test.test\borland-5.8.2\debug\main.obj" "g:\boost\rc-1-34\boost\libs\config\test\link\main.cpp"
g:\boost\rc-1-34\boost\libs\config\test\link\main.cpp:
MkDir1 g:\boost\rc-1-34\results\boost\bin.v2\libs\config\test\link\borland-5.8.2
mkdir "g:\boost\rc-1-34\results\boost\bin.v2\libs\config\test\link\borland-5.8.2"
MkDir1 g:\boost\rc-1-34\results\boost\bin.v2\libs\config\test\link\borland-5.8.2\debug
mkdir "g:\boost\rc-1-34\results\boost\bin.v2\libs\config\test\link\borland-5.8.2\debug"
borland.compile.c++ g:\boost\rc-1-34\results\boost\bin.v2\libs\config\test\link\borland-5.8.2\debug\link_test.obj
"c:/progra~1/borland/bds/4.0/bin/bcc32.exe" -j5 -g255 -q -c -P -Ve -Vx -a8 -b- -v -Od -tWC -tWR -tWC -tWD -WM- -DBOOST_ALL_NO_LIB=1 -DBOOST_DYN_LINK=1 -I".." -I"c:/progra~1/borland/bds/4.0/include/" -o"g:\boost\rc-1-34\results\boost\bin.v2\libs\config\test\link\borland-5.8.2\debug\link_test.obj" "..\libs\config\test\link\link_test.cpp"
..\libs\config\test\link\link_test.cpp:
file g:\boost\rc-1-34\results\boost\bin.v2\libs\config\test\link\borland-5.8.2\debug\link_test-bcb-d-1_34.dll.rsp
"g:\boost\rc-1-34\results\boost\bin.v2\libs\config\test\link\borland-5.8.2\debug\link_test.obj"
borland.link.dll g:\boost\rc-1-34\results\boost\bin.v2\libs\config\test\link\borland-5.8.2\debug\link_test-bcb-d-1_34.dll g:\boost\rc-1-34\results\boost\bin.v2\libs\config\test\link\borland-5.8.2\debug\link_test-bcb-d-1_34.lib
set "PATH=c:/progra~1/borland/bds/4.0/bin/;%PATH%"
"c:/progra~1/borland/bds/4.0/bin/bcc32.exe" -v -q -v -v -tWD -tWC -tWR -tWC -tWD -WM- -L"c:/progra~1/borland/bds/4.0/lib" -e"g:\boost\rc-1-34\results\boost\bin.v2\libs\config\test\link\borland-5.8.2\debug\link_test-bcb-d-1_34.dll" @"g:\boost\rc-1-34\results\boost\bin.v2\libs\config\test\link\borland-5.8.2\debug\link_test-bcb-d-1_34.dll.rsp" && "c:/progra~1/borland/bds/4.0/bin/implib" "g:\boost\rc-1-34\results\boost\bin.v2\libs\config\test\link\borland-5.8.2\debug\link_test-bcb-d-1_34.lib" "g:\boost\rc-1-34\results\boost\bin.v2\libs\config\test\link\borland-5.8.2\debug\link_test-bcb-d-1_34.dll"
Borland Implib Version 3.0.22 Copyright (c) 1991, 2000 Inprise Corporation
file g:\boost\rc-1-34\results\boost\bin.v2\status\config_link_test.test\borland-5.8.2\debug\config_link_test.exe.rsp
"g:\boost\rc-1-34\results\boost\bin.v2\status\config_link_test.test\borland-5.8.2\debug\main.obj"
"g:\boost\rc-1-34\results\boost\bin.v2\libs\config\test\link\borland-5.8.2\debug\link_test-bcb-d-1_34.lib"
borland.link g:\boost\rc-1-34\results\boost\bin.v2\status\config_link_test.test\borland-5.8.2\debug\config_link_test.exe
set "PATH=c:/progra~1/borland/bds/4.0/bin/;%PATH%"
"c:/progra~1/borland/bds/4.0/bin/bcc32.exe" -v -q -v -v -tWD -tWC -tWR -tWC -WM- -L"c:/progra~1/borland/bds/4.0/lib" -e"g:\boost\rc-1-34\results\boost\bin.v2\status\config_link_test.test\borland-5.8.2\debug\config_link_test.exe" @"g:\boost\rc-1-34\results\boost\bin.v2\status\config_link_test.test\borland-5.8.2\debug\config_link_test.exe.rsp"
testing.capture-output g:\boost\rc-1-34\results\boost\bin.v2\status\config_link_test.test\borland-5.8.2\debug\config_link_test.run
set PATH=g:\boost\rc-1-34\results\boost\bin.v2\libs\config\test\link\borland-5.8.2\debug;c:\progra~1\borland\bds\4.0\bin;%PATH%
"g:\boost\rc-1-34\results\boost\bin.v2\status\config_link_test.test\borland-5.8.2\debug\config_link_test.exe" > "g:\boost\rc-1-34\results\boost\bin.v2\status\config_link_test.test\borland-5.8.2\debug\config_link_test.output" 2>&1
set status=%ERRORLEVEL%
echo. >> "g:\boost\rc-1-34\results\boost\bin.v2\status\config_link_test.test\borland-5.8.2\debug\config_link_test.output"
echo EXIT STATUS: %status% >> "g:\boost\rc-1-34\results\boost\bin.v2\status\config_link_test.test\borland-5.8.2\debug\config_link_test.output"
if %status% EQU 0 (
copy "g:\boost\rc-1-34\results\boost\bin.v2\status\config_link_test.test\borland-5.8.2\debug\config_link_test.output" "g:\boost\rc-1-34\results\boost\bin.v2\status\config_link_test.test\borland-5.8.2\debug\config_link_test.run"
)
set verbose=0
if %status% NEQ 0 (
set verbose=1
)
if %verbose% EQU 1 (
echo ====== BEGIN OUTPUT ======
type "g:\boost\rc-1-34\results\boost\bin.v2\status\config_link_test.test\borland-5.8.2\debug\config_link_test.output"
echo ====== END OUTPUT ======
)
exit %status%
1 file(s) copied.
RmTemps g:\boost\rc-1-34\results\boost\bin.v2\status\config_link_test.test\borland-5.8.2\debug\config_link_test.run
del /f /q "g:\boost\rc-1-34\results\boost\bin.v2\status\config_link_test.test\borland-5.8.2\debug\config_link_test.exe"
**passed** g:\boost\rc-1-34\results\boost\bin.v2\status\config_link_test.test\borland-5.8.2\debug\config_link_test.test
echo passed > g:\boost\rc-1-34\results\boost\bin.v2\status\config_link_test.test\borland-5.8.2\debug\config_link_test.test
...failed updating 1716 targets...
...skipped 6455 targets...
...updated 56458 targets...

File diff suppressed because it is too large Load diff

View file

@ -1,5 +1,5 @@
# Copyright (c) MetaCommunications, Inc. 2003-2005
# Copyright (c) MetaCommunications, Inc. 2003-2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
@ -188,10 +188,11 @@ class action:
os.unlink( result )
class merge_xml_action( action ):
def __init__( self, source, destination, expected_results_file, failures_markup_file ):
def __init__( self, source, destination, expected_results_file, failures_markup_file, tag ):
action.__init__( self, destination )
self.source_ = source
self.destination_ = destination
self.tag_ = tag
self.expected_results_file_ = expected_results_file
self.failures_markup_file_ = failures_markup_file
@ -261,6 +262,7 @@ class merge_xml_action( action ):
, {
"expected_results_file" : self.expected_results_file_
, "failures_markup_file": self.failures_markup_file_
, "source" : self.tag_
}
)
@ -304,19 +306,22 @@ class make_links_action( action ):
utils.makedirs( os.path.join( os.path.dirname( self.links_file_path_ ), "developer", "output" ) )
utils.makedirs( os.path.join( os.path.dirname( self.links_file_path_ ), "user", "output" ) )
utils.log( ' Making test output files...' )
utils.libxslt(
utils.log
, self.source_
, xsl_path( 'links_page.xsl' )
, self.links_file_path_
, {
'source': self.tag_
, 'run_date': self.run_date_
, 'comment_file': self.comment_file_
, 'explicit_markup_file': self.failures_markup_file_
}
)
try:
utils.libxslt(
utils.log
, self.source_
, xsl_path( 'links_page.xsl' )
, self.links_file_path_
, {
'source': self.tag_
, 'run_date': self.run_date_
, 'comment_file': self.comment_file_
, 'explicit_markup_file': self.failures_markup_file_
}
)
except Exception, msg:
utils.log( ' Skipping "%s" due to errors (%s)' % ( self.source_, msg ) )
open( self.file_path_, "w" ).close()
@ -381,7 +386,7 @@ def unzip_archives_task( source_dir, processed_dir, unzip_func ):
for a in actions:
a.run()
def merge_xmls_task( source_dir, processed_dir, merged_dir, expected_results_file, failures_markup_file ):
def merge_xmls_task( source_dir, processed_dir, merged_dir, expected_results_file, failures_markup_file, tag ):
utils.log( '' )
utils.log( 'merge_xmls_task: merging updated XMLs in "%s"...' % source_dir )
__log__ = 1
@ -391,7 +396,8 @@ def merge_xmls_task( source_dir, processed_dir, merged_dir, expected_results_fil
actions = [ merge_xml_action( os.path.join( processed_dir, os.path.basename( x ) )
, x
, expected_results_file
, failures_markup_file ) for x in target_files ]
, failures_markup_file
, tag ) for x in target_files ]
for a in actions:
a.run()
@ -484,7 +490,7 @@ def execute_tasks(
ftp_task( ftp_site, site_path, incoming_dir )
unzip_archives_task( incoming_dir, processed_dir, utils.unzip )
merge_xmls_task( incoming_dir, processed_dir, merged_dir, expected_results_file, failures_markup_file )
merge_xmls_task( incoming_dir, processed_dir, merged_dir, expected_results_file, failures_markup_file, tag )
make_links_task( merged_dir
, output_dir
, tag
@ -666,18 +672,20 @@ def make_result_pages(
def fix_file_names( dir ):
"""
The current version of xslproc doesn't correctly handle
spaces on posix systems. We have to manually go through the
result set and correct decode encoded spaces (%20).
spaces. We have to manually go through the
result set and decode encoded spaces (%20).
"""
if os.name == 'posix':
for root, dirs, files in os.walk( dir ):
for file in files:
if file.find( "%20" ) > -1:
new_name = file.replace( "%20", " " )
old_file_path = os.path.join( root, file )
new_file_path = os.path.join( root, new_name )
print "renaming %s %s" % ( old_file_path, new_file_path )
os.rename ( old_file_path, new_file_path )
utils.log( 'Fixing encoded file names...' )
for root, dirs, files in os.walk( dir ):
for file in files:
if file.find( "%20" ) > -1:
new_name = file.replace( "%20", " " )
utils.rename(
utils.log
, os.path.join( root, file )
, os.path.join( root, new_name )
)
def build_xsl_reports(
locate_root_dir
@ -793,7 +801,7 @@ def usage():
print 'Usage: %s [options]' % os.path.basename( sys.argv[0] )
print '''
\t--locate-root the same as --locate-root in compiler_status
\t--tag the tag for the results (i.e. 'CVS-HEAD')
\t--tag the tag for the results (i.e. 'trunk')
\t--expected-results the file with the results to be compared with
\t the current run
\t--failures-markup the file with the failures markup

View file

@ -0,0 +1,122 @@
#!/bin/sh
#~ Copyright Redshift Software, Inc. 2007
#~ Distributed under the Boost Software License, Version 1.0.
#~ (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
set -e
# Run one full report cycle for the branch named in $1 (trunk | release):
# refresh the boost checkout, build the reports, then upload them.
build_all()
{
    update_tools ${1}
    build_results ${1}
    upload_results ${1}
}
update_tools()
{
cwd=`pwd`
cd boost
svn up
cd "${cwd}"
}
report_info()
{
cat - > comment.html <<HTML
<table style="border-spacing: 0.5em;">
<tr>
<td style="vertical-align: top;"><tt>uname</tt></td>
<td>
<pre style="border: 1px solid #666; overflow: auto;">
`uname -a`
</pre>
</td>
</tr>
<tr>
<td style="vertical-align: top;"><tt>uptime</tt></td>
<td>
<pre style="border: 1px solid #666; overflow: auto;">
`uptime`
</pre>
</td>
</tr>
<tr>
<td style="vertical-align: top;"><tt>vmstat</tt></td>
<td>
<pre style="border: 1px solid #666; overflow: auto;">
`vmstat`
</pre>
</td>
</tr>
<tr>
<td style="vertical-align: top;"><tt>xsltproc</tt></td>
<td>
<pre style="border: 1px solid #666; overflow: auto;">
`xsltproc --version`
</pre>
</td>
</tr>
<tr>
<td style="vertical-align: top;"><tt>python</tt></td>
<td>
<pre style="border: 1px solid #666; overflow: auto;">
`python --version 2>&1`
</pre>
</td>
</tr>
<tr>
<td style="vertical-align: top;">previous run</td>
<td>
<pre style="border: 1px solid #666; overflow: auto;">
`cat previous.txt`
</pre>
</td>
</tr>
<tr>
<td style="vertical-align: top;">current run</td>
<td>
<pre style="border: 1px solid #666; overflow: auto;">
`date -u`
</pre>
</td>
</tr>
</table>
HTML
date -u > previous.txt
}
# Run boost_wide_report.py for the branch directory $1, writing report
# output under that directory. Restores the caller's working directory.
build_results()
{
    cwd=`pwd`
    cd ${1}
    root=`pwd`
    boost=${cwd}/boost
    # Map the branch directory name to its svn tag.
    case ${1} in
        trunk) tag=trunk ;;
        release) tag=branches/release ;;
    esac
    report_info
    python "${boost}/tools/regression/xsl_reports/boost_wide_report.py" \
        --locate-root="${root}" \
        --tag=${tag} \
        --expected-results="${boost}/status/expected_results.xml" \
        --failures-markup="${boost}/status/explicit-failures-markup.xml" \
        --comment="comment.html" \
        --user=""
    cd "${cwd}"
}
# Zip the generated pages under $1/all (excluding *.xml), bzip2 the archive,
# scp it to the beta.boost.org incoming area, and unpack it remotely.
upload_results()
{
    cwd=`pwd`
    cd ${1}/all
    # Drop any archive left over from a previous run before re-zipping.
    rm -f ../../${1}.zip*
    zip -r -9 ../../${1} * -x '*.xml'
    cd "${cwd}"
    bzip2 -9 ${1}.zip
    scp ${1}.zip.bz2 grafik@beta.boost.org:/home/grafik/www.boost.org/testing/incoming/
    ssh grafik@beta.boost.org bunzip2 /home/grafik/www.boost.org/testing/incoming/${1}.zip.bz2
}
build_all ${1}

View file

@ -1,10 +0,0 @@
import os
import sys
import win32com.client
objBL = win32com.client.Dispatch("SQLXMLBulkLoad.SQLXMLBulkload.3.0")
objBL.ConnectionString = "provider=SQLOLEDB;data source=localhost;database=boost;integrated security=SSPI"
objBL.ErrorLogFile = "c:\\error.log"
#objBL.Execute( os.path.join( os.path.dirname( __file__ ), "test-runs.xsd" ), "Huang-WinXP-x64.xml" )
#objBL.Execute( os.path.join( os.path.dirname( __file__ ), "test-runs.xsd" ), "extended_test_results.xml" )
objBL.Execute( os.path.join( os.path.dirname( __file__ ), "test-runs.xsd" ), sys.argv[1] )

View file

@ -1,82 +0,0 @@
drop table [test-log]
go
drop table [test-run]
go
CREATE TABLE [test-run] (
[runner] [varchar] (32) NOT NULL ,
[timestamp] [varchar] (32) NOT NULL ,
[source] [varchar] (32) COLLATE SQL_Latin1_General_CP1_CI_AS NULL ,
[tag] [varchar] (32) COLLATE SQL_Latin1_General_CP1_CI_AS NULL ,
[platform] [varchar] (32),
[run-type] [varchar] (32) COLLATE SQL_Latin1_General_CP1_CI_AS NULL,
[comment] [text] NULL,
[toolset] varchar (32) not null
PRIMARY KEY ( runner, timestamp )
)
GO
go
CREATE TABLE [test-log]
(
[runner] [varchar] (32) NOT NULL ,
[timestamp] [varchar] (32) NOT NULL ,
[test-name] [varchar] (64) COLLATE SQL_Latin1_General_CP1_CI_AS NULL ,
[test-run] [varchar] (32) COLLATE SQL_Latin1_General_CP1_CI_AS NULL ,
[test-type] [varchar] (16) COLLATE SQL_Latin1_General_CP1_CI_AS NULL ,
[test-program] [varchar] (512) COLLATE SQL_Latin1_General_CP1_CI_AS NULL ,
[target-directory] [varchar] (512) NOT NULL ,
[library] varchar(64)
PRIMARY KEY ( runner, timestamp, [target-directory] )
)
GO
alter table [test-log] add CONSTRAINT parent FOREIGN KEY ( runner, timestamp ) REFERENCES [test-run] ( runner, timestamp )
go
drop table [compile]
go
create table [compile]
(
[runner] [varchar](32) NOT NULL ,
[target-directory] [varchar](512) NOT NULL ,
[output] [text],
[timestamp] [varchar](32) NULL ,
result [varchar](16)
)
drop table [link]
go
create table [link]
(
[runner] [varchar](32) NOT NULL ,
[target-directory] [varchar](512) NOT NULL ,
[output] [text],
[timestamp] [varchar](32) NULL ,
result [varchar](16)
)
drop table [run]
go
create table [run]
(
[runner] [varchar](32) NOT NULL ,
[target-directory] [varchar](512) NOT NULL ,
[output] [text],
[timestamp] [varchar](32) NULL ,
result [varchar](16)
)
drop table [note]
go
create table [note]
(
[runner] [varchar](32) NOT NULL ,
[target-directory] [varchar](512) NOT NULL ,
[note] [text]
)
select * from [test-run]
select * from [test-log]
select * from run
select * from note

View file

@ -1,126 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified"
xmlns:sql="urn:schemas-microsoft-com:mapping-schema">
<xs:annotation>
<xs:appinfo>
<sql:relationship name="test-run-test-log"
parent="[test-run]"
parent-key="runner timestamp"
child="[test-log]"
child-key="runner timestamp" />
<sql:relationship name="test-log-compile"
parent="[test-log]"
parent-key="runner [target-directory]"
child="[compile]"
child-key="runner [target-directory]" />
<sql:relationship name="test-log-link"
parent="[test-log]"
parent-key="runner [target-directory]"
child="[link]"
child-key="runner [target-directory]" />
<sql:relationship name="test-log-run"
parent="[test-log]"
parent-key="runner [target-directory]"
child="[run]"
child-key="runner [target-directory]" />
<sql:relationship name="test-log-note"
parent="[test-log]"
parent-key="runner [target-directory]"
child="[notes]"
child-key="runner [target-directory]" />
</xs:appinfo>
</xs:annotation>
<xs:element name="all-test-runs" sql:mapped="false" >
<xs:complexType >
<xs:sequence >
<xs:element name="test-run" minOccurs="0" maxOccurs="unbounded"/>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="comment" type="xs:string"/>
<xs:element name="test-run" sql:relation="[test-run]">
<xs:complexType >
<xs:sequence>
<xs:element name="comment" sql:field="[comment]" minOccurs="0" maxOccurs="1"/>
<xs:element name="test-log" minOccurs="0" maxOccurs="unbounded" sql:relation="[test-log]" sql:relationship="test-run-test-log" >
<xs:complexType>
<xs:sequence>
<xs:element name="notes" minOccurs="0" maxOccurs="1" sql:is-constant ="1">
<xs:complexType>
<xs:sequence>
<xs:element name="note" minOccurs="0" maxOccurs="unbounded" sql:relation="[notes]" sql:relationship="test-log-note" sql:overflow-field="note" >
<xs:complexType>
<xs:simpleContent>
<xs:extension base="xs:string">
<xs:attribute name="author"/>
<xs:attribute name="refid"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="compile" minOccurs="0" maxOccurs="unbounded" sql:relation="[compile]" sql:relationship ="test-log-compile" sql:overflow-field="output" sql:field="output">
<xs:complexType >
<xs:simpleContent>
<xs:extension base="xs:string">
<xs:attribute name="result"/>
<xs:attribute name="timestamp"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
</xs:element>
<xs:element name="link" sql:relation="[link]" sql:relationship ="test-log-link" sql:overflow-field="output" sql:field="output">
<xs:complexType >
<xs:simpleContent>
<xs:extension base="xs:string">
<xs:attribute name="result"/>
<xs:attribute name="timestamp"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
</xs:element>
<xs:element name="run" sql:relation="[run]" sql:relationship ="test-log-run" sql:overflow-field="output" sql:field="output">
<xs:complexType>
<xs:simpleContent>
<xs:extension base="xs:string">
<xs:attribute name="result"/>
<xs:attribute name="timestamp"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
</xs:element>
</xs:sequence>
<xs:attribute name="library" sql:field="[library]"/>
<xs:attribute name="test-name" sql:field="[test-name]"/>
<xs:attribute name="test-type" sql:field="[test-type]"/>
<xs:attribute name="test-program" sql:field="[test-program]"/>
<xs:attribute name="target-directory" sql:field="[target-directory]"/>
<xs:attribute name="toolset" sql:field="[toolset]"/>
<xs:attribute name="show-run-output" sql:field="[show-run-output]"/>
<xs:attribute name="result" sql:field="[result]"/>
<xs:attribute name="expected-result" sql:field="[expected-result]"/>
<xs:attribute name="expected-reason" sql:field="[expected-reason]"/>
<xs:attribute name="status" sql:field="[status]"/>
<xs:attribute name="is-new" sql:field="[is-new]"/>
<xs:attribute name="category" sql:field="[category]"/>
</xs:complexType>
</xs:element>
</xs:sequence>
<xs:attribute name="source" sql:field="source"/>
<xs:attribute name="runner" sql:field="runner"/>
<xs:attribute name="timestamp" sql:field="timestamp"/>
<xs:attribute name="platform" sql:field="platform"/>
<xs:attribute name="tag" sql:field="tag"/>
<xs:attribute name="run-type" sql:field="[run-type]"/>
</xs:complexType>
</xs:element>
</xs:schema>

View file

@ -1,5 +1,5 @@
#
# Copyright (C) 2005 The Trustees of Indiana University
# Copyright (C) 2005, 2007 The Trustees of Indiana University
# Author: Douglas Gregor
#
# Distributed under the Boost Software License, Version 1.0. (See
@ -14,7 +14,7 @@ import string
import datetime
import sys
report_author = "Douglas Gregor <dgregor@cs.indiana.edu>"
report_author = "Douglas Gregor <dgregor@osl.iu.edu>"
boost_dev_list = "Boost Developer List <boost@lists.boost.org>"
def sorted_keys( dict ):
@ -150,12 +150,12 @@ class Maintainer:
return None
# Build the message header
message = """From: Douglas Gregor <dgregor@cs.indiana.edu>
message = """From: Douglas Gregor <dgregor@osl.iu.edu>
To: """
message += self.name + ' <' + self.email + '>'
message += """
Reply-To: boost@lists.boost.org
Subject: Regressions in your Boost libraries as of """
Subject: Failures in your Boost libraries as of """
message += str(datetime.date.today()) + " [" + report.branch + "]"
message += """
@ -204,7 +204,7 @@ class Report:
"""
The complete report of all failing test cases.
"""
def __init__(self, branch = 'HEAD'):
def __init__(self, branch = 'trunk'):
self.branch = branch
self.date = None
self.url = None
@ -287,11 +287,12 @@ class Report:
def getIssuesEmail(self):
"""
Retrieve the issues email from MetaComm, trying a few times in
case something wonky is happening. If we can retrieve the file,
calls parseIssuesEmail and return True; otherwise, return False.
Retrieve the issues email from beta.boost.org, trying a few
times in case something wonky is happening. If we can retrieve
the file, calls parseIssuesEmail and return True; otherwise,
return False.
"""
base_url = "http://engineering.meta-comm.com/boost-regression/CVS-"
base_url = "http://beta.boost.org/development/tests/"
base_url += self.branch
base_url += "/developer/";
got_issues = False
@ -368,17 +369,15 @@ class Report:
Compose a message to send to the Boost developer's
list. Return the message and return it.
"""
message = """From: Douglas Gregor <dgregor@cs.indiana.edu>
message = """From: Douglas Gregor <dgregor@osl.iu.edu>
To: boost@lists.boost.org
Reply-To: boost@lists.boost.org
Subject: Boost regression notification ("""
message += str(datetime.date.today()) + " [" + branch + "]"
message += ")"
Subject: [Report] """
message += str(self.numFailures()) + " failures on " + branch
message += " (" + str(datetime.date.today()) + ")"
message += """
Boost Regression test failures
Boost regression test failures
"""
message += "Report time: " + self.date + """
@ -408,8 +407,7 @@ Detailed report:
message += (str(self.numFailures()) + ' failures in ' +
str(len(self.libraries)) + ' libraries')
if any_broken_platforms:
diff = self.numFailures() - self.numReportableFailures()
message += ' (' + str(diff) + ' are from non-broken platforms)'
message += ' (' + str(self.numReportableFailures()) + ' are from non-broken platforms)'
message += '\n'
# Display the number of failures per library
@ -507,7 +505,7 @@ def send_individualized_message (branch, person, maintainers):
if '--send' in sys.argv:
print "Sending..."
smtp = smtplib.SMTP('milliways.osl.iu.edu')
smtp.sendmail(from_addr = 'Douglas Gregor <dgregor@cs.indiana.edu>',
smtp.sendmail(from_addr = 'Douglas Gregor <dgregor@osl.iu.edu>',
to_addrs = person[1],
msg = message)
print "Done."
@ -516,12 +514,12 @@ def send_individualized_message (branch, person, maintainers):
# Send a message to the developer's list
def send_boost_developers_message(branch, maintainers, failing_libraries):
to_line = 'boost@lists.boost.org'
from_line = 'Douglas Gregor <dgregor@cs.indiana.edu>'
from_line = 'Douglas Gregor <dgregor@osl.iu.edu>'
message = """From: Douglas Gregor <dgregor@cs.indiana.edu>
message = """From: Douglas Gregor <dgregor@osl.iu.edu>
To: boost@lists.boost.org
Reply-To: boost@lists.boost.org
Subject: Boost regression notification ("""
Subject: Boost regression testing notification ("""
message += str(datetime.date.today()) + " [" + branch + "]"
message += ")"
@ -567,7 +565,7 @@ entry to libs/maintainers.txt to eliminate this message.
###############################################################################
# Parse command-line options
branch = "HEAD"
branch = "trunk"
for arg in sys.argv:
if arg.startswith("--branch="):
branch = arg[len("--branch="):]
@ -583,14 +581,14 @@ else:
if not okay:
print 'Aborting.'
if '--send' in sys.argv:
message = """From: Douglas Gregor <dgregor@cs.indiana.edu>
To: Douglas Gregor <dgregor@cs.indiana.edu>
message = """From: Douglas Gregor <dgregor@osl.iu.edu>
To: Douglas Gregor <dgregor@osl.iu.edu>
Reply-To: boost@lists.boost.org
Subject: Regression status script failed on """
message += str(datetime.date.today()) + " [" + branch + "]"
smtp = smtplib.SMTP('milliways.osl.iu.edu')
smtp.sendmail(from_addr = 'Douglas Gregor <dgregor@cs.indiana.edu>',
to_addrs = 'dgregor@cs.indiana.edu',
smtp.sendmail(from_addr = 'Douglas Gregor <dgregor@osl.iu.edu>',
to_addrs = 'dgregor@osl.iu.edu',
msg = message)
sys.exit(1)

View file

@ -1,5 +1,5 @@
# Copyright (c) MetaCommunications, Inc. 2003-2005
# Copyright (c) MetaCommunications, Inc. 2003-2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
@ -37,27 +37,24 @@ def rmtree( path ):
os.system( 'rm -f -r "%s"' % path )
def cvs_command( user, command ):
cmd = 'cvs -d:ext:%(user)s@cvs.sourceforge.net:/cvsroot/boost -z9 %(command)s' \
% { 'user': user, 'command': command }
utils.log( 'Executing CVS command "%s"' % cmd )
rc = os.system( cmd )
def svn_command( command ):
utils.log( 'Executing SVN command "%s"' % command )
rc = os.system( command )
if rc != 0:
raise Exception( 'CVS command "%s" failed with code %d' % ( cmd, rc ) )
raise Exception( 'SVN command "%s" failed with code %d' % ( command, rc ) )
def cvs_export( working_dir, user, tag ):
if tag != 'CVS-HEAD':
command = 'export -r %s boost' % tag
def svn_export( sources_dir, user, tag ):
if user is None or user == 'anonymous':
command = 'svn export --force http://svn.boost.org/svn/boost/%s %s' % ( tag, sources_dir )
else:
command = 'export -r HEAD boost'
command = 'svn export --force --non-interactive --username=%s https://svn.boost.org/svn/boost/%s %s' \
% ( user, tag, sources_dir )
os.chdir( working_dir )
retry(
cvs_command
, ( user, command )
os.chdir( os.path.basename( sources_dir ) )
retry(
svn_command
, ( command, )
)
@ -67,46 +64,47 @@ def make_tarball(
, user
, site_dir
):
timestamp = time.time()
timestamp_suffix = time.strftime( '%y-%m-%d-%H%M', time.gmtime( timestamp ) )
tag_suffix = tag.split( '/' )[-1]
sources_dir = os.path.join(
working_dir
, 'boost-%s-%s' % ( tag_suffix, timestamp_suffix )
)
sources_dir = os.path.join( working_dir, 'boost' )
if os.path.exists( sources_dir ):
utils.log( 'Directory "%s" already exists, cleaning it up...' % sources_dir )
rmtree( sources_dir )
try:
os.mkdir( sources_dir )
utils.log( 'Exporting files from CVS...' )
cvs_export( working_dir, user, tag )
utils.log( 'Exporting files from SVN...' )
svn_export( sources_dir, user, tag )
except:
utils.log( 'Cleaning up...' )
rmtree( sources_dir )
raise
timestamp = time.time()
timestamped_dir_name = 'boost-%s-%s' % ( tag, time.strftime( '%y-%m-%d-%H%M', time.gmtime( timestamp ) ) )
timestamped_dir = os.path.join( working_dir, timestamped_dir_name )
utils.log( 'Renaming "%s" to "%s"...' % ( sources_dir, timestamped_dir ) )
os.rename( sources_dir, timestamped_dir )
tarball_name = 'boost-%s.tar.bz2' % tag
tarball_name = 'boost-%s.tar.bz2' % tag_suffix
tarball_path = os.path.join( working_dir, tarball_name )
utils.log( 'Archiving "%s" to "%s"...' % ( timestamped_dir, tarball_path ) )
utils.log( 'Archiving "%s" to "%s"...' % ( sources_dir, tarball_path ) )
tar = tarfile.open( tarball_path, 'w|bz2' )
tar.posix = False # see http://tinyurl.com/4ebd8
tar.add( timestamped_dir, timestamped_dir_name )
tar.add( sources_dir, os.path.basename( sources_dir ) )
tar.close()
tarball_timestamp_path = os.path.join( working_dir, 'boost-%s.timestamp' % tag )
tarball_timestamp_path = os.path.join( working_dir, 'boost-%s.timestamp' % tag_suffix )
utils.log( 'Writing timestamp into "%s"...' % tarball_timestamp_path )
timestamp_file = open( tarball_timestamp_path, 'w' )
timestamp_file.write( '%f' % timestamp )
timestamp_file.close()
md5sum_path = os.path.join( working_dir, 'boost-%s.md5' % tag )
md5sum_path = os.path.join( working_dir, 'boost-%s.md5' % tag_suffix )
utils.log( 'Writing md5 checksum into "%s"...' % md5sum_path )
old_dir = os.getcwd()
os.chdir( os.path.dirname( tarball_path ) )
@ -123,8 +121,8 @@ def make_tarball(
shutil.move( os.path.join( temp_site_dir, tarball_name ), site_dir )
shutil.move( tarball_timestamp_path, site_dir )
shutil.move( md5sum_path, site_dir )
utils.log( 'Removing "%s"...' % timestamped_dir )
rmtree( timestamped_dir )
utils.log( 'Removing "%s"...' % sources_dir )
rmtree( sources_dir )
def accept_args( args ):
@ -138,7 +136,8 @@ def accept_args( args ):
]
options = {
'--tag': 'CVS-HEAD'
'--tag': 'trunk'
, '--user': None
, '--site-dir': None
}
@ -156,8 +155,8 @@ def usage():
print 'Usage: %s [options]' % os.path.basename( sys.argv[0] )
print '''
\t--working-dir working directory
\t--tag snapshot tag (i.e. 'CVS-HEAD')
\t--user SourceForge user name for a CVS account
\t--tag snapshot tag (i.e. 'trunk')
\t--user Boost SVN user ID (optional)
\t--site-dir site directory to copy the snapshot to (optional)
'''

View file

@ -71,7 +71,9 @@ def make_result_pages(
, test_results_file
, xsl_path( 'add_expected_results.xsl', v2 )
, extended_test_results
, { 'expected_results_file': expected_results_file, 'failures_markup_file' : failures_markup_file }
, { 'expected_results_file': expected_results_file
, 'failures_markup_file' : failures_markup_file
, 'source' : tag }
)
links = os.path.join( output_dir, 'links.html' )

View file

@ -10,6 +10,9 @@ import zipfile
import ftplib
import time
import stat
import xml.dom.minidom
import xmlrpclib
import httplib
import os.path
import string
@ -42,6 +45,136 @@ def collect_test_logs( input_dirs, test_results_writer ):
utils.log( 'Walking directory "%s" ...' % input_dir )
os.path.walk( input_dir, process_test_log_files, test_results_writer )
dart_status_from_result = {
'succeed': 'passed',
'fail': 'failed',
'note': 'passed',
'': 'notrun'
}
dart_project = {
'trunk': 'Boost_HEAD',
'': 'Boost_HEAD'
}
dart_track = {
'full': 'Nightly',
'incremental': 'Continuous',
'': 'Experimental'
}
ascii_only_table = ""
for i in range(0,256):
if chr(i) == '\n' or chr(i) == '\r':
ascii_only_table += chr(i)
elif i < 32 or i >= 0x80:
ascii_only_table += '?'
else:
ascii_only_table += chr(i)
class xmlrpcProxyTransport(xmlrpclib.Transport):
def __init__(self, proxy):
self.proxy = proxy
def make_connection(self, host):
self.realhost = host
return httplib.HTTP(self.proxy)
def send_request(self, connection, handler, request_body):
connection.putrequest('POST','http://%s%s' % (self.realhost,handler))
def send_host(self, connection, host):
connection.putheader('Host',self.realhost)
def publish_test_logs(
input_dirs,
runner_id, tag, platform, comment_file, timestamp, user, source, run_type,
dart_server = None,
http_proxy = None,
**unused
):
__log__ = 1
utils.log( 'Publishing test logs ...' )
dart_rpc = None
dart_dom = {}
def _publish_test_log_files_ ( unused, dir, names ):
for file in names:
if os.path.basename( file ) == 'test_log.xml':
utils.log( 'Publishing test log "%s"' % os.path.join(dir,file) )
if dart_server:
log_xml = open(os.path.join(dir,file)).read().translate(ascii_only_table)
#~ utils.log( '--- XML:\n%s' % log_xml)
#~ It seems possible to get an empty XML result file :-(
if log_xml == "": continue
log_dom = xml.dom.minidom.parseString(log_xml)
test = {
'library': log_dom.documentElement.getAttribute('library'),
'test-name': log_dom.documentElement.getAttribute('test-name'),
'toolset': log_dom.documentElement.getAttribute('toolset')
}
if not test['test-name'] or test['test-name'] == '':
test['test-name'] = 'unknown'
if not test['toolset'] or test['toolset'] == '':
test['toolset'] = 'unknown'
if not dart_dom.has_key(test['toolset']):
dart_dom[test['toolset']] = xml.dom.minidom.parseString(
'''<?xml version="1.0" encoding="UTF-8"?>
<DartSubmission version="2.0" createdby="collect_and_upload_logs.py">
<Site>%(site)s</Site>
<BuildName>%(buildname)s</BuildName>
<Track>%(track)s</Track>
<DateTimeStamp>%(datetimestamp)s</DateTimeStamp>
</DartSubmission>
''' % {
'site': runner_id,
'buildname': "%s -- %s (%s)" % (platform,test['toolset'],run_type),
'track': dart_track[run_type],
'datetimestamp' : timestamp
} )
submission_dom = dart_dom[test['toolset']]
for node in log_dom.documentElement.childNodes:
if node.nodeType == xml.dom.Node.ELEMENT_NODE:
if node.firstChild:
log_data = xml.sax.saxutils.escape(node.firstChild.data)
else:
log_data = ''
test_dom = xml.dom.minidom.parseString('''<?xml version="1.0" encoding="UTF-8"?>
<Test>
<Name>.Test.Boost.%(tag)s.%(library)s.%(test-name)s.%(type)s</Name>
<Status>%(result)s</Status>
<Measurement name="Toolset" type="text/string">%(toolset)s</Measurement>
<Measurement name="Timestamp" type="text/string">%(timestamp)s</Measurement>
<Measurement name="Log" type="text/text">%(log)s</Measurement>
</Test>
''' % {
'tag': tag,
'library': test['library'],
'test-name': test['test-name'],
'toolset': test['toolset'],
'type': node.nodeName,
'result': dart_status_from_result[node.getAttribute('result')],
'timestamp': node.getAttribute('timestamp'),
'log': log_data
})
submission_dom.documentElement.appendChild(
test_dom.documentElement.cloneNode(1) )
for input_dir in input_dirs:
utils.log( 'Walking directory "%s" ...' % input_dir )
os.path.walk( input_dir, _publish_test_log_files_, None )
if dart_server:
try:
rpc_transport = None
if http_proxy:
rpc_transport = xmlrpcProxyTransport(http_proxy)
dart_rpc = xmlrpclib.ServerProxy(
'http://%s/%s/Command/' % (dart_server,dart_project[tag]),
rpc_transport )
for dom in dart_dom.values():
#~ utils.log('Dart XML: %s' % dom.toxml('utf-8'))
dart_rpc.Submit.put(xmlrpclib.Binary(dom.toxml('utf-8')))
except Exception, e:
utils.log('Dart server error: %s' % e)
def upload_to_ftp( tag, results_file, ftp_proxy, debug_level ):
ftp_site = 'fx.meta-comm.com'
@ -63,8 +196,9 @@ def upload_to_ftp( tag, results_file, ftp_proxy, debug_level ):
try:
ftp.cwd( tag )
except ftplib.error_perm:
ftp.mkd( tag )
ftp.cwd( tag )
for dir in tag.split( '/' ):
ftp.mkd( dir )
ftp.cwd( dir )
f = open( results_file, 'rb' )
ftp.storbinary( 'STOR %s' % os.path.basename( results_file ), f )
@ -132,9 +266,20 @@ def collect_logs(
, user
, source
, run_type
, dart_server = None
, http_proxy = None
, revision = ''
, **unused
):
timestamp = time.strftime( '%Y-%m-%dT%H:%M:%SZ', read_timestamp( timestamp_file ) )
if dart_server:
publish_test_logs( [ results_dir ],
runner_id, tag, platform, comment_file, timestamp, user, source, run_type,
dart_server = dart_server,
http_proxy = http_proxy )
results_file = os.path.join( results_dir, '%s.xml' % runner_id )
results_writer = open( results_file, 'w' )
utils.log( 'Collecting test logs into "%s"...' % results_file )
@ -147,9 +292,10 @@ def collect_logs(
'tag': tag
, 'platform': platform
, 'runner': runner_id
, 'timestamp': time.strftime( '%Y-%m-%dT%H:%M:%SZ', read_timestamp( timestamp_file ) )
, 'timestamp': timestamp
, 'source': source
, 'run-type': run_type
, 'revision': revision
}
)
@ -176,6 +322,7 @@ def upload_logs(
, debug_level
, send_bjam_log = False
, timestamp_file = None
, dart_server = None
, **unused
):
@ -202,9 +349,12 @@ def collect_and_upload_logs(
, user
, source
, run_type
, revision = None
, ftp_proxy = None
, debug_level = 0
, send_bjam_log = False
, dart_server = None
, http_proxy = None
, **unused
):
@ -218,6 +368,9 @@ def collect_and_upload_logs(
, user
, source
, run_type
, revision = revision
, dart_server = dart_server
, http_proxy = http_proxy
)
upload_logs(
@ -229,6 +382,7 @@ def collect_and_upload_logs(
, debug_level
, send_bjam_log
, timestamp_file
, dart_server = dart_server
)
@ -244,21 +398,28 @@ def accept_args( args ):
, 'run-type='
, 'user='
, 'ftp-proxy='
, 'proxy='
, 'debug-level='
, 'send-bjam-log'
, 'help'
, 'dart-server='
, 'revision='
]
options = {
'--tag' : 'CVS-HEAD'
'--tag' : 'trunk'
, '--platform' : sys.platform
, '--comment' : 'comment.html'
, '--timestamp' : 'timestamp'
, '--user' : None
, '--source' : 'CVS'
, '--source' : 'SVN'
, '--run-type' : 'full'
, '--ftp-proxy' : None
, '--proxy' : None
, '--debug-level' : 0
, '--dart-server' : 'beta.boost.org:8081'
, '--revision' : None
}
utils.accept_args( args_spec, args, options, usage )
@ -274,8 +435,11 @@ def accept_args( args ):
, 'source' : options[ '--source' ]
, 'run_type' : options[ '--run-type' ]
, 'ftp_proxy' : options[ '--ftp-proxy' ]
, 'http_proxy' : options[ '--proxy' ]
, 'debug_level' : int(options[ '--debug-level' ])
, 'send_bjam_log' : options.has_key( '--send-bjam-log' )
, 'dart_server' : options[ '--dart-server' ]
, 'revision ' : options[ '--revision' ]
}
@ -298,16 +462,19 @@ Options:
\t as a timestamp of the run ("timestamp" by default)
\t--comment an HTML comment file to be inserted in the reports
\t ("comment.html" by default)
\t--tag the tag for the results ("CVS-HEAD" by default)
\t--tag the tag for the results ("trunk" by default)
\t--user SourceForge user name for a shell account (optional)
\t--source where Boost sources came from (e.g. "CVS", "tarball",
\t "anonymous CVS"; "CVS" by default)
\t--source where Boost sources came from ("SVN" or "tarball";
\t "SVN" by default)
\t--run-type "incremental" or "full" ("full" by default)
\t--send-bjam-log in addition to regular XML results, send in full bjam
\t log of the regression run
\t--proxy HTTP proxy server address and port (e.g.
\t 'http://www.someproxy.com:3128', optional)
\t--ftp-proxy FTP proxy server (e.g. 'ftpproxy', optional)
\t--debug-level debugging level; controls the amount of debugging
\t output printed; 0 by default (no debug output)
\t--dart-server The dart server to send results to.
''' % '\n\t'.join( commands.keys() )

View file

@ -70,9 +70,6 @@ div.dedication p.topic-title {
div.figure {
margin-left: 2em }
div.footer, div.header {
font-size: smaller }
div.sidebar {
margin-left: 1em ;
border: medium outset ;
@ -109,9 +106,6 @@ h1.title {
h2.subtitle {
text-align: center }
hr {
width: 75% }
ol.simple, ul.simple {
margin-bottom: 1em }

View file

@ -1,525 +1,109 @@
<?xml version="1.0" encoding="utf-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="generator" content="Docutils 0.4.1: http://docutils.sourceforge.net/" />
<meta http-equiv="Content-Language" content="en-us">
<meta name="GENERATOR" content="Microsoft FrontPage 5.0">
<meta name="ProgId" content="FrontPage.Editor.Document">
<meta http-equiv="Content-Type" content="text/html; charset=windows-1252">
<title>Running Boost Regression Tests</title>
<style type="text/css">
/*
:Author: David Goodger
:Contact: goodger@users.sourceforge.net
:Date: $Date$
:Revision: $Revision$
:Copyright: This stylesheet has been placed in the public domain.
Default cascading style sheet for the HTML output of Docutils.
See http://docutils.sf.net/docs/howto/html-stylesheets.html for how to
customize this style sheet.
*/
/* used to remove borders from tables and images */
.borderless, table.borderless td, table.borderless th {
border: 0 }
table.borderless td, table.borderless th {
/* Override padding for "table.docutils td" with "! important".
The right padding separates the table cells. */
padding: 0 0.5em 0 0 ! important }
.first {
/* Override more specific margin styles with "! important". */
margin-top: 0 ! important }
.last, .with-subtitle {
margin-bottom: 0 ! important }
.hidden {
display: none }
a.toc-backref {
text-decoration: none ;
color: black }
blockquote.epigraph {
margin: 2em 5em ; }
dl.docutils dd {
margin-bottom: 0.5em }
/* Uncomment (and remove this text!) to get bold-faced definition list terms
dl.docutils dt {
font-weight: bold }
*/
div.abstract {
margin: 2em 5em }
div.abstract p.topic-title {
font-weight: bold ;
text-align: center }
div.admonition, div.attention, div.caution, div.danger, div.error,
div.hint, div.important, div.note, div.tip, div.warning {
margin: 2em ;
border: medium outset ;
padding: 1em }
div.admonition p.admonition-title, div.hint p.admonition-title,
div.important p.admonition-title, div.note p.admonition-title,
div.tip p.admonition-title {
font-weight: bold ;
font-family: sans-serif }
div.attention p.admonition-title, div.caution p.admonition-title,
div.danger p.admonition-title, div.error p.admonition-title,
div.warning p.admonition-title {
color: red ;
font-weight: bold ;
font-family: sans-serif }
/* Uncomment (and remove this text!) to get reduced vertical space in
compound paragraphs.
div.compound .compound-first, div.compound .compound-middle {
margin-bottom: 0.5em }
div.compound .compound-last, div.compound .compound-middle {
margin-top: 0.5em }
*/
div.dedication {
margin: 2em 5em ;
text-align: center ;
font-style: italic }
div.dedication p.topic-title {
font-weight: bold ;
font-style: normal }
div.figure {
margin-left: 2em ;
margin-right: 2em }
div.footer, div.header {
clear: both;
font-size: smaller }
div.line-block {
display: block ;
margin-top: 1em ;
margin-bottom: 1em }
div.line-block div.line-block {
margin-top: 0 ;
margin-bottom: 0 ;
margin-left: 1.5em }
div.sidebar {
margin-left: 1em ;
border: medium outset ;
padding: 1em ;
background-color: #ffffee ;
width: 40% ;
float: right ;
clear: right }
div.sidebar p.rubric {
font-family: sans-serif ;
font-size: medium }
div.system-messages {
margin: 5em }
div.system-messages h1 {
color: red }
div.system-message {
border: medium outset ;
padding: 1em }
div.system-message p.system-message-title {
color: red ;
font-weight: bold }
div.topic {
margin: 2em }
h1.section-subtitle, h2.section-subtitle, h3.section-subtitle,
h4.section-subtitle, h5.section-subtitle, h6.section-subtitle {
margin-top: 0.4em }
h1.title {
text-align: center }
h2.subtitle {
text-align: center }
hr.docutils {
width: 75% }
img.align-left {
clear: left }
img.align-right {
clear: right }
ol.simple, ul.simple {
margin-bottom: 1em }
ol.arabic {
list-style: decimal }
ol.loweralpha {
list-style: lower-alpha }
ol.upperalpha {
list-style: upper-alpha }
ol.lowerroman {
list-style: lower-roman }
ol.upperroman {
list-style: upper-roman }
p.attribution {
text-align: right ;
margin-left: 50% }
p.caption {
font-style: italic }
p.credits {
font-style: italic ;
font-size: smaller }
p.label {
white-space: nowrap }
p.rubric {
font-weight: bold ;
font-size: larger ;
color: maroon ;
text-align: center }
p.sidebar-title {
font-family: sans-serif ;
font-weight: bold ;
font-size: larger }
p.sidebar-subtitle {
font-family: sans-serif ;
font-weight: bold }
p.topic-title {
font-weight: bold }
pre.address {
margin-bottom: 0 ;
margin-top: 0 ;
font-family: serif ;
font-size: 100% }
pre.literal-block, pre.doctest-block {
margin-left: 2em ;
margin-right: 2em ;
background-color: #eeeeee }
span.classifier {
font-family: sans-serif ;
font-style: oblique }
span.classifier-delimiter {
font-family: sans-serif ;
font-weight: bold }
span.interpreted {
font-family: sans-serif }
span.option {
white-space: nowrap }
span.pre {
white-space: pre }
span.problematic {
color: red }
span.section-subtitle {
/* font-size relative to parent (h1..h6 element) */
font-size: 80% }
table.citation {
border-left: solid 1px gray;
margin-left: 1px }
table.docinfo {
margin: 2em 4em }
table.docutils {
margin-top: 0.5em ;
margin-bottom: 0.5em }
table.footnote {
border-left: solid 1px black;
margin-left: 1px }
table.docutils td, table.docutils th,
table.docinfo td, table.docinfo th {
padding-left: 0.5em ;
padding-right: 0.5em ;
vertical-align: top }
table.docutils th.field-name, table.docinfo th.docinfo-name {
font-weight: bold ;
text-align: left ;
white-space: nowrap ;
padding-left: 0 }
h1 tt.docutils, h2 tt.docutils, h3 tt.docutils,
h4 tt.docutils, h5 tt.docutils, h6 tt.docutils {
font-size: 100% }
tt.docutils {
background-color: #eeeeee }
ul.auto-toc {
list-style-type: none }
</style>
<link rel="stylesheet" type="text/css" href="../../../../doc/html/minimal.css">
</head>
<body>
<div class="document" id="running-boost-regression-tests">
<h1 class="title">Running Boost Regression Tests</h1>
<div class="section">
<h1><a id="requirements" name="requirements">Requirements</a></h1>
<ul class="simple">
<li>Python 2.3 or higher</li>
<li>Some spare disk space (~5 Gb per each tested compiler)</li>
</ul>
<p>That's it! You don't even need a CVS client installed.</p>
</div>
<div class="section">
<h1><a id="installation" name="installation">Installation</a></h1>
<ul class="simple">
<li>Download regression driver <tt class="docutils literal"><span class="pre">regression.py</span></tt> from <a class="reference" href="http://boost.cvs.sourceforge.net/*checkout*/boost/boost/tools/regression/xsl_reports/runner/regression.py">here</a> (<a class="reference" href="http://tinyurl.com/uufav">http://tinyurl.com/uufav</a>)
and put it in the directory where you want all the regression
test files to be placed.</li>
</ul>
<table border="0" cellpadding="5" cellspacing="0" style="border-collapse: collapse" bordercolor="#111111" width="831">
<tr>
<td width="277">
<a href="../../../../index.htm">
<img src="../../../../boost.png" alt="boost.png (6897 bytes)" align="middle" width="277" height="86" border="0"></a></td>
<td width="531" align="middle">
<font size="7">Running Boost Regression Tests</font>
</td>
</tr>
</table>
<h2>Requirements</h2>
<ul>
<li><p class="first"><strong>Optional</strong>: If you already have <tt class="docutils literal"><span class="pre">bjam</span></tt> and/or <tt class="docutils literal"><span class="pre">process_jam_log</span></tt> executables
you'd like to use, just put them in the same directory with <tt class="docutils literal"><span class="pre">regression.py</span></tt>, e.g.:</p>
<pre class="literal-block">
my_boost_regressions/
regression.py
bjam<em>[.exe]</em>
</pre>
</li>
<li>Python 2.3 or later.<br>
&nbsp;</li>
<li>Subversion 1.4 or later.<br>
&nbsp;</li>
<li>At least 5 gigabytes of disk space per compiler to be tested.</li>
</ul>
</div>
<div class="section">
<h1><a id="running-tests" name="running-tests">Running tests</a></h1>
<p>To start a regression run, simply run <tt class="docutils literal"><span class="pre">regression.py</span></tt> providing it with the following
two arguments:</p>
<ul class="simple">
<li>runner id (something unique of your choice that will identify your
results in the reports <a class="footnote-reference" href="#runnerid1" id="id2" name="id2">[1]</a>, <a class="footnote-reference" href="#runnerid2" id="id3" name="id3">[2]</a>)</li>
<li>a particular set of toolsets you want to test with <a class="footnote-reference" href="#toolsets" id="id4" name="id4">[3]</a>.</li>
</ul>
<p>For example:</p>
<pre class="literal-block">
python regression.py --runner=Metacomm --toolsets=gcc,vc7
</pre>
<p>If you are interested in seeing all available options, run <tt class="docutils literal"><span class="pre">python</span> <span class="pre">regression.py</span></tt>
or <tt class="docutils literal"><span class="pre">python</span> <span class="pre">regression.py</span> <span class="pre">--help</span></tt>. See also the <a class="reference" href="#advanced-use">Advanced use</a> section below.</p>
<p><strong>Note</strong>: If you are behind a firewall/proxy server, everything should still &quot;just work&quot;.
In the rare cases when it doesn't, you can explicitly specify the proxy server
parameters through the <tt class="docutils literal"><span class="pre">--proxy</span></tt> option, e.g.:</p>
<pre class="literal-block">
python regression.py ... <strong>--proxy=http://www.someproxy.com:3128</strong>
</pre>
</div>
<div class="section">
<h1><a id="details" name="details">Details</a></h1>
<p>The regression run procedure will:</p>
<ul class="simple">
<li>Download the most recent tarball from <a class="reference" href="http://www.meta-comm.com/engineering/boost/snapshot/">http://www.meta-comm.com/engineering/boost/snapshot/</a>,
unpack it in the subdirectory <tt class="docutils literal"><span class="pre">boost</span></tt>.</li>
<li>Build <tt class="docutils literal"><span class="pre">bjam</span></tt> and <tt class="docutils literal"><span class="pre">process_jam_log</span></tt> if needed. (<tt class="docutils literal"><span class="pre">process_jam_log</span></tt> is an
utility, which extracts the test results from the log file produced by
Boost.Build).</li>
<li>Run regression tests, process and collect the results.</li>
<li>Upload the results to <a class="reference" href="ftp://fx.meta-comm.com/boost-regression">ftp://fx.meta-comm.com/boost-regression</a>.</li>
</ul>
<p>The report merger process running continuously on MetaCommunications site will
merge all submitted test runs and publish them at
<a class="reference" href="http://boost.sourceforge.net/regression-logs/developer">http://boost.sourceforge.net/regression-logs/developer</a>.</p>
</div>
<div class="section">
<h1><a id="advanced-use" name="advanced-use">Advanced use</a></h1>
<div class="section">
<h2><a id="providing-detailed-information-about-your-environment" name="providing-detailed-information-about-your-environment">Providing detailed information about your environment</a></h2>
<p>Once you have your regression results displayed in the Boost-wide
reports, you may consider providing a bit more information about
yourself and your test environment. This additional information will
be presented in the reports on a page associated with your runner ID.</p>
<p>By default, the page's content is just a single line coming from the
<tt class="docutils literal"><span class="pre">comment.html</span></tt> file in your <tt class="docutils literal"><span class="pre">regression.py</span></tt> directory, specifying
the tested platform. You can put online a more detailed description of
your environment, such as your hardware configuration, compiler builds,
and test schedule, by simply altering the file's content. Also, please
consider providing your name and email address for cases where Boost
developers have questions specific to your particular set of results.</p>
</div>
<div class="section">
<h2><a id="incremental-runs" name="incremental-runs">Incremental runs</a></h2>
<p>You can run <tt class="docutils literal"><span class="pre">regression.py</span></tt> in incremental mode <a class="footnote-reference" href="#incremental" id="id5" name="id5">[4]</a> by simply passing
it an identically named command-line flag:</p>
<pre class="literal-block">
python regression.py ... <strong>--incremental</strong>
</pre>
</div>
<div class="section">
<h2><a id="dealing-with-misbehaved-tests-compilers" name="dealing-with-misbehaved-tests-compilers">Dealing with misbehaved tests/compilers</a></h2>
<p>Depending on the environment/C++ runtime support library the test is compiled with,
a test failure/termination may cause an appearance of a dialog window, requiring
human intervention to proceed. Moreover, the test (or even of the compiler itself)
can fall into infinite loop, or simply run for too long. To allow <tt class="docutils literal"><span class="pre">regression.py</span></tt>
to take care of these obstacles, add the <tt class="docutils literal"><span class="pre">--monitored</span></tt> flag to the script
invocation:</p>
<pre class="literal-block">
python regression.py ... <strong>--monitored</strong>
</pre>
<p>That's it. Knowing your intentions, the script will be able to automatically deal
with the listed issues <a class="footnote-reference" href="#monitored" id="id6" name="id6">[5]</a>.</p>
</div>
<div class="section">
<h2><a id="getting-sources-from-cvs" name="getting-sources-from-cvs">Getting sources from CVS</a></h2>
<p>If you already have a CVS client installed and configured, you might prefer to get
the sources directly from the Boost CVS repository. To communicate this to the
script, you just need to pass it your SourceForge user ID using the <tt class="docutils literal"><span class="pre">--user</span></tt>
option; for instance:</p>
<pre class="literal-block">
python regression.py ... <strong>--user=agurtovoy</strong>
</pre>
<p>You can also specify the user as <tt class="docutils literal"><span class="pre">anonymous</span></tt>, requesting anonymous CVS access.
Note, though, that the files obtained this way tend to lag behind the actual CVS
state by several hours, sometimes up to twelve. By contrast, the tarball the script
downloads by default is at most one hour behind.</p>
</div>
<div class="section">
<h2><a id="integration-with-a-custom-driver-script" name="integration-with-a-custom-driver-script">Integration with a custom driver script</a></h2>
<p>Even if you've already been using a custom driver script, and for some
reason you don't want <tt class="docutils literal"><span class="pre">regression.py</span></tt> to take over the entire test cycle,
getting your regression results into <a class="reference" href="http://www.boost.org/regression-logs/developer/">Boost-wide reports</a> is still easy!</p>
<p>In fact, it's just a matter of modifying your script to perform two straightforward
operations:</p>
<ol class="arabic">
<li><p class="first"><em>Timestamp file creation</em> needs to be done before the CVS update/checkout.
The file's location doesn't matter (nor does the content), as long as you know how
to access it later. Making your script do something as simple as
<tt class="docutils literal"><span class="pre">echo</span> <span class="pre">&gt;timestamp</span></tt> would work just fine.</p>
</li>
<li><p class="first"><em>Collecting and uploading logs</em> can be done any time after <tt class="docutils literal"><span class="pre">process_jam_log</span></tt>' s
run, and is as simple as an invocation of the local copy of
<tt class="docutils literal"><span class="pre">$BOOST_ROOT/tools/regression/xsl_reports/runner/collect_and_upload_logs.py</span></tt>
script that was just obtained from the CVS with the rest of the sources.
You'd need to provide <tt class="docutils literal"><span class="pre">collect_and_upload_logs.py</span></tt> with the following three
arguments:</p>
<pre class="literal-block">
--locate-root   directory to scan for &quot;test_log.xml&quot; files
--runner runner ID (e.g. &quot;Metacomm&quot;)
--timestamp     path to a file whose modification time will be used
as a timestamp of the run (&quot;timestamp&quot; by default)
</pre>
<p>For example, assuming that the run's resulting binaries are in the
<tt class="docutils literal"><span class="pre">$BOOST_ROOT/bin</span></tt> directory (the default Boost.Build setup), the
<tt class="docutils literal"><span class="pre">collect_and_upload_logs.py</span></tt> invocation might look like this:</p>
<pre class="literal-block">
python $BOOST_ROOT/tools/regression/xsl_reports/runner/collect_and_upload_logs.py
--locate-root=$BOOST_ROOT/bin
--runner=Metacomm
--timestamp=timestamp
</pre>
</li>
<h2>Step by step instructions</h2>
<ol>
<li>Create a new directory for the branch you want to test.<br>
&nbsp;</li>
<li>Download the
<a href="http://svn.boost.org/svn/boost/trunk/tools/regression/src/run.py">
run.py</a> script into that directory.<br>
&nbsp;</li>
<li>Run &quot;<code>python run.py [options] [commands]</code>&quot;.</li>
</ol>
</div>
<div class="section">
<h2><a id="patching-boost-sources" name="patching-boost-sources">Patching Boost sources</a></h2>
<p>You might encounter an occasional need to make local modifications to
the Boost codebase before running the tests, without disturbing the
automatic nature of the regression process. To implement this under
<tt class="docutils literal"><span class="pre">regression.py</span></tt>:</p>
<ol class="arabic simple">
<li>Codify applying the desired modifications to the sources
located in the <tt class="docutils literal"><span class="pre">./boost</span></tt> subdirectory in a single executable
script named <tt class="docutils literal"><span class="pre">patch_boost</span></tt> (<tt class="docutils literal"><span class="pre">patch_boost.bat</span></tt> on Windows).</li>
<li>Place the script in the <tt class="docutils literal"><span class="pre">regression.py</span></tt> directory.</li>
</ol>
<p>The driver will check for the existence of the <tt class="docutils literal"><span class="pre">patch_boost</span></tt> script,
and, if found, execute it after obtaining the Boost sources.</p>
</div>
</div>
<div class="section">
<h1><a id="feedback" name="feedback">Feedback</a></h1>
<p>Please send all comments/suggestions regarding this document and the testing procedure
itself to the <a class="reference" href="http://lists.boost.org/mailman/listinfo.cgi/boost-testing">Boost Testing list</a>.</p>
</div>
<div class="section">
<h1><a id="notes" name="notes">Notes</a></h1>
<table class="docutils footnote" frame="void" id="runnerid1" rules="none">
<colgroup><col class="label" /><col /></colgroup>
<tbody valign="top">
<tr><td class="label"><a class="fn-backref" href="#id2" name="runnerid1">[1]</a></td><td>If you are running regressions interlacingly with a different
set of compilers (e.g. for Intel in the morning and GCC at the end of the day), you need
to provide a <em>different</em> runner id for each of these runs, e.g. <tt class="docutils literal"><span class="pre">your_name-intel</span></tt>, and
<tt class="docutils literal"><span class="pre">your_name-gcc</span></tt>.</td></tr>
</tbody>
</table>
<table class="docutils footnote" frame="void" id="runnerid2" rules="none">
<colgroup><col class="label" /><col /></colgroup>
<tbody valign="top">
<tr><td class="label"><a class="fn-backref" href="#id3" name="runnerid2">[2]</a></td><td>The limitations of the reports' format/medium impose a direct dependency
between the number of compilers you are testing with and the amount of space available
for your runner id. If you are running regressions for a single compiler, please make
sure to choose a short enough id that does not significantly disturb the reports' layout.</td></tr>
</tbody>
</table>
<table class="docutils footnote" frame="void" id="toolsets" rules="none">
<colgroup><col class="label" /><col /></colgroup>
<tbody valign="top">
<tr><td class="label"><a class="fn-backref" href="#id4" name="toolsets">[3]</a></td><td>If <tt class="docutils literal"><span class="pre">--toolsets</span></tt> option is not provided, the script will try to use the
platform's default toolset (<tt class="docutils literal"><span class="pre">gcc</span></tt> for most Unix-based systems).</td></tr>
</tbody>
</table>
<table class="docutils footnote" frame="void" id="incremental" rules="none">
<colgroup><col class="label" /><col /></colgroup>
<tbody valign="top">
<tr><td class="label"><a class="fn-backref" href="#id5" name="incremental">[4]</a></td><td><p class="first">By default, the script runs in what is known as <em>full mode</em>: on
each <tt class="docutils literal"><span class="pre">regression.py</span></tt> invocation all the files that were left in place by the
previous run -- including the binaries for the successfully built tests and libraries
-- are deleted, and everything is rebuilt once again from scratch. By contrast, in
<em>incremental mode</em> the already existing binaries are left intact, and only the
tests and libraries whose source files have changed since the previous run are
re-built and re-tested.</p>
<p>The main advantage of incremental runs is a significantly shorter turnaround time,
but unfortunately they don't always produce reliable results. Some type of changes
to the codebase (changes to the bjam testing subsystem in particular)
often require switching to a full mode for one cycle in order to produce
trustworthy reports.</p>
<p class="last">As a general guideline, if you can afford it, testing in full mode is preferable.</p>
</td></tr>
</tbody>
</table>
<table class="docutils footnote" frame="void" id="monitored" rules="none">
<colgroup><col class="label" /><col /></colgroup>
<tbody valign="top">
<tr><td class="label"><a class="fn-backref" href="#id6" name="monitored">[5]</a></td><td>Note that at the moment this functionality is available only if you
are running on a Windows platform. Contributions are welcome!</td></tr>
</tbody>
</table>
</div>
</div>
</body>
</html>
<dl>
<dd>
<pre>commands: cleanup, collect-logs, get-source, get-tools, patch,
regression, setup, show-revision, test, test-clean, test-process,
test-run, update-source, upload-logs
options:
-h, --help show this help message and exit
--runner=RUNNER runner ID (e.g. 'Metacomm')
--comment=COMMENT an HTML comment file to be inserted in the
reports
--tag=TAG the tag for the results
--toolsets=TOOLSETS comma-separated list of toolsets to test with
--incremental do incremental run (do not remove previous
binaries)
--timeout=TIMEOUT specifies the timeout, in minutes, for a single
test run/compilation
--bjam-options=BJAM_OPTIONS
options to pass to the regression test
--bjam-toolset=BJAM_TOOLSET
bootstrap toolset for 'bjam' executable
--pjl-toolset=PJL_TOOLSET
bootstrap toolset for 'process_jam_log'
executable
--platform=PLATFORM
--user=USER Boost SVN user ID
--local=LOCAL the name of the boost tarball
--force-update=FORCE_UPDATE
do an SVN update (if applicable) instead of a
clean checkout, even when performing a full run
--have-source=HAVE_SOURCE
do neither a tarball download nor an SVN update;
used primarily for testing script changes
--proxy=PROXY HTTP proxy server address and port
(e.g.'<a rel="nofollow" href="http://www.someproxy.com:3128'" target="_top">http://www.someproxy.com:3128'</a>)
--ftp-proxy=FTP_PROXY
FTP proxy server (e.g. 'ftpproxy')
--dart-server=DART_SERVER
the dart server to send results to
--debug-level=DEBUG_LEVEL
debugging level; controls the amount of
debugging output printed
--send-bjam-log send full bjam log of the regression run
--mail=MAIL email address to send run notification to
--smtp-login=SMTP_LOGIN
STMP server address/login information, in the
following form:
&lt;user&gt;:&lt;password&gt;@&lt;host&gt;[:&lt;port&gt;]
--skip-tests=SKIP_TESTS
do not run bjam; used for testing script changes</pre>
</dd>
</dl>
<p>To test trunk use &quot;<code>--tag=trunk</code>&quot; (the default), and to test the
release use &quot;<code>--tag=branches/release</code>&quot;. Or substitute any Boost tree
of your choice.</p>
<hr>
<p>© Copyright Rene Rivera, 2007<br>
Distributed under the Boost Software License, Version 1.0. See
<a href="http://www.boost.org/LICENSE_1_0.txt">www.boost.org/LICENSE_1_0.txt</a></p>
<p>Revised
<!--webbot bot="Timestamp" S-Type="EDITED" S-Format="%B %d, %Y" startspan -->November 14, 2007<!--webbot bot="Timestamp" endspan i-checksum="39589" --> </font>
</p>
</body>

View file

@ -1,252 +0,0 @@
Running Boost Regression Tests
==============================
Requirements
------------
* Python 2.3 or higher
* Some spare disk space (~5 Gb per each tested compiler)
That's it! You don't even need a CVS client installed.
Installation
------------
* Download regression driver ``regression.py`` from here__ (http://tinyurl.com/uufav)
and put it in the directory where you want all the regression
test files to be placed.
__ http://boost.cvs.sourceforge.net/*checkout*/boost/boost/tools/regression/xsl_reports/runner/regression.py
* **Optional**: If you already have ``bjam`` and/or ``process_jam_log`` executables
you'd like to use, just put them in the same directory with ``regression.py``, e.g.:
.. parsed-literal::
my_boost_regressions/
regression.py
bjam\ *[.exe]*
Running tests
-------------
To start a regression run, simply run ``regression.py`` providing it with the following
two arguments:
- runner id (something unique of your choice that will identify your
results in the reports [#runnerid1]_, [#runnerid2]_)
- a particular set of toolsets you want to test with [#toolsets]_.
For example::
python regression.py --runner=Metacomm --toolsets=gcc,vc7
If you are interested in seeing all available options, run ``python regression.py``
or ``python regression.py --help``. See also the `Advanced use`_ section below.
**Note**: If you are behind a firewall/proxy server, everything should still "just work".
In the rare cases when it doesn't, you can explicitly specify the proxy server
parameters through the ``--proxy`` option, e.g.:
.. parsed-literal::
python regression.py ... **--proxy=http://www.someproxy.com:3128**
Details
-------
The regression run procedure will:
* Download the most recent tarball from http://www.meta-comm.com/engineering/boost/snapshot/,
unpack it in the subdirectory ``boost``.
* Build ``bjam`` and ``process_jam_log`` if needed. (``process_jam_log`` is a
utility, which extracts the test results from the log file produced by
Boost.Build).
* Run regression tests, process and collect the results.
* Upload the results to ftp://fx.meta-comm.com/boost-regression.
The report merger process running continuously on MetaCommunications site will
merge all submitted test runs and publish them at
http://boost.sourceforge.net/regression-logs/developer.
Advanced use
------------
Providing detailed information about your environment
.....................................................
Once you have your regression results displayed in the Boost-wide
reports, you may consider providing a bit more information about
yourself and your test environment. This additional information will
be presented in the reports on a page associated with your runner ID.
By default, the page's content is just a single line coming from the
``comment.html`` file in your ``regression.py`` directory, specifying
the tested platform. You can put online a more detailed description of
your environment, such as your hardware configuration, compiler builds,
and test schedule, by simply altering the file's content. Also, please
consider providing your name and email address for cases where Boost
developers have questions specific to your particular set of results.
Incremental runs
................
You can run ``regression.py`` in incremental mode [#incremental]_ by simply passing
it an identically named command-line flag:
.. parsed-literal::
python regression.py ... **--incremental**
Dealing with misbehaved tests/compilers
.......................................
Depending on the environment/C++ runtime support library the test is compiled with,
a test failure/termination may cause an appearance of a dialog window, requiring
human intervention to proceed. Moreover, the test (or even the compiler itself)
can fall into an infinite loop, or simply run for too long. To allow ``regression.py``
to take care of these obstacles, add the ``--monitored`` flag to the script
invocation:
.. parsed-literal::
python regression.py ... **--monitored**
That's it. Knowing your intentions, the script will be able to automatically deal
with the listed issues [#monitored]_.
Getting sources from CVS
........................
If you already have a CVS client installed and configured, you might prefer to get
the sources directly from the Boost CVS repository. To communicate this to the
script, you just need to pass it your SourceForge user ID using the ``--user``
option; for instance:
.. parsed-literal::
python regression.py ... **--user=agurtovoy**
You can also specify the user as ``anonymous``, requesting anonymous CVS access.
Note, though, that the files obtained this way tend to lag behind the actual CVS
state by several hours, sometimes up to twelve. By contrast, the tarball the script
downloads by default is at most one hour behind.
Integration with a custom driver script
.......................................
Even if you've already been using a custom driver script, and for some
reason you don't want ``regression.py`` to take over the entire test cycle,
getting your regression results into `Boost-wide reports`__ is still easy!
In fact, it's just a matter of modifying your script to perform two straightforward
operations:
1. *Timestamp file creation* needs to be done before the CVS update/checkout.
The file's location doesn't matter (nor does the content), as long as you know how
to access it later. Making your script do something as simple as
``echo >timestamp`` would work just fine.
2. *Collecting and uploading logs* can be done any time after ``process_jam_log``' s
run, and is as simple as an invocation of the local copy of
``$BOOST_ROOT/tools/regression/xsl_reports/runner/collect_and_upload_logs.py``
script that was just obtained from the CVS with the rest of the sources.
You'd need to provide ``collect_and_upload_logs.py`` with the following three
arguments::
--locate-root   directory to scan for "test_log.xml" files
--runner runner ID (e.g. "Metacomm")
--timestamp     path to a file whose modification time will be used
as a timestamp of the run ("timestamp" by default)
For example, assuming that the run's resulting binaries are in the
``$BOOST_ROOT/bin`` directory (the default Boost.Build setup), the
``collect_and_upload_logs.py`` invocation might look like this::
python $BOOST_ROOT/tools/regression/xsl_reports/runner/collect_and_upload_logs.py
--locate-root=$BOOST_ROOT/bin
--runner=Metacomm
--timestamp=timestamp
__ http://www.boost.org/regression-logs/developer/
Patching Boost sources
......................
You might encounter an occasional need to make local modifications to
the Boost codebase before running the tests, without disturbing the
automatic nature of the regression process. To implement this under
``regression.py``:
1. Codify applying the desired modifications to the sources
located in the ``./boost`` subdirectory in a single executable
script named ``patch_boost`` (``patch_boost.bat`` on Windows).
2. Place the script in the ``regression.py`` directory.
The driver will check for the existence of the ``patch_boost`` script,
and, if found, execute it after obtaining the Boost sources.
Feedback
--------
Please send all comments/suggestions regarding this document and the testing procedure
itself to the `Boost Testing list`__.
__ http://lists.boost.org/mailman/listinfo.cgi/boost-testing
Notes
-----
.. [#runnerid1] If you are running regressions interlacingly with a different
set of compilers (e.g. for Intel in the morning and GCC at the end of the day), you need
to provide a *different* runner id for each of these runs, e.g. ``your_name-intel``, and
``your_name-gcc``.
.. [#runnerid2] The limitations of the reports' format/medium impose a direct dependency
between the number of compilers you are testing with and the amount of space available
for your runner id. If you are running regressions for a single compiler, please make
sure to choose a short enough id that does not significantly disturb the reports' layout.
.. [#toolsets] If ``--toolsets`` option is not provided, the script will try to use the
platform's default toolset (``gcc`` for most Unix-based systems).
.. [#incremental] By default, the script runs in what is known as *full mode*: on
each ``regression.py`` invocation all the files that were left in place by the
previous run -- including the binaries for the successfully built tests and libraries
-- are deleted, and everything is rebuilt once again from scratch. By contrast, in
*incremental mode* the already existing binaries are left intact, and only the
tests and libraries whose source files have changed since the previous run are
re-built and re-tested.
The main advantage of incremental runs is a significantly shorter turnaround time,
but unfortunately they don't always produce reliable results. Some type of changes
to the codebase (changes to the bjam testing subsystem in particular)
often require switching to a full mode for one cycle in order to produce
trustworthy reports.
As a general guideline, if you can afford it, testing in full mode is preferable.
.. [#monitored] Note that at the moment this functionality is available only if you
are running on a Windows platform. Contributions are welcome!

View file

@ -32,9 +32,9 @@ boost_root = os.path.join( regression_root, 'boost' )
xsl_reports_dir = os.path.join( boost_root, 'tools', 'regression', 'xsl_reports' )
timestamp_path = os.path.join( regression_root, 'timestamp' )
cvs_command_line = 'cvs -z9 %(command)s'
cvs_ext_command_line = 'cvs -d:ext:%(user)s@boost.cvs.sourceforge.net:/cvsroot/boost -z9 %(command)s'
cvs_pserver_command_line = 'cvs -d:pserver:%(user)s@boost.cvs.sourceforge.net:/cvsroot/boost -z9 %(command)s'
svn_anonymous_command_line = 'svn %(command)s'
svn_command_line = 'svn --non-interactive --username=%(user)s %(command)s'
bjam = {}
process_jam_log = {}
@ -43,10 +43,10 @@ process_jam_log = {}
if sys.platform == 'win32':
bjam[ 'name' ] = 'bjam.exe'
bjam[ 'build_cmd' ] = lambda toolset, v2: bjam_build_script_cmd( 'build.bat %s' % toolset )
bjam[ 'is_supported_toolset' ] = lambda x: x in [ 'borland', 'como', 'gcc', 'gcc-nocygwin' \
, 'intel-win32', 'metrowerks', 'mingw' \
, 'msvc', 'vc7' \
]
bjam[ 'is_supported_toolset' ] = lambda x: x in [ \
'borland', 'como', 'gcc', 'gcc-nocygwin', 'intel-win32', 'metrowerks', 'mingw', \
'msvc', 'vc7', 'vc8' \
]
process_jam_log[ 'name' ] = 'process_jam_log.exe'
def default_toolset(v2):
@ -60,11 +60,10 @@ if sys.platform == 'win32':
else:
bjam[ 'name' ] = 'bjam'
bjam[ 'build_cmd' ] = lambda toolset, v2: bjam_build_script_cmd( './build.sh %s' % toolset )
bjam[ 'is_supported_toolset' ] = lambda x: x in [ 'acc', 'como', 'darwin', 'gcc' \
, 'intel-linux', 'kcc', 'kylix' \
, 'mipspro', 'sunpro', 'tru64cxx' \
, 'vacpp'\
]
bjam[ 'is_supported_toolset' ] = lambda x: x in [ \
'acc', 'como', 'darwin', 'gcc', 'intel-linux', 'kcc', 'kylix', 'mipspro', \
'pathscale', 'pgi', 'qcc', 'sun', 'sunpro', 'tru64cxx', 'vacpp' \
]
process_jam_log[ 'name' ] = 'process_jam_log'
process_jam_log[ 'default_toolset' ] = lambda x: 'gcc'
patch_boost_name = 'patch_boost'
@ -175,6 +174,7 @@ def http_get( source_url, destination, proxy ):
def tarball_name_for_tag( tag, timestamp = False ):
tag = tag.split( '/' )[-1]
if not timestamp: return 'boost-%s.tar.bz2' % tag
else: return 'boost-%s.timestamp' % tag
@ -182,7 +182,7 @@ def tarball_name_for_tag( tag, timestamp = False ):
def download_boost_tarball( destination, tag, proxy, timestamp_only = False ):
tarball_name = tarball_name_for_tag( tag, timestamp_only )
tarball_path = os.path.join( destination, tarball_name )
tarball_url = 'http://engineering.meta-comm.com/boost/snapshot/%s' % tarball_name
tarball_url = 'http://beta.boost.org/development/snapshot.php/%s' % tag
log( 'Downloading "%s" to "%s"...' % ( tarball_url, os.path.dirname( tarball_path ) ) )
if os.path.exists( tarball_path ):
@ -256,38 +256,32 @@ def unpack_tarball( tarball_path, destination ):
os.rename( boost_dir, boost_root )
def cvs_command( user, command ):
if user is None:
cmd = cvs_command_line % { 'command': command }
elif user == 'anonymous':
cmd = cvs_pserver_command_line % { 'user': user, 'command': command }
def svn_command( user, command ):
if user is None or user == 'anonymous':
cmd = svn_anonymous_command_line % { 'command': command }
else:
cmd = cvs_ext_command_line % { 'user': user, 'command': command }
cmd = svn_command_line % { 'user': user, 'command': command }
log( 'Executing CVS command "%s"' % cmd )
log( 'Executing SVN command "%s"' % cmd )
rc = os.system( cmd )
if rc != 0:
raise Exception( 'CVS command "%s" failed with code %d' % ( cmd, rc ) )
raise Exception( 'SVN command "%s" failed with code %d' % ( cmd, rc ) )
def cvs_checkout( user, tag, args ):
if tag != 'CVS-HEAD':
command = 'checkout -r %s boost' % tag
else:
command = 'checkout boost'
def svn_repository_url( user, tag ):
if user != 'anonymous': return 'https://svn.boost.org/svn/boost/%s' % tag
else: return 'http://svn.boost.org/svn/boost/%s' % tag
def svn_checkout( user, tag, args ):
command = 'co %s boost' % svn_repository_url( user, tag )
os.chdir( regression_root )
cvs_command( user, command )
svn_command( user, command )
def cvs_update( user, tag, args ):
if tag != 'CVS-HEAD':
command = 'update -dPA -r %s' % tag
else:
command = 'update -dPA'
os.chdir( os.path.join( regression_root, 'boost' ) )
cvs_command( user, command )
def svn_update( user, tag, args ):
os.chdir( boost_root )
svn_command( user, 'update' )
def format_time( t ):
@ -331,7 +325,7 @@ def get_source( user, tag, proxy, args, **unused ):
if user is not None:
retry(
cvs_checkout
svn_checkout
, ( user, tag, args )
)
else:
@ -342,11 +336,11 @@ def get_source( user, tag, proxy, args, **unused ):
def update_source( user, tag, proxy, args, **unused ):
if user is not None or os.path.exists( os.path.join( boost_root, 'CVS' ) ):
if user is not None or os.path.exists( os.path.join( boost_root, '.svn' ) ):
open( timestamp_path, 'w' ).close()
log( 'Updating sources from CVS (%s)...' % timestamp() )
log( 'Updating sources from SVN (%s)...' % timestamp() )
retry(
cvs_update
svn_update
, ( user, tag, args )
)
else:
@ -472,6 +466,7 @@ def setup(
log( 'Warning: Test monitoring is not supported on this platform (yet).' )
log( ' Please consider contributing this piece!' )
def bjam_build_script_cmd( cmd ):
env_setup_key = 'BJAM_ENVIRONMENT_SETUP'
if os.environ.has_key( env_setup_key ):
@ -483,9 +478,11 @@ def bjam_build_script_cmd( cmd ):
def bjam_command( toolsets, v2 ):
build_path = regression_root
if build_path[-1] == '\\': build_path += '\\'
v2_option = ""
if v2:
v2_option = "--v2"
result = '"%s" %s "-sBOOST_BUILD_PATH=%s" "-sBOOST_ROOT=%s"'\
% (
tool_path( bjam, v2 )
@ -494,7 +491,7 @@ def bjam_command( toolsets, v2 ):
, boost_root
)
if not toolsets is None:
if toolsets:
if v2:
result += ' ' + string.join(string.split( toolsets, ',' ), ' ' )
else:
@ -604,6 +601,7 @@ def test(
if monitored:
stop_build_monitor()
def build_book( **kargs ):
# To do
# 1. PDF generation
@ -629,6 +627,8 @@ def collect_logs(
, user
, comment
, incremental
, dart_server
, ftp_proxy
, args
, **unused
):
@ -649,13 +649,26 @@ def collect_logs(
else: run_type = 'full'
source = 'tarball'
cvs_root_file = os.path.join( boost_root, 'CVS', 'root' )
if os.path.exists( cvs_root_file ):
if string.split( open( cvs_root_file ).readline(), '@' )[0] == ':pserver:anonymous':
source = 'anonymous CVS'
else:
source = 'CVS'
revision = ''
svn_root_file = os.path.join( boost_root, '.svn' )
svn_info_file = os.path.join( boost_root, 'svn_info.txt' )
if os.path.exists( svn_root_file ):
source = 'SVN'
svn_command( 'user', 'info --xml ' + boost_root + ' >' + svn_info_file )
if os.path.exists( svn_info_file ):
f = open( svn_info_file, 'r' )
svn_info = f.read()
f.close()
i = svn_info.find( 'Revision:' )
if i < 0: i = svn_info.find( 'revision=' ) # --xml format
if i >= 0:
i += 10
while svn_info[i] >= '0' and svn_info[i] <= '9':
revision += svn_info[i]
i += 1
from runner import collect_logs
collect_logs(
regression_results
@ -667,8 +680,12 @@ def collect_logs(
, user
, source
, run_type
, dart_server
, ftp_proxy
, revision
)
def collect_book( **unused ):
log( 'Collecting files for BoostBook into "%s"...' % boostbook_archive_name )
import zipfile
@ -682,9 +699,11 @@ def collect_book( **unused ):
for name in names:
path = os.path.join( dirname, name )
if not os.path.isdir( path ):
boostbook_archive.write( path, path[ len(html_root) + 1: ] )
boostbook_archive.write( path, path[ len( html_root ) + 1: ] )
os.path.walk( html_root, add_files, None )
def upload_logs(
tag
, runner
@ -692,35 +711,36 @@ def upload_logs(
, ftp_proxy
, debug_level
, send_bjam_log
, dart_server
, **unused
):
import_utils()
from runner import upload_logs
retry(
upload_logs
, ( regression_results, runner, tag, user, ftp_proxy, debug_level, send_bjam_log, timestamp_path )
, ( regression_results, runner, tag, user, ftp_proxy, debug_level,
send_bjam_log, timestamp_path, dart_server )
)
def upload_book( tag, runner, ftp_proxy, debug_level, **unused ):
import_utils()
from runner import upload_to_ftp
upload_to_ftp( tag, boostbook_archive_name, ftp_proxy, debug_level )
def update_itself( tag, **unused ):
source = os.path.join( xsl_reports_dir, 'runner', os.path.basename( sys.argv[0] ) )
self = os.path.join( regression_root, os.path.basename( sys.argv[0] ) )
# Through revision 38985, the update copy was not done if
# os.stat(self).st_mtime > os.stat(source).st_mtime. This was not
# reliable on all systems, so the copy is now done unconditionally.
log( ' Saving a backup copy of the current script...' )
os.chmod( self, stat.S_IWRITE ) # Win32 workaround
shutil.move( self, '%s~' % self )
log( 'Updating %s from %s...' % ( self, source ) )
log( ' Checking modification dates...' )
if os.stat( self ).st_mtime > os.stat( source ).st_mtime:
log( 'Warning: The current version of script appears to be newer than the source.' )
log( ' Update skipped.' )
else:
log( ' Saving a backup copy of the current script...' )
os.chmod( self, stat.S_IWRITE ) # Win32 workaround
shutil.move( self, '%s~' % self )
log( ' Replacing %s with a newer version...' % self )
shutil.copy2( source, self )
shutil.copy2( source, self )
def send_mail( smtp_login, mail, subject, msg = '', debug_level = 0 ):
@ -761,6 +781,8 @@ def regression(
, incremental
, send_bjam_log
, force_update
, have_source
, skip_tests
, monitored
, timeout
, mail = None
@ -769,6 +791,7 @@ def regression(
, ftp_proxy = None
, debug_level = 0
, v2 = 1
, dart_server = None
, args = []
):
@ -792,6 +815,8 @@ def regression(
log( 'Tag: "%s"' % tag )
unpack_tarball( local, regression_root )
elif have_source:
if not incremental: cleanup( [ 'bin' ] )
else:
if incremental or force_update:
if not incremental: cleanup( [ 'bin' ] )
@ -802,16 +827,18 @@ def regression(
setup( comment, toolsets, book, bjam_toolset, pjl_toolset, monitored, proxy,
v2, [] )
# Not specifying --toolset in command line is not enough
# that would mean to use Boost.Build default ones
# We can skip test only we were explictly
# told to have no toolsets in command line "--toolset="
if toolsets != '': # --toolset=,
test( toolsets, bjam_options, monitored, timeout, v2, [] )
collect_logs( tag, runner, platform, user, comment, incremental, [] )
upload_logs( tag, runner, user, ftp_proxy, debug_level, send_bjam_log )
if toolsets != '': # --toolset=,
if not skip_tests: test( toolsets, bjam_options, monitored, timeout, v2, [] )
collect_logs( tag, runner, platform, user, comment, incremental, dart_server, proxy, [] )
upload_logs( tag, runner, user, ftp_proxy, debug_level, send_bjam_log, dart_server )
if book:
build_book()
build_book()
collect_book()
upload_book( tag, runner, ftp_proxy, debug_level )
@ -847,7 +874,7 @@ def show_revision( **unused ):
import re
re_keyword_value = re.compile( r'^\$\w+:\s+(.*)\s+\$$' )
print '\n\tResivion: %s' % re_keyword_value.match( revision ).group( 1 )
print '\n\tRevision: %s' % re_keyword_value.match( revision ).group( 1 )
print '\tLast modified on: %s\n' % re_keyword_value.match( modified ).group( 1 )
@ -872,15 +899,18 @@ def accept_args( args ):
, 'debug-level='
, 'incremental'
, 'force-update'
, 'have-source'
, 'skip-tests'
, 'dont-send-bjam-log'
, 'monitored'
, 'help'
, 'v2'
, 'v1'
, 'dart-server='
]
options = {
'--tag' : 'CVS-HEAD'
'--tag' : 'trunk'
, '--local' : None
, '--platform' : platform_name()
, '--user' : None
@ -896,6 +926,7 @@ def accept_args( args ):
, '--proxy' : None
, '--debug-level' : 0
, '--ftp-proxy' : None
, '--dart-server' : 'beta.boost.org:8081'
}
( option_pairs, other_args ) = getopt.getopt( args, '', args_spec )
@ -920,6 +951,8 @@ def accept_args( args ):
, 'incremental' : options.has_key( '--incremental' )
, 'send_bjam_log' : not options.has_key( '--dont-send-bjam-log' )
, 'force_update' : options.has_key( '--force-update' )
, 'have_source' : options.has_key( '--have-source' )
, 'skip_tests' : options.has_key( '--skip-tests' )
, 'monitored' : options.has_key( '--monitored' )
, 'timeout' : options[ '--timeout' ]
, 'mail' : options[ '--mail' ]
@ -928,6 +961,7 @@ def accept_args( args ):
, 'ftp_proxy' : options[ '--ftp-proxy' ]
, 'debug_level' : int(options[ '--debug-level' ])
, 'v2' : not options.has_key( '--v1' )
, 'dart_server' : options[ '--dart-server' ]
, 'args' : other_args
}
@ -956,20 +990,23 @@ Commands:
Options:
\t--runner runner ID (e.g. 'Metacomm')
\t--tag the tag for the results ('CVS-HEAD' by default)
\t--tag the tag for the results ('trunk' by default)
\t--local the name of the boost tarball
\t--comment an HTML comment file to be inserted in the reports
\t ('comment.html' by default)
\t--incremental do incremental run (do not remove previous binaries)
\t--dont-send-bjam-log
\t do not send full bjam log of the regression run
\t--force-update do a CVS update (if applicable) instead of a clean
\t--force-update do an SVN update (if applicable) instead of a clean
\t checkout, even when performing a full run
\t--have-source do neither a tarball download nor an SVN update;
\t used primarily for testing script changes
\t--skip-tests do no run bjam; used for testing script changes
\t--monitored do a monitored run
\t--timeout specifies the timeout, in minutes, for a single test
\t run/compilation (enforced only in monitored runs, 5 by
\t default)
\t--user SourceForge user name for a shell/CVS account (optional)
\t--user Boost SVN user ID (optional)
\t--toolsets comma-separated list of toolsets to test with (optional)
\t--book build BoostBook (optional)
\t--bjam-options options to pass to the regression test (optional)
@ -986,6 +1023,7 @@ Options:
\t output printed; 0 by default (no debug output)
\t--v1 Use Boost.Build V1
\t--v2 Use Boost.Build V2 (default)
\t--dart-server The dart server to send results to.
''' % '\n\t'.join( commands.keys() )
print 'Example:\n\t%s --runner=Metacomm\n' % os.path.basename( sys.argv[0] )

View file

@ -91,7 +91,8 @@ def make_test_log( xml_generator
def make_expicit_failure_markup( num_of_libs, num_of_toolsets, num_of_tests ):
g = xml.sax.saxutils.XMLGenerator( open( "explicit-failures-markup.xml", "w" ) )
g = xml.sax.saxutils.XMLGenerator( open( "explicit-failures-markup.xml", "w" ), "utf-8" )
g.startDocument()
g.startElement( "explicit-failures-markup", {} );
# required toolsets
@ -108,7 +109,7 @@ def make_expicit_failure_markup( num_of_libs, num_of_toolsets, num_of_tests ):
if i_toolset % 2 == 1:
g.startElement( "toolset", { "name": make_toolset_name( i_toolset ) } )
g.endElement( "toolset" )
g.startElement( "note", { "author": "T. Test" } )
g.startElement( "note", { "author": u"T. T\xe8st" } )
g.characters( "Test note" )
g.endElement( "note" )
g.endElement( "mark-unusable" )
@ -145,7 +146,7 @@ def make_expicit_failure_markup( num_of_libs, num_of_toolsets, num_of_tests ):
g.startElement( "toolset", { "name": make_toolset_name( 2 ) } )
g.endElement( "toolset" )
g.startElement( "note", { "author": "V. Annotated" } )
g.startElement( "note", { "author": u"V. Ann\xf3tated" } )
g.characters( "Some thoughtful note" )
g.endElement( "note" )
@ -156,6 +157,7 @@ def make_expicit_failure_markup( num_of_libs, num_of_toolsets, num_of_tests ):
g.endElement( "explicit-failures-markup" )
g.endDocument()
def make_expected_results( num_of_libs, num_of_toolsets, num_of_tests ):

View file

@ -51,8 +51,9 @@ def make_test_results():
os.makedirs( results_directory )
for i_runner in range( 0, num_of_runners ):
runner_id = "runner_%02d" % i_runner
g = xml.sax.saxutils.XMLGenerator( open( os.path.join( results_directory, runner_id + ".xml" ), "w" ) )
runner_id = "runner %02d" % i_runner
g = xml.sax.saxutils.XMLGenerator( open( os.path.join( results_directory, runner_id + ".xml" ), "w" ), "utf-8" )
g.startDocument()
if i_runner % 2:
platform = "Win32"
else:
@ -63,6 +64,7 @@ def make_test_results():
, "timestamp": common.format_timestamp(
time.gmtime( time.time() - i_runner * 24*60*60 )
)
, "revision": '%d' % ( 7000 + i_runner )
, "source": test_run_source( i_runner )
, "run-type": test_run_type( i_runner )
} )
@ -92,7 +94,7 @@ def make_test_results():
continue
for i_toolset in range( num_of_toolsets ):
toolset_name = "toolset_%02d" % ( i_toolset )
toolset_name = "toolset %02d" % ( i_toolset )
if num_of_runners - 1 == i_runner and i_toolset % 2:
continue
@ -129,7 +131,7 @@ def make_test_results():
, show_run_output = show_run_output
, variant = variant )
g.endElement( "test-run" )
g.endDocument()

View file

@ -1,87 +0,0 @@
import new
import os
import unittest
import sys
sys.path.append( os.path.normpath( os.path.join( os.path.abspath( os.path.dirname( __file__ ) ), '../..' ) ) )
import utils.libxslt
# 1. Collect the test-case directories.
# 2. Add one generated test method per case to the test-suite class.
# 3. Run the collected test cases via unittest.
class test_case_definition:
    """Describes a single XSL test case stored in the directory *root*/*name*."""

    def __init__(self, root, name):
        # All other accessors are derived from these two values.
        self._root = root
        self._name = name

    def root(self):
        """Directory that contains all test cases."""
        return self._root

    def name(self):
        """Name of this test case (also its directory name)."""
        return self._name

    def path(self):
        """Full path of this test case's directory."""
        return os.path.join(self._root, self._name)

    def xsl_driver(self):
        """Path of the XSL driver stylesheet ('test-driver.xsl') for this case."""
        return os.path.join(self.path(), 'test-driver.xsl')

    def xml_data_files(self):
        """Names of all '*.xml' data files found in the test case directory."""
        entries = os.listdir(self.path())
        return [entry for entry in entries
                if os.path.splitext(entry)[1] == '.xml']
def collect_test_cases():
    """Return a test_case_definition for every enabled test-case directory.

    A test case is any subdirectory of the directory containing this
    script; dropping a file named '.disabled' into a subdirectory
    excludes it from the collection.
    """
    # 'or os.curdir' guards the case where __file__ is a bare relative
    # name and dirname() yields '' (os.listdir('') would fail).
    test_cases_dir = os.path.dirname(__file__) or os.curdir
    def is_disabled(test_case):
        # Resolve against test_cases_dir, not the current working
        # directory: the previous relative join only detected the
        # '.disabled' marker when the script was run from its own
        # directory, unlike the isdir() check below.
        return os.path.exists(os.path.join(test_cases_dir, test_case, '.disabled'))
    return [test_case_definition(test_cases_dir, entry)
            for entry in os.listdir(test_cases_dir)
            if os.path.isdir(os.path.join(test_cases_dir, entry))
               and not is_disabled(entry)]
def log(msg):
    """Write a progress message to stdout.

    Passed as the logger callback to utils.libxslt by the generated
    test methods. The parenthesized single-argument print form behaves
    identically under Python 2 and is also valid Python 3, unlike the
    former 'print msg' statement.
    """
    print(msg)
def read_file_lines(path):
    """Read the text file at *path* and return its lines (newlines kept).

    The context manager guarantees the file is closed on exit, matching
    the original try/finally behavior.
    """
    with open(path) as f:
        return f.readlines()
def write_file_lines(path, content):
    """Overwrite the file at *path* with the given sequence of lines.

    Returns the result of file.writelines (None), as the original did;
    the context manager closes the file like the original try/finally.
    """
    with open(path, 'w') as f:
        return f.writelines(content)
def make_test_case(test_case):
    """Build an unbound test method that exercises *test_case*.

    The generated method runs every XML data file of the test case
    through the case's XSL driver (via utils.libxslt) and compares the
    produced output in 'actual' against the corresponding file in
    'expected', line by line.
    """
    def test_case_method(self):
        for data_file in test_case.xml_data_files():
            print(data_file)
            result_name = os.path.splitext(os.path.basename(data_file))[0] + '.xml'
            actual_file = os.path.join(test_case.path(), 'actual', result_name)
            expected_file = os.path.join(test_case.path(), 'expected', result_name)
            # Tuple print preserves the original Python 2 output format.
            print((actual_file, expected_file))
            previous_cwd = os.getcwd()
            os.chdir(test_case.path())
            try:
                # The transform is run from inside the test-case directory
                # so relative references in the stylesheet resolve.
                utils.libxslt(log, data_file, test_case.xsl_driver(), actual_file)
            finally:
                os.chdir(previous_cwd)
            self.failUnlessEqual(read_file_lines(expected_file),
                                 read_file_lines(actual_file))

    # Name the method after the test case so unittest reports it usefully.
    test_case_method.__name__ = test_case.name()
    return test_case_method
class test_components(unittest.TestCase):
    """Container for dynamically generated test methods.

    One method per discovered test case is attached via setattr() in the
    __main__ block before unittest.main() runs; the class body itself is
    intentionally empty.
    """
    pass
if __name__ == '__main__':
    # Discover every enabled test-case directory, attach one generated
    # test method per case to the container TestCase class, then hand
    # control to unittest's standard runner.
    for test_case in collect_test_cases():
        print test_case.name()
        setattr( test_components, test_case.name(), make_test_case(test_case) )
    unittest.main()

View file

@ -1,15 +0,0 @@
<tests>
<test-log library="mpl" test-name="apply" test-type="compile" test-program="libs/mpl/test/apply.cpp" target-directory="boost/bin.v2/libs/mpl/test/apply.test/gcc-4.1.1_sunos_i86pc/debug/debug-symbols-off" toolset="gcc-4.1.1_sunos_i86pc" show-run-output="false">
<compile result="fail" timestamp="2007-01-28 11:58:42 UTC">
"/openpkg/bin/g++" -ftemplate-depth-128 -O0 -fno-inline -Wall -fPIC -DBOOST_ALL_NO_LIB=1 -I".." -c -o "/home/cae/boost-regression/RC_1_34_0/results/boost/bin.v2/libs/mpl/test/apply.test/gcc-4.1.1_sunos_i86pc/debug/debug-symbols-off/apply.o" "../libs/mpl/test/apply.cpp"
../boost/mpl/aux_/preprocessed/gcc/template_arity.hpp: In instantiation of 'boost::mpl::aux::template_arity&lt;T1&gt;':
../libs/mpl/test/apply.cpp:63: instantiated from here
../boost/mpl/aux_/preprocessed/gcc/template_arity.hpp:98: internal compiler error: Segmentation Fault
Please submit a full bug report,
with preprocessed source if appropriate.
See &lt;URL:http://www.openpkg.org/&gt; for instructions.
</compile>
</test-log>
</tests>

View file

@ -1,31 +0,0 @@
<tests>
<test-log library="algorithm/minmax" test-program="libs/algorithm/minmax/test/minmax_test.cpp" show-run-output="false" toolset="msvc-6.5~release" test-type="run" test-name="minmax" target-directory="boost/bin.v2/libs/algorithm/minmax/test/minmax.test/msvc-6.5~release/release/threading-multi" result="success" expected-result="success" expected-reason="" status="expected" is-new="yes" category="0"><notes/>
<compile timestamp="2007-01-16 06:43:40 UTC" result="succeed">
call "c:\Program Files\Microsoft Visual Studio\VC98\Bin\VCVARS32.BAT" &gt;nul
cl /Zm800 -nologo @"g:\boost\rc-1-34\results\boost\bin.v2\libs\algorithm\minmax\test\minmax.test\msvc-6.5~release\release\threading-multi\minmax_test.obj.rsp"
minmax_test.cpp
file g:\boost\rc-1-34\results\boost\bin.v2\libs\algorithm\minmax\test\minmax.test\msvc-6.5~release\release\threading-multi\minmax.exe.rsp
"g:\boost\rc-1-34\results\boost\bin.v2\libs\algorithm\minmax\test\minmax.test\msvc-6.5~release\release\threading-multi\minmax_test.obj"
</compile>
<link timestamp="2007-01-16 06:43:40 UTC" result="succeed">
call "c:\Program Files\Microsoft Visual Studio\VC98\Bin\VCVARS32.BAT" &gt;nul
link /NOLOGO /INCREMENTAL:NO /subsystem:console /out:"g:\boost\rc-1-34\results\boost\bin.v2\libs\algorithm\minmax\test\minmax.test\msvc-6.5~release\release\threading-multi\minmax.exe" @"g:\boost\rc-1-34\results\boost\bin.v2\libs\algorithm\minmax\test\minmax.test\msvc-6.5~release\release\threading-multi\minmax.exe.rsp"
if %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
if exist "g:\boost\rc-1-34\results\boost\bin.v2\libs\algorithm\minmax\test\minmax.test\msvc-6.5~release\release\threading-multi\minmax.exe.manifest" (
mt -nologo -manifest "g:\boost\rc-1-34\results\boost\bin.v2\libs\algorithm\minmax\test\minmax.test\msvc-6.5~release\release\threading-multi\minmax.exe.manifest" "-outputresource:g:\boost\rc-1-34\results\boost\bin.v2\libs\algorithm\minmax\test\minmax.test\msvc-6.5~release\release\threading-multi\minmax.exe;1"
)
</link>
<run timestamp="2007-01-16 06:43:40 UTC" result="succeed">
Running 1 test case...
*** No errors detected
EXIT STATUS: 0
</run>
</test-log>
</tests>

View file

@ -1,4 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<tests>
<result name="apply" target-directory="boost/bin.v2/libs/mpl/test/apply.test/gcc-4.1.1_sunos_i86pc/debug/debug-symbols-off" result="true"/>
</tests>

View file

@ -1,4 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<tests>
<result name="minmax" target-directory="boost/bin.v2/libs/algorithm/minmax/test/minmax.test/msvc-6.5~release/release/threading-multi" result="true"/>
</tests>

View file

@ -1,5 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<tests>
<result name="" target-directory="boost/bin.v2/libs/date_time/build/borland-5.8.2/debug" result="true"/>
<result name="" target-directory="boost/bin.v2/libs/date_time/build/borland-5.8.2/debug" result="false"/>
</tests>

View file

@ -1,4 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<tests>
<result name="config_link_test" target-directory="status/config_link_test.test/borland-5.8.2/debug" result="false"/>
</tests>

View file

@ -1,4 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<tests>
<result name="minmax" target-directory="boost/bin.v2/libs/algorithm/minmax/test/minmax.test/msvc-6.5~release/release/threading-multi" result="false"/>
</tests>

View file

@ -1,6 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<tests>
<result name="python_test" target-directory="boost/bin.v2/libs/parameter/test/python_test.test/msvc-8.0/rls/adrs-mdl-64/instr-set-optrn/thrd-mlt" result="true"/>
<result name="python_test" target-directory="boost/bin.v2/libs/parameter/test/python_test.test/msvc-8.0/rls/adrs-mdl-64/instr-set-optrn/thrd-mlt" result="false"/>
</tests>

View file

@ -1,5 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<tests>
<result name="" target-directory="libs/config/test/link/borland-5.8.2/debug" result="true"/>
<result name="config_link_test" target-directory="status/config_link_test.test/borland-5.8.2/debug" result="true"/>
</tests>

View file

@ -1,42 +0,0 @@
<tests>
<test-log library="date_time" test-program="" show-run-output="true" toolset="borland-5.8.2" test-type="lib" test-name="" target-directory="boost/bin.v2/libs/date_time/build/borland-5.8.2/debug">
<compile timestamp="2007-01-30 00:50:48 UTC" result="succeed">
"C:/Programme/Borland/BDS/4.0/Bin/bcc32.exe" -j5 -g255 -q -c -P -Ve -Vx -a8 -b- -IX:/spirit-1.6.3/boost -v -Od -tWC -tWR -tWC -tWD -WM- -DBOOST_ALL_DYN_LINK=1 -DBOOST_ALL_NO_LIB=1 -DDATE_TIME_INLINE -I".." -I"C:/Programme/Borland/BDS/4.0/include/" -o"X:\boost-regression\results\boost\bin.v2\libs\date_time\build\borland-5.8.2\debug\date_generators.obj" "..\libs\date_time\src\gregorian\date_generators.cpp"
..\libs\date_time\src\gregorian\date_generators.cpp:
file X:\boost-regression\results\boost\bin.v2\libs\date_time\build\borland-5.8.2\debug\boost_date_time-bcb-d-1_34.dll.rsp
"X:\boost-regression\results\boost\bin.v2\libs\date_time\build\borland-5.8.2\debug\greg_month.obj"
"X:\boost-regression\results\boost\bin.v2\libs\date_time\build\borland-5.8.2\debug\greg_weekday.obj"
"X:\boost-regression\results\boost\bin.v2\libs\date_time\build\borland-5.8.2\debug\date_generators.obj"
</compile>
<link timestamp="2007-01-30 00:50:48 UTC" result="succeed">
set "PATH=C:/Programme/Borland/BDS/4.0/bin/;%PATH%"
"C:/Programme/Borland/BDS/4.0/Bin/bcc32.exe" -v -q -v -v -tWD -tWC -tWR -tWC -tWD -WM- -L"C:/Programme/Borland/BDS/4.0/lib" -e"X:\boost-regression\results\boost\bin.v2\libs\date_time\build\borland-5.8.2\debug\boost_date_time-bcb-d-1_34.dll" @"X:\boost-regression\results\boost\bin.v2\libs\date_time\build\borland-5.8.2\debug\boost_date_time-bcb-d-1_34.dll.rsp" &amp;&amp; "C:/Programme/Borland/BDS/4.0/bin/implib" "X:\boost-regression\results\boost\bin.v2\libs\date_time\build\borland-5.8.2\debug\boost_date_time-bcb-d-1_34.lib" "X:\boost-regression\results\boost\bin.v2\libs\date_time\build\borland-5.8.2\debug\boost_date_time-bcb-d-1_34.dll"
Borland Implib Version 3.0.22 Copyright (c) 1991, 2000 Inprise Corporation
</link>
</test-log>
<test-log library="date_time" test-program="" show-run-output="true" toolset="borland-5.8.2" test-type="lib" test-name="" target-directory="boost/bin.v2/libs/date_time/build/borland-5.8.2/debug">
<compile timestamp="2007-01-30 00:50:48 UTC" result="succeed">
"C:/Programme/Borland/BDS/4.0/Bin/bcc32.exe" -j5 -g255 -q -c -P -Ve -Vx -a8 -b- -IX:/spirit-1.6.3/boost -v -Od -tWC -tWR -tWC -tWD -WM- -DBOOST_ALL_DYN_LINK=1 -DBOOST_ALL_NO_LIB=1 -DDATE_TIME_INLINE -I".." -I"C:/Programme/Borland/BDS/4.0/include/" -o"X:\boost-regression\results\boost\bin.v2\libs\date_time\build\borland-5.8.2\debug\date_generators.obj" "..\libs\date_time\src\gregorian\date_generators.cpp"
..\libs\date_time\src\gregorian\date_generators.cpp:
file X:\boost-regression\results\boost\bin.v2\libs\date_time\build\borland-5.8.2\debug\boost_date_time-bcb-d-1_34.dll.rsp
"X:\boost-regression\results\boost\bin.v2\libs\date_time\build\borland-5.8.2\debug\greg_month.obj"
"X:\boost-regression\results\boost\bin.v2\libs\date_time\build\borland-5.8.2\debug\greg_weekday.obj"
"X:\boost-regression\results\boost\bin.v2\libs\date_time\build\borland-5.8.2\debug\date_generators.obj"
</compile>
</test-log>
</tests>

View file

@ -1,16 +0,0 @@
<tests>
<test-log library="config" test-name="config_link_test" test-type="run" test-program="libs/config/test/link/main.cpp" target-directory="status/config_link_test.test/borland-5.8.2/debug" toolset="borland-5.8.2" show-run-output="false">
<compile result="succeed" timestamp="xxx">
"c:/progra~1/borland/bds/4.0/bin/bcc32.exe" -j5 -g255 -q -c -P -Ve -Vx -a8 -b- -v -Od -tWC -tWR -tWC -WM- -DBOOST_ALL_NO_LIB=1 -DBOOST_CONFIG_NO_LIB=1 -DBOOST_DYN_LINK=1 -I".." -I"c:/progra~1/borland/bds/4.0/include/" -o"C:\Users\Misha\Stuff\boost\RC_1_34_0\boost\tools\regression\test\test-cases\borland\actual\status\config_link_test.test\borland-5.8.2\debug\main.obj" "g:\boost\rc-1-34\boost\libs\config\test\link\main.cpp"
g:\boost\rc-1-34\boost\libs\config\test\link\main.cpp:
</compile>
<link result="fail" timestamp="xxx">
set "PATH=c:/progra~1/borland/bds/4.0/bin/;%PATH%"
"c:/progra~1/borland/bds/4.0/bin/bcc32.exe" -v -q -v -v -tWD -tWC -tWR -tWC -WM- -L"c:/progra~1/borland/bds/4.0/lib" -e"C:\Users\Misha\Stuff\boost\RC_1_34_0\boost\tools\regression\test\test-cases\borland\actual\status\config_link_test.test\borland-5.8.2\debug\config_link_test.exe" @"C:\Users\Misha\Stuff\boost\RC_1_34_0\boost\tools\regression\test\test-cases\borland\actual\status\config_link_test.test\borland-5.8.2\debug\config_link_test.exe.rsp"
</link>
<run result="succeed" timestamp="xxx">
</run>
</test-log>
</tests>

View file

@ -1,24 +0,0 @@
<tests>
<test-log library="algorithm/minmax" test-program="libs/algorithm/minmax/test/minmax_test.cpp" show-run-output="false" toolset="msvc-6.5~release" test-type="run" test-name="minmax" target-directory="boost/bin.v2/libs/algorithm/minmax/test/minmax.test/msvc-6.5~release/release/threading-multi" result="success" expected-result="success" expected-reason="" status="expected" is-new="yes" category="0"><notes/>
<compile timestamp="2007-01-16 06:43:40 UTC" result="succeed">
call "c:\Program Files\Microsoft Visual Studio\VC98\Bin\VCVARS32.BAT" &gt;nul
cl /Zm800 -nologo @"g:\boost\rc-1-34\results\boost\bin.v2\libs\algorithm\minmax\test\minmax.test\msvc-6.5~release\release\threading-multi\minmax_test.obj.rsp"
minmax_test.cpp
file g:\boost\rc-1-34\results\boost\bin.v2\libs\algorithm\minmax\test\minmax.test\msvc-6.5~release\release\threading-multi\minmax.exe.rsp
"g:\boost\rc-1-34\results\boost\bin.v2\libs\algorithm\minmax\test\minmax.test\msvc-6.5~release\release\threading-multi\minmax_test.obj"
</compile>
<link timestamp="2007-01-16 06:43:40 UTC" result="succeed">
call "c:\Program Files\Microsoft Visual Studio\VC98\Bin\VCVARS32.BAT" &gt;nul
link /NOLOGO /INCREMENTAL:NO /subsystem:console /out:"g:\boost\rc-1-34\results\boost\bin.v2\libs\algorithm\minmax\test\minmax.test\msvc-6.5~release\release\threading-multi\minmax.exe" @"g:\boost\rc-1-34\results\boost\bin.v2\libs\algorithm\minmax\test\minmax.test\msvc-6.5~release\release\threading-multi\minmax.exe.rsp"
if %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
if exist "g:\boost\rc-1-34\results\boost\bin.v2\libs\algorithm\minmax\test\minmax.test\msvc-6.5~release\release\threading-multi\minmax.exe.manifest" (
mt -nologo -manifest "g:\boost\rc-1-34\results\boost\bin.v2\libs\algorithm\minmax\test\minmax.test\msvc-6.5~release\release\threading-multi\minmax.exe.manifest" "-outputresource:g:\boost\rc-1-34\results\boost\bin.v2\libs\algorithm\minmax\test\minmax.test\msvc-6.5~release\release\threading-multi\minmax.exe;1"
)
</link>
</test-log>
</tests>

View file

@ -1,57 +0,0 @@
<tests>
<test-log library="parameter" test-program="libs/parameter/test/python_test.py" show-run-output="false" toolset="msvc-8.0" test-type="run_pyd" test-name="python_test" target-directory="boost/bin.v2/libs/parameter/test/python_test.test/msvc-8.0/rls/adrs-mdl-64/instr-set-optrn/thrd-mlt">
<compile timestamp="2007-01-30 05:09:28 UTC" result="succeed">
call "C:\Program Files (x86)\Microsoft Visual Studio 8\VC\vcvarsall.bat" amd64 &gt;nul
cl /Zm800 -nologo @"Z:\results\boost\bin.v2\libs\parameter\test\python_test.test\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\python_test.obj.rsp"
python_test.cpp
file Z:\results\boost\bin.v2\libs\parameter\test\python_test.test\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\python_test_ext.pyd.rsp
"Z:\results\boost\bin.v2\libs\parameter\test\python_test.test\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\python_test.obj"
"Z:\results\boost\bin.v2\libs\python\build\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\boost_python-vc80-mt-1_34.lib"
</compile>
<link timestamp="2007-01-30 05:09:28 UTC" result="succeed">
call "C:\Program Files (x86)\Microsoft Visual Studio 8\VC\vcvarsall.bat" amd64 &gt;nul
link /NOLOGO /INCREMENTAL:NO /DLL /subsystem:console /out:"Z:\results\boost\bin.v2\libs\parameter\test\python_test.test\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\python_test_ext.pyd" /IMPLIB:"Z:\results\boost\bin.v2\libs\parameter\test\python_test.test\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\python_test_ext-vc80-mt-1_34.lib" /LIBPATH:"c:\python25\libs" @"Z:\results\boost\bin.v2\libs\parameter\test\python_test.test\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\python_test_ext.pyd.rsp"
if %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
if exist "Z:\results\boost\bin.v2\libs\parameter\test\python_test.test\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\python_test_ext.pyd.manifest" (
mt -nologo -manifest "Z:\results\boost\bin.v2\libs\parameter\test\python_test.test\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\python_test_ext.pyd.manifest" "-outputresource:Z:\results\boost\bin.v2\libs\parameter\test\python_test.test\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\python_test_ext.pyd;2"
)
Creating library Z:\results\boost\bin.v2\libs\parameter\test\python_test.test\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\python_test_ext-vc80-mt-1_34.lib and object Z:\results\boost\bin.v2\libs\parameter\test\python_test.test\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\python_test_ext-vc80-mt-1_34.exp
</link>
<run timestamp="2007-01-30 05:09:28 UTC" result="succeed">
EXIT STATUS: 0
</run>
</test-log>
<test-log library="parameter" test-program="libs/parameter/test/python_test.py" show-run-output="false" toolset="msvc-8.0" test-type="run_pyd" test-name="python_test" target-directory="boost/bin.v2/libs/parameter/test/python_test.test/msvc-8.0/rls/adrs-mdl-64/instr-set-optrn/thrd-mlt">
<compile timestamp="2007-01-30 05:09:28 UTC" result="succeed">
call "C:\Program Files (x86)\Microsoft Visual Studio 8\VC\vcvarsall.bat" amd64 &gt;nul
cl /Zm800 -nologo @"Z:\results\boost\bin.v2\libs\parameter\test\python_test.test\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\python_test.obj.rsp"
python_test.cpp
file Z:\results\boost\bin.v2\libs\parameter\test\python_test.test\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\python_test_ext.pyd.rsp
"Z:\results\boost\bin.v2\libs\parameter\test\python_test.test\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\python_test.obj"
"Z:\results\boost\bin.v2\libs\python\build\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\boost_python-vc80-mt-1_34.lib"
</compile>
<link timestamp="2007-01-30 05:09:28 UTC" result="succeed">
call "C:\Program Files (x86)\Microsoft Visual Studio 8\VC\vcvarsall.bat" amd64 &gt;nul
link /NOLOGO /INCREMENTAL:NO /DLL /subsystem:console /out:"Z:\results\boost\bin.v2\libs\parameter\test\python_test.test\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\python_test_ext.pyd" /IMPLIB:"Z:\results\boost\bin.v2\libs\parameter\test\python_test.test\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\python_test_ext-vc80-mt-1_34.lib" /LIBPATH:"c:\python25\libs" @"Z:\results\boost\bin.v2\libs\parameter\test\python_test.test\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\python_test_ext.pyd.rsp"
if %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
if exist "Z:\results\boost\bin.v2\libs\parameter\test\python_test.test\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\python_test_ext.pyd.manifest" (
mt -nologo -manifest "Z:\results\boost\bin.v2\libs\parameter\test\python_test.test\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\python_test_ext.pyd.manifest" "-outputresource:Z:\results\boost\bin.v2\libs\parameter\test\python_test.test\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\python_test_ext.pyd;2"
)
Creating library Z:\results\boost\bin.v2\libs\parameter\test\python_test.test\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\python_test_ext-vc80-mt-1_34.lib and object Z:\results\boost\bin.v2\libs\parameter\test\python_test.test\msvc-8.0\rls\adrs-mdl-64\instr-set-optrn\thrd-mlt\python_test_ext-vc80-mt-1_34.exp
</link>
</test-log>
</tests>

View file

@ -1,43 +0,0 @@
<tests>
<test-log library="config" test-name="" test-type="" test-program="" target-directory="libs/config/test/link/borland-5.8.2/debug" toolset="" show-run-output="true">
<compile result="succeed" timestamp="xxx">
"c:/progra~1/borland/bds/4.0/bin/bcc32.exe" -j5 -g255 -q -c -P -Ve -Vx -a8 -b- -v -Od -tWC -tWR -tWC -tWD -WM- -DBOOST_ALL_NO_LIB=1 -DBOOST_DYN_LINK=1 -I".." -I"c:/progra~1/borland/bds/4.0/include/" -o"C:\Users\Misha\Stuff\boost\RC_1_34_0\boost\tools\regression\test\test-cases\borland\actual\libs\config\test\link\borland-5.8.2\debug\link_test.obj" "..\libs\config\test\link\link_test.cpp"
..\libs\config\test\link\link_test.cpp:
file C:\Users\Misha\Stuff\boost\RC_1_34_0\boost\tools\regression\test\test-cases\borland\actual\libs\config\test\link\borland-5.8.2\debug\link_test-bcb-d-1_34.dll.rsp
"C:\Users\Misha\Stuff\boost\RC_1_34_0\boost\tools\regression\test\test-cases\borland\actual\libs\config\test\link\borland-5.8.2\debug\link_test.obj"
</compile>
<link result="succeed" timestamp="xxx">
set "PATH=c:/progra~1/borland/bds/4.0/bin/;%PATH%"
"c:/progra~1/borland/bds/4.0/bin/bcc32.exe" -v -q -v -v -tWD -tWC -tWR -tWC -tWD -WM- -L"c:/progra~1/borland/bds/4.0/lib" -e"C:\Users\Misha\Stuff\boost\RC_1_34_0\boost\tools\regression\test\test-cases\borland\actual\libs\config\test\link\borland-5.8.2\debug\link_test-bcb-d-1_34.dll" @"C:\Users\Misha\Stuff\boost\RC_1_34_0\boost\tools\regression\test\test-cases\borland\actual\libs\config\test\link\borland-5.8.2\debug\link_test-bcb-d-1_34.dll.rsp" &amp;&amp; "c:/progra~1/borland/bds/4.0/bin/implib" "C:\Users\Misha\Stuff\boost\RC_1_34_0\boost\tools\regression\test\test-cases\borland\actual\libs\config\test\link\borland-5.8.2\debug\link_test-bcb-d-1_34.lib" "C:\Users\Misha\Stuff\boost\RC_1_34_0\boost\tools\regression\test\test-cases\borland\actual\libs\config\test\link\borland-5.8.2\debug\link_test-bcb-d-1_34.dll"
Borland Implib Version 3.0.22 Copyright (c) 1991, 2000 Inprise Corporation
file C:\Users\Misha\Stuff\boost\RC_1_34_0\boost\tools\regression\test\test-cases\borland\actual\status\config_link_test.test\borland-5.8.2\debug\config_link_test.exe.rsp
"C:\Users\Misha\Stuff\boost\RC_1_34_0\boost\tools\regression\test\test-cases\borland\actual\status\config_link_test.test\borland-5.8.2\debug\main.obj"
"C:\Users\Misha\Stuff\boost\RC_1_34_0\boost\tools\regression\test\test-cases\borland\actual\libs\config\test\link\borland-5.8.2\debug\link_test-bcb-d-1_34.lib"
</link>
</test-log>
<test-log library="config" test-name="config_link_test" test-type="run" test-program="libs/config/test/link/main.cpp" target-directory="status/config_link_test.test/borland-5.8.2/debug" toolset="borland-5.8.2" show-run-output="false">
<compile result="succeed" timestamp="xxx">
"c:/progra~1/borland/bds/4.0/bin/bcc32.exe" -j5 -g255 -q -c -P -Ve -Vx -a8 -b- -v -Od -tWC -tWR -tWC -WM- -DBOOST_ALL_NO_LIB=1 -DBOOST_CONFIG_NO_LIB=1 -DBOOST_DYN_LINK=1 -I".." -I"c:/progra~1/borland/bds/4.0/include/" -o"C:\Users\Misha\Stuff\boost\RC_1_34_0\boost\tools\regression\test\test-cases\borland\actual\status\config_link_test.test\borland-5.8.2\debug\main.obj" "g:\boost\rc-1-34\boost\libs\config\test\link\main.cpp"
g:\boost\rc-1-34\boost\libs\config\test\link\main.cpp:
</compile>
<link result="succeed" timestamp="xxx">
set "PATH=c:/progra~1/borland/bds/4.0/bin/;%PATH%"
"c:/progra~1/borland/bds/4.0/bin/bcc32.exe" -v -q -v -v -tWD -tWC -tWR -tWC -WM- -L"c:/progra~1/borland/bds/4.0/lib" -e"C:\Users\Misha\Stuff\boost\RC_1_34_0\boost\tools\regression\test\test-cases\borland\actual\status\config_link_test.test\borland-5.8.2\debug\config_link_test.exe" @"C:\Users\Misha\Stuff\boost\RC_1_34_0\boost\tools\regression\test\test-cases\borland\actual\status\config_link_test.test\borland-5.8.2\debug\config_link_test.exe.rsp"
</link>
<run result="succeed" timestamp="xxx">
</run>
</test-log>
</tests>

View file

@ -1,19 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"
xmlns:func="http://exslt.org/functions"
xmlns:meta="http://www.meta-comm.com"
extension-element-prefixes="func"
exclude-result-prefixes="func meta">
<xsl:import href="../../../xsl/v2/add_expected_results.xsl"/>
<xsl:template match="/">
<xsl:apply-templates/>
</xsl:template>
<xsl:template match="//test-log">
<xsl:variable name="is_complete" select="meta:is_test_log_complete( . )"/>
<result name="{@test-name}" target-directory="{@target-directory}" result="{$is_complete}" />
</xsl:template>
</xsl:stylesheet>

View file

@ -1,48 +0,0 @@
<?xml version="1.0"?>
<result regex="" text="" result="true"/>
<result regex="pattern" text="pattern" result="true"/>
<result regex="" text="pattern" result="false"/>
<result regex="pattern" text="" result="false"/>
<result regex="*" text="" result="true"/>
<result regex="*" text="pattern" result="true"/>
<result regex="*pattern*" text="" result="false"/>
<result regex="*pattern*" text="__pattern__" result="true"/>
<result regex="*pattern*" text="pattern" result="true"/>
<result regex="*pattern*" text="patter" result="false"/>
<result regex="*pattern*" text="patte__" result="false"/>
<result regex="*pattern*" text="attern" result="false"/>
<result regex="*pattern*" text="__ttern" result="false"/>
<result regex="*pattern" text="" result="false"/>
<result regex="*pattern" text="__pattern" result="true"/>
<result regex="*pattern" text="pattern" result="true"/>
<result regex="*pattern" text="pattern__" result="false"/>
<result regex="*pattern" text="patter" result="false"/>
<result regex="*pattern" text="patte__" result="false"/>
<result regex="*pattern" text="attern" result="false"/>
<result regex="*pattern" text="__ttern" result="false"/>
<result regex="pattern*" text="" result="false"/>
<result regex="pattern*" text="pattern__" result="true"/>
<result regex="pattern*" text="pattern" result="true"/>
<result regex="pattern*" text="patter" result="false"/>
<result regex="pattern*" text="__pattern" result="false"/>
<result regex="pattern*" text="attern" result="false"/>
<result regex="pattern*" text="patter_" result="false"/>
<result regex="pattern*" text="patte__" result="false"/>
<result regex="patt*ern" text="" result="false"/>
<result regex="patt*ern" text="patt__ern" result="true"/>
<result regex="patt*ern" text="pattern" result="true"/>
<result regex="patter*n" text="patter__n" result="true"/>
<result regex="p*attern" text="pttern" result="false"/>
<result regex="p*attern" text="pattern" result="true"/>
<result regex="patter*n" text="patter" result="false"/>
<result regex="p*attern" text="attern" result="false"/>
<result regex="p*attern" text="p_ttern" result="false"/>

View file

@ -1,33 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
Copyright MetaCommunications, Inc. 2003-2004.
Distributed under the Boost Software License, Version 1.0. (See
accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
-->
<xsl:stylesheet
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:exsl="http://exslt.org/common"
xmlns:func="http://exslt.org/functions"
xmlns:str="http://exslt.org/strings"
xmlns:meta="http://www.meta-comm.com"
extension-element-prefixes="func"
exclude-result-prefixes="str meta exsl"
version="1.0">
<xsl:import href="../../../xsl/v2/common.xsl"/>
<xsl:template match="/">
<xsl:apply-templates />
</xsl:template>
<xsl:template match='test'>
<xsl:variable name="result" select="meta:re_match( @pattern, @text )"/>
<xsl:variable name="expected-result" select="@result = 'true'"/>
<result regex="{@pattern}" text="{@text}" result="{$result}"/>
</xsl:template>
</xsl:stylesheet>

View file

@ -1,57 +0,0 @@
<!--
Copyright MetaCommunications, Inc. 2003-2005.
Distributed under the Boost Software License, Version 1.0. (See
accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
-->
<root>
<test pattern="" text="" result="true"/>
<test pattern="pattern" text="pattern" result="true"/>
<test pattern="" text="pattern" result="false"/>
<test pattern="pattern" text="" result="false"/>
<test pattern="*" text="" result="true"/>
<test pattern="*" text="pattern" result="true"/>
<test pattern="*pattern*" text="" result="false"/>
<test pattern="*pattern*" text="__pattern__" result="true"/>
<test pattern="*pattern*" text="pattern" result="true"/>
<test pattern="*pattern*" text="patter" result="false"/>
<test pattern="*pattern*" text="patte__" result="false"/>
<test pattern="*pattern*" text="attern" result="false"/>
<test pattern="*pattern*" text="__ttern" result="false"/>
<test pattern="*pattern" text="" result="false"/>
<test pattern="*pattern" text="__pattern" result="true"/>
<test pattern="*pattern" text="pattern" result="true"/>
<test pattern="*pattern" text="pattern__" result="false"/>
<test pattern="*pattern" text="patter" result="false"/>
<test pattern="*pattern" text="patte__" result="false"/>
<test pattern="*pattern" text="attern" result="false"/>
<test pattern="*pattern" text="__ttern" result="false"/>
<test pattern="pattern*" text="" result="false"/>
<test pattern="pattern*" text="pattern__" result="true"/>
<test pattern="pattern*" text="pattern" result="true"/>
<test pattern="pattern*" text="patter" result="false"/>
<test pattern="pattern*" text="__pattern" result="false"/>
<test pattern="pattern*" text="attern" result="false"/>
<test pattern="pattern*" text="patter_" result="false"/>
<test pattern="pattern*" text="patte__" result="false"/>
<test pattern="patt*ern" text="" result="false"/>
<test pattern="patt*ern" text="patt__ern" result="true"/>
<test pattern="patt*ern" text="pattern" result="true"/>
<test pattern="patter*n" text="patter__n" result="true"/>
<test pattern="p*attern" text="pttern" result="false"/>
<test pattern="p*attern" text="pattern" result="true"/>
<test pattern="patter*n" text="patter" result="false"/>
<test pattern="p*attern" text="attern" result="false"/>
<test pattern="p*attern" text="p_ttern" result="false"/>
</root>

View file

@ -1,10 +0,0 @@
<?xml version="1.0" encoding="iso-8859-1"?>
<test-run source="CVS" runner="BoostConsulting" timestamp="2007-01-13T06:25:21Z" platform="Windows" tag="RC_1_34_0" run-type="incremental">
<test-log status="expected" result="success" />
<test-log status="expected" />
<test-log status="expected" result="fail" />
<test-log status="unexpected" result="success" />
<test-log status="unexpected" result="fail" is-new="no" />
<test-log status="unexpected" result="fail" toolset="old_compiler" library="unusable_library" />
</test-run>

View file

@ -1,2 +0,0 @@
<?xml version="1.0"?>
<results xmlns:exsl="http://exslt.org/common"><result>expected</result><result>expected</result><result>expected</result><result>success-unexpected</result><result>fail-unexpected</result><result>unusable</result></results>

Some files were not shown because too many files have changed in this diff Show more