bitbake: Update to 1.8.1 (inc. various bug fixes, epoch support)

git-svn-id: https://svn.o-hand.com/repos/poky/trunk@1419 311d38ba-8fff-0310-9ca6-ca027cbcb966
Richard Purdie 2007-04-01 15:04:49 +00:00
parent 8b36dc2174
commit 7371e6323c
18 changed files with 542 additions and 548 deletions

View File

@ -1,10 +1,52 @@
Changes in BitBake 1.7.3: Changes in Bitbake 1.8.2:
- Catch truncated cache file errors
- Add PE (Package Epoch) support from Philipp Zabel (pH5)
- Add code to handle inter-task dependencies
- Allow operations other than assignment on flag variables
Changes in BitBake 1.7.1: Changes in Bitbake 1.8.0:
- Major updates of the dependency handling and execution - Release 1.7.x as a stable series
of tasks
- Change of the SVN Fetcher to keep the checkout around Changes in BitBake 1.7.x:
courtesy to Paul Sokolovsky (#1367) - Major updates of the dependency handling and execution
of tasks. Code from bin/bitbake replaced with runqueue.py
and taskdata.py
- New task execution code supports multithreading with a simplistic
threading algorithm controlled by BB_NUMBER_THREADS
- Change of the SVN Fetcher to keep the checkout around
courtesy of Paul Sokolovsky (#1367)
- PATH fix to bbimage (#1108)
- Allow debug domains to be specified on the commandline (-l)
- Allow 'interactive' tasks
- Logging message improvements
- Drop now unneeded BUILD_ALL_DEPS variable
- Add support for wildcards to -b option
- Major overhaul of the fetchers making a large amount of code common
including mirroring code
- Fetchers now touch md5 stamps upon access (to show activity)
- Fix -f force option when used without -b (long-standing bug)
- Add expand_cache to data_cache.py, caching expanded data (speedup)
- Allow version field in DEPENDS (ignored for now)
- Add abort flag support to the shell
- Make inherit fail if the class doesn't exist (#1478)
- Fix data.emit_env() to expand keynames as well as values
- Add ssh fetcher
- Add perforce fetcher
- Make PREFERRED_PROVIDER_foobar default to foobar if available
- Share the parser's mtime_cache, reducing the number of stat syscalls
- Compile all anonfuncs at once!
*** Anonfuncs must now use common spacing format ***
- Memorise the list of handlers in __BBHANDLERS and tasks in __BBTASKS
This removes 2 million function calls resulting in a 5-10% speedup
- Add manpage
- Update generateDotGraph to use taskData/runQueue improving accuracy
and also adding a task dependency graph
- Fix/standardise on GPLv2 licence
- Move most functionality from bin/bitbake to cooker.py and split into
separate functions
- CVS fetcher: Added support for non-default port
- Add BBINCLUDELOGS_LINES, the number of lines to read from any logfile
- Drop shebangs from lib/bb scripts
Changes in Bitbake 1.6.0: Changes in Bitbake 1.6.0:
- Better msg handling - Better msg handling
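
The PE (Package Epoch) entries above change how BitBake orders versions: once a recipe sets PE, a version is effectively the triple (pe, pv, pr), and a higher epoch outranks a numerically larger PV. A minimal sketch of that ordering, assuming the "epoch:version-revision" string form used elsewhere in this commit (an illustration only, not BitBake's actual parser):

def split_version(s):
    # Hypothetical helper: "1:2.6.18-r5" -> (1, "2.6.18", "r5");
    # a missing epoch defaults to 0, a missing revision to "r0".
    epoch = 0
    if ":" in s:
        e, s = s.split(":", 1)
        epoch = int(e)
    if "-" in s:
        version, revision = s.split("-", 1)
    else:
        version, revision = s, "r0"
    return (epoch, version, revision)

print(split_version("2.6.20-r1"))    # (0, '2.6.20', 'r1')
print(split_version("1:2.6.18-r5"))  # (1, '2.6.18', 'r5')
# Compared element by element, the second triple wins despite its lower PV,
# because its epoch is higher.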

View File

@ -1,45 +1,49 @@
AUTHORS AUTHORS
COPYING
ChangeLog ChangeLog
MANIFEST MANIFEST
setup.py setup.py
bin/bitdoc bin/bitdoc
bin/bbimage bin/bbimage
bin/bitbake bin/bitbake
lib/bb/COW.py
lib/bb/__init__.py lib/bb/__init__.py
lib/bb/build.py lib/bb/build.py
lib/bb/cache.py lib/bb/cache.py
lib/bb/cooker.py lib/bb/cooker.py
lib/bb/COW.py
lib/bb/data.py lib/bb/data.py
lib/bb/data_smart.py lib/bb/data_smart.py
lib/bb/event.py lib/bb/event.py
lib/bb/manifest.py lib/bb/fetch/__init__.py
lib/bb/methodpool.py
lib/bb/msg.py
lib/bb/providers.py
lib/bb/runqueue.py
lib/bb/shell.py
lib/bb/taskdata.py
lib/bb/utils.py
lib/bb/fetch/cvs.py lib/bb/fetch/cvs.py
lib/bb/fetch/git.py lib/bb/fetch/git.py
lib/bb/fetch/__init__.py
lib/bb/fetch/local.py lib/bb/fetch/local.py
lib/bb/fetch/perforce.py lib/bb/fetch/perforce.py
lib/bb/fetch/ssh.py lib/bb/fetch/ssh.py
lib/bb/fetch/svk.py lib/bb/fetch/svk.py
lib/bb/fetch/svn.py lib/bb/fetch/svn.py
lib/bb/fetch/wget.py lib/bb/fetch/wget.py
lib/bb/manifest.py
lib/bb/methodpool.py
lib/bb/msg.py
lib/bb/parse/__init__.py lib/bb/parse/__init__.py
lib/bb/parse/parse_py/__init__.py
lib/bb/parse/parse_py/BBHandler.py lib/bb/parse/parse_py/BBHandler.py
lib/bb/parse/parse_py/ConfHandler.py lib/bb/parse/parse_py/ConfHandler.py
lib/bb/parse/parse_py/__init__.py lib/bb/providers.py
lib/bb/runqueue.py
lib/bb/shell.py
lib/bb/taskdata.py
lib/bb/utils.py
setup.py
doc/COPYING.GPL doc/COPYING.GPL
doc/COPYING.MIT doc/COPYING.MIT
doc/bitbake.1
doc/manual/html.css doc/manual/html.css
doc/manual/Makefile doc/manual/Makefile
doc/manual/usermanual.xml doc/manual/usermanual.xml
contrib/bbdev.sh contrib/bbdev.sh
contrib/vim/syntax/bitbake.vim contrib/vim/syntax/bitbake.vim
contrib/vim/ftdetect/bitbake.vim
conf/bitbake.conf conf/bitbake.conf
classes/base.bbclass classes/base.bbclass

View File

@ -27,7 +27,7 @@ sys.path.insert(0,os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'l
import bb import bb
from bb import cooker from bb import cooker
__version__ = "1.7.4" __version__ = "1.8.1"
#============================================================================# #============================================================================#
# BBOptions # BBOptions
@ -109,15 +109,9 @@ Default BBFILES are the .bb files in the current directory.""" )
configuration.pkgs_to_build = [] configuration.pkgs_to_build = []
configuration.pkgs_to_build.extend(args[1:]) configuration.pkgs_to_build.extend(args[1:])
bb.cooker.BBCooker().cook(configuration) cooker = bb.cooker.BBCooker(configuration)
cooker.cook()
if __name__ == "__main__": if __name__ == "__main__":
main() main()
sys.exit(0)
import profile
profile.run('main()', "profile.log")
import pstats
p = pstats.Stats('profile.log')
p.sort_stats('time')
p.print_stats()
p.print_callers()

View File

@ -1,79 +0,0 @@
# Copyright (C) 2003 Chris Larson
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
die() {
bbfatal "$*"
}
bbnote() {
echo "NOTE:" "$*"
}
bbwarn() {
echo "WARNING:" "$*"
}
bbfatal() {
echo "FATAL:" "$*"
exit 1
}
bbdebug() {
test $# -ge 2 || {
echo "Usage: bbdebug level \"message\""
exit 1
}
test ${@bb.msg.debug_level} -ge $1 && {
shift
echo "DEBUG:" $*
}
}
addtask showdata
do_showdata[nostamp] = "1"
python do_showdata() {
import sys
# emit variables and shell functions
bb.data.emit_env(sys.__stdout__, d, True)
# emit the metadata which isn't valid shell
for e in bb.data.keys(d):
if bb.data.getVarFlag(e, 'python', d):
sys.__stdout__.write("\npython %s () {\n%s}\n" % (e, bb.data.getVar(e, d, 1)))
}
addtask listtasks
do_listtasks[nostamp] = "1"
python do_listtasks() {
import sys
for e in bb.data.keys(d):
if bb.data.getVarFlag(e, 'task', d):
sys.__stdout__.write("%s\n" % e)
}
addtask build
do_build[dirs] = "${TOPDIR}"
do_build[nostamp] = "1"
python base_do_build () {
bb.note("The included, default BB base.bbclass does not define a useful default task.")
bb.note("Try running the 'listtasks' task against a .bb to see what tasks are defined.")
}
EXPORT_FUNCTIONS do_clean do_mrproper do_build

View File

@ -1,58 +0,0 @@
# Copyright (C) 2003 Chris Larson
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
B = "${S}"
CVSDIR = "${DL_DIR}/cvs"
DEPENDS = ""
DEPLOY_DIR = "${TMPDIR}/deploy"
DEPLOY_DIR_IMAGE = "${DEPLOY_DIR}/images"
DL_DIR = "${TMPDIR}/downloads"
FETCHCOMMAND = ""
FETCHCOMMAND_cvs = "/usr/bin/env cvs -d${CVSROOT} co ${CVSCOOPTS} ${CVSMODULE}"
FETCHCOMMAND_svn = "/usr/bin/env svn co ${SVNCOOPTS} ${SVNROOT} ${SVNMODULE}"
FETCHCOMMAND_wget = "/usr/bin/env wget -t 5 --passive-ftp -P ${DL_DIR} ${URI}"
FILESDIR = "${@bb.which(bb.data.getVar('FILESPATH', d, 1), '.')}"
FILESPATH = "${FILE_DIRNAME}/${PF}:${FILE_DIRNAME}/${P}:${FILE_DIRNAME}/${PN}:${FILE_DIRNAME}/files:${FILE_DIRNAME}"
FILE_DIRNAME = "${@os.path.dirname(bb.data.getVar('FILE', d))}"
GITDIR = "${DL_DIR}/git"
IMAGE_CMD = "_NO_DEFINED_IMAGE_TYPES_"
IMAGE_ROOTFS = "${TMPDIR}/rootfs"
MKTEMPCMD = "mktemp -q ${TMPBASE}"
MKTEMPDIRCMD = "mktemp -d -q ${TMPBASE}"
OVERRIDES = "local:${MACHINE}:${TARGET_OS}:${TARGET_ARCH}"
P = "${PN}-${PV}"
PF = "${PN}-${PV}-${PR}"
PN = "${@bb.parse.BBHandler.vars_from_file(bb.data.getVar('FILE',d),d)[0] or 'defaultpkgname'}"
PR = "${@bb.parse.BBHandler.vars_from_file(bb.data.getVar('FILE',d),d)[2] or 'r0'}"
PROVIDES = ""
PV = "${@bb.parse.BBHandler.vars_from_file(bb.data.getVar('FILE',d),d)[1] or '1.0'}"
RESUMECOMMAND = ""
RESUMECOMMAND_wget = "/usr/bin/env wget -c -t 5 --passive-ftp -P ${DL_DIR} ${URI}"
S = "${WORKDIR}/${P}"
SRC_URI = "file://${FILE}"
STAMP = "${TMPDIR}/stamps/${PF}"
SVNDIR = "${DL_DIR}/svn"
T = "${WORKDIR}/temp"
TARGET_ARCH = "${BUILD_ARCH}"
TMPDIR = "${TOPDIR}/tmp"
UPDATECOMMAND = ""
UPDATECOMMAND_cvs = "/usr/bin/env cvs -d${CVSROOT} update ${CVSCOOPTS}"
UPDATECOMMAND_svn = "/usr/bin/env svn update ${SVNCOOPTS}"
WORKDIR = "${TMPDIR}/work/${PF}"

View File

@ -21,7 +21,7 @@
# with this program; if not, write to the Free Software Foundation, Inc., # with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
__version__ = "1.7.4" __version__ = "1.8.1"
__all__ = [ __all__ = [

View File

@ -188,7 +188,8 @@ def exec_func_shell(func, d):
maybe_fakeroot = "PATH=\"%s\" fakeroot " % bb.data.getVar("PATH", d, 1) maybe_fakeroot = "PATH=\"%s\" fakeroot " % bb.data.getVar("PATH", d, 1)
else: else:
maybe_fakeroot = '' maybe_fakeroot = ''
ret = os.system('%ssh -e %s' % (maybe_fakeroot, runfile)) lang_environment = "LC_ALL=C "
ret = os.system('%s%ssh -e %s' % (lang_environment, maybe_fakeroot, runfile))
try: try:
os.chdir(prevdir) os.chdir(prevdir)
except: except:
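
The build.py hunk above prefixes the generated run file's invocation with LC_ALL=C so shell tasks run with a predictable C locale. The command string is assembled roughly like this (a simplified sketch of the composition with a hypothetical run file path; the real code lives in exec_func_shell):

import os

def run_shell_func(runfile, fakeroot_path=None):
    # Optional fakeroot wrapper, as in the original code.
    if fakeroot_path:
        maybe_fakeroot = 'PATH="%s" fakeroot ' % fakeroot_path
    else:
        maybe_fakeroot = ''
    lang_environment = "LC_ALL=C "
    # e.g. 'LC_ALL=C sh -e /tmp/work/foo-1.0-r0/temp/run.do_compile'
    return os.system('%s%ssh -e %s' % (lang_environment, maybe_fakeroot, runfile))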

View File

@ -39,7 +39,7 @@ except ImportError:
import pickle import pickle
bb.msg.note(1, bb.msg.domain.Cache, "Importing cPickle failed. Falling back to a very slow implementation.") bb.msg.note(1, bb.msg.domain.Cache, "Importing cPickle failed. Falling back to a very slow implementation.")
__cache_version__ = "125" __cache_version__ = "126"
class Cache: class Cache:
""" """
@ -75,6 +75,9 @@ class Cache:
raise ValueError, 'Cache Version Mismatch' raise ValueError, 'Cache Version Mismatch'
if version_data['BITBAKE_VER'] != bb.__version__: if version_data['BITBAKE_VER'] != bb.__version__:
raise ValueError, 'Bitbake Version Mismatch' raise ValueError, 'Bitbake Version Mismatch'
except EOFError:
bb.msg.note(1, bb.msg.domain.Cache, "Truncated cache found, rebuilding...")
self.depends_cache = {}
except (ValueError, KeyError): except (ValueError, KeyError):
bb.msg.note(1, bb.msg.domain.Cache, "Invalid cache found, rebuilding...") bb.msg.note(1, bb.msg.domain.Cache, "Invalid cache found, rebuilding...")
self.depends_cache = {} self.depends_cache = {}
@ -251,6 +254,7 @@ class Cache:
""" """
pn = self.getVar('PN', file_name, True) pn = self.getVar('PN', file_name, True)
pe = self.getVar('PE', file_name, True) or "0"
pv = self.getVar('PV', file_name, True) pv = self.getVar('PV', file_name, True)
pr = self.getVar('PR', file_name, True) pr = self.getVar('PR', file_name, True)
dp = int(self.getVar('DEFAULT_PREFERENCE', file_name, True) or "0") dp = int(self.getVar('DEFAULT_PREFERENCE', file_name, True) or "0")
@ -272,7 +276,7 @@ class Cache:
# build FileName to PackageName lookup table # build FileName to PackageName lookup table
cacheData.pkg_fn[file_name] = pn cacheData.pkg_fn[file_name] = pn
cacheData.pkg_pvpr[file_name] = (pv,pr) cacheData.pkg_pepvpr[file_name] = (pe,pv,pr)
cacheData.pkg_dp[file_name] = dp cacheData.pkg_dp[file_name] = dp
# Build forward and reverse provider hashes # Build forward and reverse provider hashes
@ -407,7 +411,7 @@ class CacheData:
self.possible_world = [] self.possible_world = []
self.pkg_pn = {} self.pkg_pn = {}
self.pkg_fn = {} self.pkg_fn = {}
self.pkg_pvpr = {} self.pkg_pepvpr = {}
self.pkg_dp = {} self.pkg_dp = {}
self.pn_provides = {} self.pn_provides = {}
self.all_depends = Set() self.all_depends = Set()
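
With the epoch cached, the per-recipe version entry becomes a three-part tuple and pkg_pepvpr replaces pkg_pvpr. A short sketch of how a consumer of CacheData reads it back, following the same pattern the providers code in this commit uses (hypothetical helper; file_name is a parsed .bb path):

def describe(cacheData, file_name):
    # cacheData: a populated bb.cache.CacheData instance
    pn = cacheData.pkg_fn[file_name]
    pe, pv, pr = cacheData.pkg_pepvpr[file_name]   # previously (pv, pr)
    ver = "%s-%s" % (pv, pr)
    if int(pe) > 0:
        ver = "%s:%s" % (pe, ver)                  # epoch prefix, e.g. "1:2.0-r0"
    return "%s %s" % (pn, ver)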

View File

@ -30,29 +30,6 @@ import itertools
parsespin = itertools.cycle( r'|/-\\' ) parsespin = itertools.cycle( r'|/-\\' )
#============================================================================#
# BBStatistics
#============================================================================#
class BBStatistics:
"""
Manage build statistics for one run
"""
def __init__(self ):
self.attempt = 0
self.success = 0
self.fail = 0
self.deps = 0
def show( self ):
print "Build statistics:"
print " Attempted builds: %d" % self.attempt
if self.fail:
print " Failed builds: %d" % self.fail
if self.deps:
print " Dependencies not satisfied: %d" % self.deps
if self.fail or self.deps: return 1
else: return 0
#============================================================================# #============================================================================#
# BBCooker # BBCooker
#============================================================================# #============================================================================#
@ -61,43 +38,61 @@ class BBCooker:
Manages one bitbake build run Manages one bitbake build run
""" """
Statistics = BBStatistics # make it visible from the shell def __init__(self, configuration):
def __init__( self ):
self.build_cache_fail = []
self.build_cache = []
self.stats = BBStatistics()
self.status = None self.status = None
self.cache = None self.cache = None
self.bb_cache = None self.bb_cache = None
self.configuration = configuration
if self.configuration.verbose:
bb.msg.set_verbose(True)
if self.configuration.debug:
bb.msg.set_debug_level(self.configuration.debug)
else:
bb.msg.set_debug_level(0)
if self.configuration.debug_domains:
bb.msg.set_debug_domains(self.configuration.debug_domains)
self.configuration.data = bb.data.init()
for f in self.configuration.file:
self.parseConfigurationFile( f )
self.parseConfigurationFile( os.path.join( "conf", "bitbake.conf" ) )
if not self.configuration.cmd:
self.configuration.cmd = bb.data.getVar("BB_DEFAULT_TASK", self.configuration.data) or "build"
#
# Special updated configuration we use for firing events
#
self.configuration.event_data = bb.data.createCopy(self.configuration.data)
bb.data.update_data(self.configuration.event_data)
def tryBuildPackage(self, fn, item, task, the_data, build_depends): def tryBuildPackage(self, fn, item, task, the_data, build_depends):
""" """
Build one task of a package, optionally build following task depends Build one task of a package, optionally build following task depends
""" """
bb.event.fire(bb.event.PkgStarted(item, the_data)) bb.event.fire(bb.event.PkgStarted(item, the_data))
try: try:
self.stats.attempt += 1
if not build_depends: if not build_depends:
bb.data.setVarFlag('do_%s' % task, 'dontrundeps', 1, the_data) bb.data.setVarFlag('do_%s' % task, 'dontrundeps', 1, the_data)
if not self.configuration.dry_run: if not self.configuration.dry_run:
bb.build.exec_task('do_%s' % task, the_data) bb.build.exec_task('do_%s' % task, the_data)
bb.event.fire(bb.event.PkgSucceeded(item, the_data)) bb.event.fire(bb.event.PkgSucceeded(item, the_data))
self.build_cache.append(fn)
return True return True
except bb.build.FuncFailed: except bb.build.FuncFailed:
self.stats.fail += 1
bb.msg.error(bb.msg.domain.Build, "task stack execution failed") bb.msg.error(bb.msg.domain.Build, "task stack execution failed")
bb.event.fire(bb.event.PkgFailed(item, the_data)) bb.event.fire(bb.event.PkgFailed(item, the_data))
self.build_cache_fail.append(fn)
raise raise
except bb.build.EventException, e: except bb.build.EventException, e:
self.stats.fail += 1
event = e.args[1] event = e.args[1]
bb.msg.error(bb.msg.domain.Build, "%s event exception, aborting" % bb.event.getName(event)) bb.msg.error(bb.msg.domain.Build, "%s event exception, aborting" % bb.event.getName(event))
bb.event.fire(bb.event.PkgFailed(item, the_data)) bb.event.fire(bb.event.PkgFailed(item, the_data))
self.build_cache_fail.append(fn)
raise raise
def tryBuild( self, fn, build_depends): def tryBuild( self, fn, build_depends):
@ -112,12 +107,11 @@ class BBCooker:
item = self.status.pkg_fn[fn] item = self.status.pkg_fn[fn]
if bb.build.stamp_is_current('do_%s' % self.configuration.cmd, the_data): if bb.build.stamp_is_current('do_%s' % self.configuration.cmd, the_data):
self.build_cache.append(fn)
return True return True
return self.tryBuildPackage(fn, item, self.configuration.cmd, the_data, build_depends) return self.tryBuildPackage(fn, item, self.configuration.cmd, the_data, build_depends)
def showVersions( self ): def showVersions(self):
pkg_pn = self.status.pkg_pn pkg_pn = self.status.pkg_pn
preferred_versions = {} preferred_versions = {}
latest_versions = {} latest_versions = {}
@ -136,11 +130,11 @@ class BBCooker:
latest = latest_versions[p] latest = latest_versions[p]
if pref != latest: if pref != latest:
prefstr = pref[0][0] + "-" + pref[0][1] prefstr = pref[0][0] + ":" + pref[0][1] + '-' + pref[0][2]
else: else:
prefstr = "" prefstr = ""
print "%-30s %20s %20s" % (p, latest[0][0] + "-" + latest[0][1], print "%-30s %20s %20s" % (p, latest[0][0] + ":" + latest[0][1] + "-" + latest[0][2],
prefstr) prefstr)
@ -192,8 +186,8 @@ class BBCooker:
taskdata.add_unresolved(localdata, self.status) taskdata.add_unresolved(localdata, self.status)
except bb.providers.NoProvider: except bb.providers.NoProvider:
sys.exit(1) sys.exit(1)
rq = bb.runqueue.RunQueue() rq = bb.runqueue.RunQueue(self, self.configuration.data, self.status, taskdata, runlist)
rq.prepare_runqueue(self, self.configuration.data, self.status, taskdata, runlist) rq.prepare_runqueue()
seen_fnids = [] seen_fnids = []
depends_file = file('depends.dot', 'w' ) depends_file = file('depends.dot', 'w' )
@ -371,92 +365,99 @@ class BBCooker:
except ValueError: except ValueError:
bb.msg.error(bb.msg.domain.Parsing, "invalid value for BBFILE_PRIORITY_%s: \"%s\"" % (c, priority)) bb.msg.error(bb.msg.domain.Parsing, "invalid value for BBFILE_PRIORITY_%s: \"%s\"" % (c, priority))
def buildSetVars(self):
def cook(self, configuration):
""" """
We are building stuff here. We do the building Setup any variables needed before starting a build
from here. By default we try to execute task
build.
""" """
self.configuration = configuration
if self.configuration.verbose:
bb.msg.set_verbose(True)
if self.configuration.debug:
bb.msg.set_debug_level(self.configuration.debug)
else:
bb.msg.set_debug_level(0)
if self.configuration.debug_domains:
bb.msg.set_debug_domains(self.configuration.debug_domains)
self.configuration.data = bb.data.init()
for f in self.configuration.file:
self.parseConfigurationFile( f )
self.parseConfigurationFile( os.path.join( "conf", "bitbake.conf" ) )
if not self.configuration.cmd:
self.configuration.cmd = bb.data.getVar("BB_DEFAULT_TASK", self.configuration.data) or "build"
#
# Special updated configuration we use for firing events
#
self.configuration.event_data = bb.data.createCopy(self.configuration.data)
bb.data.update_data(self.configuration.event_data)
if self.configuration.show_environment:
self.showEnvironment()
sys.exit( 0 )
# inject custom variables
if not bb.data.getVar("BUILDNAME", self.configuration.data): if not bb.data.getVar("BUILDNAME", self.configuration.data):
bb.data.setVar("BUILDNAME", os.popen('date +%Y%m%d%H%M').readline().strip(), self.configuration.data) bb.data.setVar("BUILDNAME", os.popen('date +%Y%m%d%H%M').readline().strip(), self.configuration.data)
bb.data.setVar("BUILDSTART", time.strftime('%m/%d/%Y %H:%M:%S',time.gmtime()),self.configuration.data) bb.data.setVar("BUILDSTART", time.strftime('%m/%d/%Y %H:%M:%S',time.gmtime()),self.configuration.data)
def buildFile(self, buildfile):
"""
Build the file matching regexp buildfile
"""
bf = os.path.abspath(buildfile)
try:
os.stat(bf)
except OSError:
(filelist, masked) = self.collect_bbfiles()
regexp = re.compile(buildfile)
matches = []
for f in filelist:
if regexp.search(f) and os.path.isfile(f):
bf = f
matches.append(f)
if len(matches) != 1:
bb.msg.error(bb.msg.domain.Parsing, "Unable to match %s (%s matches found):" % (buildfile, len(matches)))
for f in matches:
bb.msg.error(bb.msg.domain.Parsing, " %s" % f)
sys.exit(1)
bf = matches[0]
bbfile_data = bb.parse.handle(bf, self.configuration.data)
# Remove stamp for target if force mode active
if self.configuration.force:
bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (self.configuration.cmd, bf))
bb.build.del_stamp('do_%s' % self.configuration.cmd, bbfile_data)
item = bb.data.getVar('PN', bbfile_data, 1)
try:
self.tryBuildPackage(bf, item, self.configuration.cmd, bbfile_data, True)
except bb.build.EventException:
bb.msg.error(bb.msg.domain.Build, "Build of '%s' failed" % item )
sys.exit(0)
def buildTargets(self, targets):
"""
Attempt to build the targets specified
"""
buildname = bb.data.getVar("BUILDNAME", self.configuration.data) buildname = bb.data.getVar("BUILDNAME", self.configuration.data)
bb.event.fire(bb.event.BuildStarted(buildname, targets, self.configuration.event_data))
if self.configuration.interactive: localdata = data.createCopy(self.configuration.data)
self.interactiveMode() bb.data.update_data(localdata)
bb.data.expandKeys(localdata)
if self.configuration.buildfile is not None: taskdata = bb.taskdata.TaskData(self.configuration.abort)
bf = os.path.abspath( self.configuration.buildfile )
runlist = []
try:
for k in targets:
taskdata.add_provider(localdata, self.status, k)
runlist.append([k, "do_%s" % self.configuration.cmd])
taskdata.add_unresolved(localdata, self.status)
except bb.providers.NoProvider:
sys.exit(1)
rq = bb.runqueue.RunQueue(self, self.configuration.data, self.status, taskdata, runlist)
rq.prepare_runqueue()
try:
failures = rq.execute_runqueue()
except runqueue.TaskFailure, fnids:
for fnid in fnids:
bb.msg.error(bb.msg.domain.Build, "'%s' failed" % taskdata.fn_index[fnid])
sys.exit(1)
bb.event.fire(bb.event.BuildCompleted(buildname, targets, self.configuration.event_data, failures))
sys.exit(0)
def updateCache(self):
# Import Psyco if available and not disabled
if not self.configuration.disable_psyco:
try: try:
os.stat(bf) import psyco
except OSError: except ImportError:
(filelist, masked) = self.collect_bbfiles() bb.msg.note(1, bb.msg.domain.Collection, "Psyco JIT Compiler (http://psyco.sf.net) not available. Install it to increase performance.")
regexp = re.compile(self.configuration.buildfile) else:
matches = [] psyco.bind( self.parse_bbfiles )
for f in filelist: else:
if regexp.search(f) and os.path.isfile(f): bb.msg.note(1, bb.msg.domain.Collection, "You have disabled Psyco. This decreases performance.")
bf = f
matches.append(f)
if len(matches) != 1:
bb.msg.error(bb.msg.domain.Parsing, "Unable to match %s (%s matches found):" % (self.configuration.buildfile, len(matches)))
for f in matches:
bb.msg.error(bb.msg.domain.Parsing, " %s" % f)
sys.exit(1)
bf = matches[0]
bbfile_data = bb.parse.handle(bf, self.configuration.data)
# Remove stamp for target if force mode active
if self.configuration.force:
bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (self.configuration.cmd, bf))
bb.build.del_stamp('do_%s' % self.configuration.cmd, bbfile_data)
item = bb.data.getVar('PN', bbfile_data, 1)
try:
self.tryBuildPackage(bf, item, self.configuration.cmd, bbfile_data, True)
except bb.build.EventException:
bb.msg.error(bb.msg.domain.Build, "Build of '%s' failed" % item )
sys.exit( self.stats.show() )
# initialise the parsing status now we know we will need deps
self.status = bb.cache.CacheData() self.status = bb.cache.CacheData()
ignore = bb.data.getVar("ASSUME_PROVIDED", self.configuration.data, 1) or "" ignore = bb.data.getVar("ASSUME_PROVIDED", self.configuration.data, 1) or ""
@ -464,6 +465,39 @@ class BBCooker:
self.handleCollections( bb.data.getVar("BBFILE_COLLECTIONS", self.configuration.data, 1) ) self.handleCollections( bb.data.getVar("BBFILE_COLLECTIONS", self.configuration.data, 1) )
bb.msg.debug(1, bb.msg.domain.Collection, "collecting .bb files")
(filelist, masked) = self.collect_bbfiles()
self.parse_bbfiles(filelist, masked, self.myProgressCallback)
bb.msg.debug(1, bb.msg.domain.Collection, "parsing complete")
self.buildDepgraph()
def cook(self):
"""
We are building stuff here. We do the building
from here. By default we try to execute task
build.
"""
if self.configuration.show_environment:
self.showEnvironment()
sys.exit( 0 )
self.buildSetVars()
if self.configuration.interactive:
self.interactiveMode()
if self.configuration.buildfile is not None:
return self.buildFile(self.configuration.buildfile)
# initialise the parsing status now we know we will need deps
self.updateCache()
if self.configuration.parse_only:
bb.msg.note(1, bb.msg.domain.Collection, "Requested parsing .bb files only. Exiting.")
return 0
pkgs_to_build = self.configuration.pkgs_to_build pkgs_to_build = self.configuration.pkgs_to_build
bbpkgs = bb.data.getVar('BBPKGS', self.configuration.data, 1) bbpkgs = bb.data.getVar('BBPKGS', self.configuration.data, 1)
@ -475,30 +509,7 @@ class BBCooker:
print "for usage information." print "for usage information."
sys.exit(0) sys.exit(0)
# Import Psyco if available and not disabled
if not self.configuration.disable_psyco:
try:
import psyco
except ImportError:
bb.msg.note(1, bb.msg.domain.Collection, "Psyco JIT Compiler (http://psyco.sf.net) not available. Install it to increase performance.")
else:
psyco.bind( self.parse_bbfiles )
else:
bb.msg.note(1, bb.msg.domain.Collection, "You have disabled Psyco. This decreases performance.")
try: try:
bb.msg.debug(1, bb.msg.domain.Collection, "collecting .bb files")
(filelist, masked) = self.collect_bbfiles()
self.parse_bbfiles(filelist, masked, self.myProgressCallback)
bb.msg.debug(1, bb.msg.domain.Collection, "parsing complete")
print
if self.configuration.parse_only:
bb.msg.note(1, bb.msg.domain.Collection, "Requested parsing .bb files only. Exiting.")
return
self.buildDepgraph()
if self.configuration.show_versions: if self.configuration.show_versions:
self.showVersions() self.showVersions()
sys.exit( 0 ) sys.exit( 0 )
@ -512,34 +523,7 @@ class BBCooker:
self.generateDotGraph( pkgs_to_build, self.configuration.ignored_dot_deps ) self.generateDotGraph( pkgs_to_build, self.configuration.ignored_dot_deps )
sys.exit( 0 ) sys.exit( 0 )
bb.event.fire(bb.event.BuildStarted(buildname, pkgs_to_build, self.configuration.event_data)) return self.buildTargets(pkgs_to_build)
localdata = data.createCopy(self.configuration.data)
bb.data.update_data(localdata)
bb.data.expandKeys(localdata)
taskdata = bb.taskdata.TaskData(self.configuration.abort)
runlist = []
try:
for k in pkgs_to_build:
taskdata.add_provider(localdata, self.status, k)
runlist.append([k, "do_%s" % self.configuration.cmd])
taskdata.add_unresolved(localdata, self.status)
except bb.providers.NoProvider:
sys.exit(1)
rq = bb.runqueue.RunQueue()
rq.prepare_runqueue(self, self.configuration.data, self.status, taskdata, runlist)
try:
failures = rq.execute_runqueue(self, self.configuration.data, self.status, taskdata, runlist)
except runqueue.TaskFailure, fnids:
for fnid in fnids:
bb.msg.error(bb.msg.domain.Build, "'%s' failed" % taskdata.fn_index[fnid])
sys.exit(1)
bb.event.fire(bb.event.BuildCompleted(buildname, pkgs_to_build, self.configuration.event_data, failures))
sys.exit( self.stats.show() )
except KeyboardInterrupt: except KeyboardInterrupt:
bb.msg.note(1, bb.msg.domain.Collection, "KeyboardInterrupt - Build not completed.") bb.msg.note(1, bb.msg.domain.Collection, "KeyboardInterrupt - Build not completed.")
@ -556,13 +540,17 @@ class BBCooker:
return bbfiles return bbfiles
def find_bbfiles( self, path ): def find_bbfiles( self, path ):
"""Find all the .bb files in a directory (uses find)""" """Find all the .bb files in a directory"""
findcmd = 'find ' + path + ' -name *.bb | grep -v SCCS/' from os.path import join
try:
finddata = os.popen(findcmd) found = []
except OSError: for dir, dirs, files in os.walk(path):
return [] for ignored in ('SCCS', 'CVS', '.svn'):
return finddata.readlines() if ignored in dirs:
dirs.remove(ignored)
found += [join(dir,f) for f in files if f.endswith('.bb')]
return found
def collect_bbfiles( self ): def collect_bbfiles( self ):
"""Collect all available .bb build files""" """Collect all available .bb build files"""

View File

@ -23,14 +23,13 @@ BitBake build tools.
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os, re import os, re
import bb.data
import bb.utils import bb.utils
class Event: class Event:
"""Base class for events""" """Base class for events"""
type = "Event" type = "Event"
def __init__(self, d = bb.data.init()): def __init__(self, d):
self._data = d self._data = d
def getData(self): def getData(self):
@ -129,7 +128,7 @@ def getName(e):
class PkgBase(Event): class PkgBase(Event):
"""Base class for package events""" """Base class for package events"""
def __init__(self, t, d = bb.data.init()): def __init__(self, t, d):
self._pkg = t self._pkg = t
Event.__init__(self, d) Event.__init__(self, d)
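
Dropping the bb.data.init() default means callers must now pass the datastore explicitly when constructing or firing events, as the cooker already does for the Pkg* events. For example (a standalone illustration; in practice the datastore is the recipe's parsed data, not a fresh one):

import bb.event, bb.data

d = bb.data.init()
bb.event.fire(bb.event.PkgStarted("some-package", d))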

View File

@ -91,6 +91,12 @@ class Svn(Fetch):
elif ud.date != "now": elif ud.date != "now":
options.append("-r {%s}" % ud.date) options.append("-r {%s}" % ud.date)
if ud.user:
options.append("--username %s" % ud.user)
if ud.pswd:
options.append("--password %s" % ud.pswd)
localdata = data.createCopy(d) localdata = data.createCopy(d)
data.setVar('OVERRIDES', "svn:%s" % data.getVar('OVERRIDES', localdata), localdata) data.setVar('OVERRIDES', "svn:%s" % data.getVar('OVERRIDES', localdata), localdata)
data.update_data(localdata) data.update_data(localdata)

View File

@ -23,7 +23,7 @@ Message handling infrastructure for bitbake
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys, os, re, bb import sys, os, re, bb
from bb import utils from bb import utils, event
debug_level = {} debug_level = {}
@ -42,6 +42,29 @@ domain = bb.utils.Enum(
'TaskData', 'TaskData',
'Util') 'Util')
class MsgBase(bb.event.Event):
"""Base class for messages"""
def __init__(self, msg, d ):
self._message = msg
event.Event.__init__(self, d)
class MsgDebug(MsgBase):
"""Debug Message"""
class MsgNote(MsgBase):
"""Note Message"""
class MsgWarn(MsgBase):
"""Warning Message"""
class MsgError(MsgBase):
"""Error Message"""
class MsgFatal(MsgBase):
"""Fatal Message"""
# #
# Message control functions # Message control functions
# #
@ -71,6 +94,7 @@ def set_debug_domains(domains):
def debug(level, domain, msg, fn = None): def debug(level, domain, msg, fn = None):
if debug_level[domain] >= level: if debug_level[domain] >= level:
bb.event.fire(MsgDebug(msg, None))
print 'DEBUG: ' + msg print 'DEBUG: ' + msg
def note(level, domain, msg, fn = None): def note(level, domain, msg, fn = None):
@ -91,17 +115,22 @@ def fatal(domain, msg, fn = None):
# #
def std_debug(lvl, msg): def std_debug(lvl, msg):
if debug_level['default'] >= lvl: if debug_level['default'] >= lvl:
bb.event.fire(MsgDebug(msg, None))
print 'DEBUG: ' + msg print 'DEBUG: ' + msg
def std_note(msg): def std_note(msg):
bb.event.fire(MsgNote(msg, None))
print 'NOTE: ' + msg print 'NOTE: ' + msg
def std_warn(msg): def std_warn(msg):
bb.event.fire(MsgWarn(msg, None))
print 'WARNING: ' + msg print 'WARNING: ' + msg
def std_error(msg): def std_error(msg):
bb.event.fire(MsgError(msg, None))
print 'ERROR: ' + msg print 'ERROR: ' + msg
def std_fatal(msg): def std_fatal(msg):
bb.event.fire(MsgFatal(msg, None))
print 'ERROR: ' + msg print 'ERROR: ' + msg
sys.exit(1) sys.exit(1)
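
Because each std_* helper now fires a corresponding Msg* event before printing, a front end can observe messages through the event system instead of scraping stdout. A tiny illustration of what such an event carries (constructed directly here; the message text is made up):

import bb.event, bb.msg

note = bb.msg.MsgNote("pretend build message", None)
print("%s: %s" % (bb.event.getName(note), note._message))
# -> MsgNote: pretend build message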

View File

@ -161,6 +161,12 @@ def handle(fn, data, include = 0):
return data return data
def feeder(lineno, s, fn, data): def feeder(lineno, s, fn, data):
def getFunc(groupd, key, data):
if 'flag' in groupd and groupd['flag'] != None:
return bb.data.getVarFlag(key, groupd['flag'], data)
else:
return bb.data.getVar(key, data)
m = __config_regexp__.match(s) m = __config_regexp__.match(s)
if m: if m:
groupd = m.groupdict() groupd = m.groupdict()
@ -168,19 +174,19 @@ def feeder(lineno, s, fn, data):
if "exp" in groupd and groupd["exp"] != None: if "exp" in groupd and groupd["exp"] != None:
bb.data.setVarFlag(key, "export", 1, data) bb.data.setVarFlag(key, "export", 1, data)
if "ques" in groupd and groupd["ques"] != None: if "ques" in groupd and groupd["ques"] != None:
val = bb.data.getVar(key, data) val = getFunc(groupd, key, data)
if val == None: if val == None:
val = groupd["value"] val = groupd["value"]
elif "colon" in groupd and groupd["colon"] != None: elif "colon" in groupd and groupd["colon"] != None:
val = bb.data.expand(groupd["value"], data) val = bb.data.expand(groupd["value"], data)
elif "append" in groupd and groupd["append"] != None: elif "append" in groupd and groupd["append"] != None:
val = "%s %s" % ((bb.data.getVar(key, data) or ""), groupd["value"]) val = "%s %s" % ((getFunc(groupd, key, data) or ""), groupd["value"])
elif "prepend" in groupd and groupd["prepend"] != None: elif "prepend" in groupd and groupd["prepend"] != None:
val = "%s %s" % (groupd["value"], (bb.data.getVar(key, data) or "")) val = "%s %s" % (groupd["value"], (getFunc(groupd, key, data) or ""))
elif "postdot" in groupd and groupd["postdot"] != None: elif "postdot" in groupd and groupd["postdot"] != None:
val = "%s%s" % ((bb.data.getVar(key, data) or ""), groupd["value"]) val = "%s%s" % ((getFunc(groupd, key, data) or ""), groupd["value"])
elif "predot" in groupd and groupd["predot"] != None: elif "predot" in groupd and groupd["predot"] != None:
val = "%s%s" % (groupd["value"], (bb.data.getVar(key, data) or "")) val = "%s%s" % (groupd["value"], (getFunc(groupd, key, data) or ""))
else: else:
val = groupd["value"] val = groupd["value"]
if 'flag' in groupd and groupd['flag'] != None: if 'flag' in groupd and groupd['flag'] != None:
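
The getFunc() helper makes the append, prepend, and dot operators read the current value of a flag variable (VAR[flag]) rather than the plain variable, which is the "operations other than assignment on flag variables" item from the ChangeLog. A rough equivalent of what += on a flag now does, written with the bb.data calls the parser itself uses (hypothetical variable names and values):

import bb.data

d = bb.data.init()
bb.data.setVarFlag("do_install", "deps", "first", d)
# Conceptually: do_install[deps] += "second"
current = bb.data.getVarFlag("do_install", "deps", d) or ""
bb.data.setVarFlag("do_install", "deps", "%s %s" % (current, "second"), d)
print(bb.data.getVarFlag("do_install", "deps", d))   # -> first second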

View File

@ -61,19 +61,27 @@ def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
preferred_v = bb.data.getVar('PREFERRED_VERSION_%s' % pn, localdata, True) preferred_v = bb.data.getVar('PREFERRED_VERSION_%s' % pn, localdata, True)
if preferred_v: if preferred_v:
m = re.match('(.*)_(.*)', preferred_v) m = re.match('(\d+:)*(.*)(_.*)*', preferred_v)
if m: if m:
preferred_v = m.group(1) if m.group(1):
preferred_r = m.group(2) preferred_e = int(m.group(1)[:-1])
else:
preferred_e = None
preferred_v = m.group(2)
if m.group(3):
preferred_r = m.group(3)[1:]
else:
preferred_r = None
else: else:
preferred_e = None
preferred_r = None preferred_r = None
for file_set in tmp_pn: for file_set in tmp_pn:
for f in file_set: for f in file_set:
pv,pr = dataCache.pkg_pvpr[f] pe,pv,pr = dataCache.pkg_pepvpr[f]
if preferred_v == pv and (preferred_r == pr or preferred_r == None): if preferred_v == pv and (preferred_r == pr or preferred_r == None) and (preferred_e == pe or preferred_e == None):
preferred_file = f preferred_file = f
preferred_ver = (pv, pr) preferred_ver = (pe, pv, pr)
break break
if preferred_file: if preferred_file:
break; break;
@ -81,6 +89,8 @@ def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
pv_str = '%s-%s' % (preferred_v, preferred_r) pv_str = '%s-%s' % (preferred_v, preferred_r)
else: else:
pv_str = preferred_v pv_str = preferred_v
if not (preferred_e is None):
pv_str = '%s:%s' % (preferred_e, pv_str)
itemstr = "" itemstr = ""
if item: if item:
itemstr = " (for item %s)" % item itemstr = " (for item %s)" % item
@ -97,11 +107,11 @@ def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
latest_p = 0 latest_p = 0
latest_f = None latest_f = None
for file_name in files: for file_name in files:
pv,pr = dataCache.pkg_pvpr[file_name] pe,pv,pr = dataCache.pkg_pepvpr[file_name]
dp = dataCache.pkg_dp[file_name] dp = dataCache.pkg_dp[file_name]
if (latest is None) or ((latest_p == dp) and (utils.vercmp(latest, (pv, pr)) < 0)) or (dp > latest_p): if (latest is None) or ((latest_p == dp) and (utils.vercmp(latest, (pe, pv, pr)) < 0)) or (dp > latest_p):
latest = (pv, pr) latest = (pe, pv, pr)
latest_f = file_name latest_f = file_name
latest_p = dp latest_p = dp
if preferred_file is None: if preferred_file is None:
@ -110,10 +120,7 @@ def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
return (latest,latest_f,preferred_ver, preferred_file) return (latest,latest_f,preferred_ver, preferred_file)
# def filterProviders(providers, item, cfgData, dataCache):
# RP - build_cache_fail needs to move elsewhere
#
def filterProviders(providers, item, cfgData, dataCache, build_cache_fail = {}):
""" """
Take a list of providers and filter/reorder according to the Take a list of providers and filter/reorder according to the
environment variables and previous build results environment variables and previous build results
@ -135,12 +142,6 @@ def filterProviders(providers, item, cfgData, dataCache, build_cache_fail = {}):
preferred_versions[pn] = bb.providers.findBestProvider(pn, cfgData, dataCache, pkg_pn, item)[2:4] preferred_versions[pn] = bb.providers.findBestProvider(pn, cfgData, dataCache, pkg_pn, item)[2:4]
eligible.append(preferred_versions[pn][1]) eligible.append(preferred_versions[pn][1])
for p in eligible:
if p in build_cache_fail:
bb.msg.debug(1, bb.msg.domain.Provider, "rejecting already-failed %s" % p)
eligible.remove(p)
if len(eligible) == 0: if len(eligible) == 0:
bb.msg.error(bb.msg.domain.Provider, "no eligible providers for %s" % item) bb.msg.error(bb.msg.domain.Provider, "no eligible providers for %s" % item)
return 0 return 0
@ -162,7 +163,7 @@ def filterProviders(providers, item, cfgData, dataCache, build_cache_fail = {}):
# if so, bump it to the head of the queue # if so, bump it to the head of the queue
for p in providers: for p in providers:
pn = dataCache.pkg_fn[p] pn = dataCache.pkg_fn[p]
pv, pr = dataCache.pkg_pvpr[p] pe, pv, pr = dataCache.pkg_pepvpr[p]
stamp = '%s.do_populate_staging' % dataCache.stamp[p] stamp = '%s.do_populate_staging' % dataCache.stamp[p]
if os.path.exists(stamp): if os.path.exists(stamp):
@ -171,7 +172,11 @@ def filterProviders(providers, item, cfgData, dataCache, build_cache_fail = {}):
# package was made ineligible by already-failed check # package was made ineligible by already-failed check
continue continue
oldver = "%s-%s" % (pv, pr) oldver = "%s-%s" % (pv, pr)
newver = '-'.join(newvers) if pe > 0:
oldver = "%s:%s" % (pe, oldver)
newver = "%s-%s" % (newvers[1], newvers[2])
if newvers[0] > 0:
newver = "%s:%s" % (newvers[0], newver)
if (newver != oldver): if (newver != oldver):
extra_chat = "%s (%s) already staged but upgrading to %s to satisfy %s" % (pn, oldver, newver, item) extra_chat = "%s (%s) already staged but upgrading to %s to satisfy %s" % (pn, oldver, newver, item)
else: else:
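
findBestProvider() now accepts an optional epoch in PREFERRED_VERSION_<pn> and compares the result against the cached (pe, pv, pr) triple. A minimal sketch of splitting such a value into its parts, assuming the "epoch:version_revision" form the code above targets (this uses a simplified regular expression, not the exact one in the diff):

import re

def split_preferred(value):
    # "1:2.6.18_r5" -> (1, "2.6.18", "r5"); "2.6.20" -> (None, "2.6.20", None)
    m = re.match(r'(?:(\d+):)?(.*?)(?:_(r\d+))?$', value)
    if m.group(1):
        pe = int(m.group(1))
    else:
        pe = None
    return (pe, m.group(2), m.group(3))

print(split_preferred("2.6.20"))
print(split_preferred("1:2.6.18_r5"))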

View File

@ -25,20 +25,47 @@ Handles preparation and execution of a queue of tasks
from bb import msg, data, fetch, event, mkdirhier, utils from bb import msg, data, fetch, event, mkdirhier, utils
from sets import Set from sets import Set
import bb, os, sys import bb, os, sys
import signal
class TaskFailure(Exception): class TaskFailure(Exception):
"""Exception raised when a task in a runqueue fails""" """Exception raised when a task in a runqueue fails"""
def __init__(self, x): def __init__(self, x):
self.args = x self.args = x
class RunQueueStats:
"""
Holds statistics on the tasks handled by the associated runQueue
"""
def __init__(self):
self.completed = 0
self.skipped = 0
self.failed = 0
def taskFailed(self):
self.failed = self.failed + 1
def taskCompleted(self):
self.completed = self.completed + 1
def taskSkipped(self):
self.skipped = self.skipped + 1
class RunQueue: class RunQueue:
""" """
BitBake Run Queue implementation BitBake Run Queue implementation
""" """
def __init__(self): def __init__(self, cooker, cfgData, dataCache, taskData, targets):
self.reset_runqueue() self.reset_runqueue()
self.cooker = cooker
self.dataCache = dataCache
self.taskData = taskData
self.targets = targets
self.number_tasks = int(bb.data.getVar("BB_NUMBER_THREADS", cfgData) or 1)
def reset_runqueue(self): def reset_runqueue(self):
self.runq_fnid = [] self.runq_fnid = []
self.runq_task = [] self.runq_task = []
self.runq_depends = [] self.runq_depends = []
@ -46,16 +73,15 @@ class RunQueue:
self.runq_weight = [] self.runq_weight = []
self.prio_map = [] self.prio_map = []
def get_user_idstring(self, task, taskData): def get_user_idstring(self, task):
fn = taskData.fn_index[self.runq_fnid[task]] fn = self.taskData.fn_index[self.runq_fnid[task]]
taskname = self.runq_task[task] taskname = self.runq_task[task]
return "%s, %s" % (fn, taskname) return "%s, %s" % (fn, taskname)
def prepare_runqueue(self, cooker, cfgData, dataCache, taskData, targets): def prepare_runqueue(self):
""" """
Turn a set of taskData into a RunQueue and compute data needed Turn a set of taskData into a RunQueue and compute data needed
to optimise the execution order. to optimise the execution order.
targets is list of paired values - a provider name and the task to run
""" """
depends = [] depends = []
@ -63,12 +89,14 @@ class RunQueue:
runq_build = [] runq_build = []
runq_done = [] runq_done = []
taskData = self.taskData
bb.msg.note(1, bb.msg.domain.RunQueue, "Preparing Runqueue") bb.msg.note(1, bb.msg.domain.RunQueue, "Preparing Runqueue")
for task in range(len(taskData.tasks_name)): for task in range(len(taskData.tasks_name)):
fnid = taskData.tasks_fnid[task] fnid = taskData.tasks_fnid[task]
fn = taskData.fn_index[fnid] fn = taskData.fn_index[fnid]
task_deps = dataCache.task_deps[fn] task_deps = self.dataCache.task_deps[fn]
if fnid not in taskData.failed_fnids: if fnid not in taskData.failed_fnids:
@ -94,6 +122,15 @@ class RunQueue:
dep = taskData.fn_index[depdata] dep = taskData.fn_index[depdata]
depends.append(taskData.gettask_id(dep, taskname)) depends.append(taskData.gettask_id(dep, taskname))
idepends = taskData.tasks_idepends[task]
for idepend in idepends:
depid = int(idepend.split(":")[0])
if depid in taskData.build_targets:
depdata = taskData.build_targets[depid][0]
if depdata:
dep = taskData.fn_index[depdata]
depends.append(taskData.gettask_id(dep, idepend.split(":")[1]))
def add_recursive_build(depid): def add_recursive_build(depid):
""" """
Add build depends of depid to depends Add build depends of depid to depends
@ -197,7 +234,7 @@ class RunQueue:
for depend in depends: for depend in depends:
mark_active(depend, depth+1) mark_active(depend, depth+1)
for target in targets: for target in self.targets:
targetid = taskData.getbuild_id(target[0]) targetid = taskData.getbuild_id(target[0])
if targetid not in taskData.build_targets: if targetid not in taskData.build_targets:
@ -209,10 +246,10 @@ class RunQueue:
fnid = taskData.build_targets[targetid][0] fnid = taskData.build_targets[targetid][0]
# Remove stamps for targets if force mode active # Remove stamps for targets if force mode active
if cooker.configuration.force: if self.cooker.configuration.force:
fn = taskData.fn_index[fnid] fn = taskData.fn_index[fnid]
bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (target[1], fn)) bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (target[1], fn))
bb.build.del_stamp(target[1], dataCache, fn) bb.build.del_stamp(target[1], self.dataCache, fn)
if fnid in taskData.failed_fnids: if fnid in taskData.failed_fnids:
continue continue
@ -299,18 +336,18 @@ class RunQueue:
seen.append(taskid) seen.append(taskid)
for revdep in self.runq_revdeps[taskid]: for revdep in self.runq_revdeps[taskid]:
if runq_done[revdep] == 0 and revdep not in seen and not finish: if runq_done[revdep] == 0 and revdep not in seen and not finish:
bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) (depends: %s)" % (revdep, self.get_user_idstring(revdep, taskData), self.runq_depends[revdep])) bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) (depends: %s)" % (revdep, self.get_user_idstring(revdep), self.runq_depends[revdep]))
if revdep in deps_seen: if revdep in deps_seen:
bb.msg.error(bb.msg.domain.RunQueue, "Chain ends at Task %s (%s)" % (revdep, self.get_user_idstring(revdep, taskData))) bb.msg.error(bb.msg.domain.RunQueue, "Chain ends at Task %s (%s)" % (revdep, self.get_user_idstring(revdep)))
finish = True finish = True
return return
for dep in self.runq_depends[revdep]: for dep in self.runq_depends[revdep]:
deps_seen.append(dep) deps_seen.append(dep)
print_chain(revdep, finish) print_chain(revdep, finish)
print_chain(task, False) print_chain(task, False)
bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) not processed!\nThis is probably a circular dependency (the chain might be printed above)." % (task, self.get_user_idstring(task, taskData))) bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) not processed!\nThis is probably a circular dependency (the chain might be printed above)." % (task, self.get_user_idstring(task)))
if runq_weight1[task] != 0: if runq_weight1[task] != 0:
bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) count not zero!" % (task, self.get_user_idstring(task, taskData))) bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) count not zero!" % (task, self.get_user_idstring(task)))
# Make a weight sorted map # Make a weight sorted map
from copy import deepcopy from copy import deepcopy
@ -328,7 +365,7 @@ class RunQueue:
#self.dump_data(taskData) #self.dump_data(taskData)
def execute_runqueue(self, cooker, cfgData, dataCache, taskData, runlist): def execute_runqueue(self):
""" """
Run the tasks in a queue prepared by prepare_runqueue Run the tasks in a queue prepared by prepare_runqueue
Upon failure, optionally try to recover the build using any alternate providers Upon failure, optionally try to recover the build using any alternate providers
@ -337,35 +374,86 @@ class RunQueue:
failures = 0 failures = 0
while 1: while 1:
failed_fnids = self.execute_runqueue_internal(cooker, cfgData, dataCache, taskData) failed_fnids = []
try:
self.execute_runqueue_internal()
finally:
if self.master_process:
failed_fnids = self.finish_runqueue()
if len(failed_fnids) == 0: if len(failed_fnids) == 0:
return failures return failures
if taskData.abort: if self.taskData.abort:
raise bb.runqueue.TaskFailure(failed_fnids) raise bb.runqueue.TaskFailure(failed_fnids)
for fnid in failed_fnids: for fnid in failed_fnids:
#print "Failure: %s %s %s" % (fnid, taskData.fn_index[fnid], self.runq_task[fnid]) #print "Failure: %s %s %s" % (fnid, self.taskData.fn_index[fnid], self.runq_task[fnid])
taskData.fail_fnid(fnid) self.taskData.fail_fnid(fnid)
failures = failures + 1 failures = failures + 1
self.reset_runqueue() self.reset_runqueue()
self.prepare_runqueue(cooker, cfgData, dataCache, taskData, runlist) self.prepare_runqueue()
def execute_runqueue_internal(self, cooker, cfgData, dataCache, taskData): def execute_runqueue_initVars(self):
self.stats = RunQueueStats()
self.active_builds = 0
self.runq_buildable = []
self.runq_running = []
self.runq_complete = []
self.build_pids = {}
self.failed_fnids = []
self.master_process = True
# Mark initial buildable tasks
for task in range(len(self.runq_fnid)):
self.runq_running.append(0)
self.runq_complete.append(0)
if len(self.runq_depends[task]) == 0:
self.runq_buildable.append(1)
else:
self.runq_buildable.append(0)
def task_complete(self, task):
"""
Mark a task as completed
Look at the reverse dependencies and mark any task with
completed dependencies as buildable
"""
self.runq_complete[task] = 1
for revdep in self.runq_revdeps[task]:
if self.runq_running[revdep] == 1:
continue
if self.runq_buildable[revdep] == 1:
continue
alldeps = 1
for dep in self.runq_depends[revdep]:
if self.runq_complete[dep] != 1:
alldeps = 0
if alldeps == 1:
self.runq_buildable[revdep] = 1
fn = self.taskData.fn_index[self.runq_fnid[revdep]]
taskname = self.runq_task[revdep]
bb.msg.debug(1, bb.msg.domain.RunQueue, "Marking task %s (%s, %s) as buildable" % (revdep, fn, taskname))
def get_next_task(self):
"""
Return the id of the highest priority task that is buildable
"""
for task1 in range(len(self.runq_fnid)):
task = self.prio_map[task1]
if self.runq_running[task] == 1:
continue
if self.runq_buildable[task] == 1:
return task
return None
def execute_runqueue_internal(self):
""" """
Run the tasks in a queue prepared by prepare_runqueue Run the tasks in a queue prepared by prepare_runqueue
""" """
import signal
bb.msg.note(1, bb.msg.domain.RunQueue, "Executing runqueue") bb.msg.note(1, bb.msg.domain.RunQueue, "Executing runqueue")
active_builds = 0 self.execute_runqueue_initVars()
tasks_completed = 0
tasks_skipped = 0
runq_buildable = []
runq_running = []
runq_complete = []
build_pids = {}
failed_fnids = []
if len(self.runq_fnid) == 0: if len(self.runq_fnid) == 0:
# nothing to do # nothing to do
@ -374,144 +462,103 @@ class RunQueue:
def sigint_handler(signum, frame): def sigint_handler(signum, frame):
raise KeyboardInterrupt raise KeyboardInterrupt
def get_next_task(data): while True:
""" task = self.get_next_task()
Return the id of the highest priority task that is buildable if task is not None:
""" fn = self.taskData.fn_index[self.runq_fnid[task]]
for task1 in range(len(data.runq_fnid)):
task = data.prio_map[task1] taskname = self.runq_task[task]
if runq_running[task] == 1: if bb.build.stamp_is_current(taskname, self.dataCache, fn):
bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task)))
self.runq_running[task] = 1
self.task_complete(task)
self.stats.taskCompleted()
self.stats.taskSkipped()
continue continue
if runq_buildable[task] == 1:
return task
return None
def task_complete(data, task): bb.msg.note(1, bb.msg.domain.RunQueue, "Running task %d of %d (ID: %s, %s)" % (self.stats.completed + self.active_builds + 1, len(self.runq_fnid), task, self.get_user_idstring(task)))
""" try:
Mark a task as completed pid = os.fork()
Look at the reverse dependencies and mark any task with except OSError, e:
completed dependencies as buildable bb.msg.fatal(bb.msg.domain.RunQueue, "fork failed: %d (%s)" % (e.errno, e.strerror))
""" if pid == 0:
runq_complete[task] = 1 # Bypass master process' handling
for revdep in data.runq_revdeps[task]: self.master_process = False
if runq_running[revdep] == 1: # Stop Ctrl+C being sent to children
continue # signal.signal(signal.SIGINT, signal.SIG_IGN)
if runq_buildable[revdep] == 1: # Make the child the process group leader
continue os.setpgid(0, 0)
alldeps = 1 sys.stdin = open('/dev/null', 'r')
for dep in data.runq_depends[revdep]: self.cooker.configuration.cmd = taskname[3:]
if runq_complete[dep] != 1:
alldeps = 0
if alldeps == 1:
runq_buildable[revdep] = 1
fn = taskData.fn_index[self.runq_fnid[revdep]]
taskname = self.runq_task[revdep]
bb.msg.debug(1, bb.msg.domain.RunQueue, "Marking task %s (%s, %s) as buildable" % (revdep, fn, taskname))
# Mark initial buildable tasks
for task in range(len(self.runq_fnid)):
runq_running.append(0)
runq_complete.append(0)
if len(self.runq_depends[task]) == 0:
runq_buildable.append(1)
else:
runq_buildable.append(0)
number_tasks = int(bb.data.getVar("BB_NUMBER_THREADS", cfgData) or 1)
try:
while 1:
task = get_next_task(self)
if task is not None:
fn = taskData.fn_index[self.runq_fnid[task]]
taskname = self.runq_task[task]
if bb.build.stamp_is_current(taskname, dataCache, fn):
bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task, taskData)))
runq_running[task] = 1
task_complete(self, task)
tasks_completed = tasks_completed + 1
tasks_skipped = tasks_skipped + 1
continue
-                    bb.msg.note(1, bb.msg.domain.RunQueue, "Running task %d of %d (ID: %s, %s)" % (tasks_completed + active_builds + 1, len(self.runq_fnid), task, self.get_user_idstring(task, taskData)))
-                    try:
-                        pid = os.fork()
-                    except OSError, e:
-                        bb.msg.fatal(bb.msg.domain.RunQueue, "fork failed: %d (%s)" % (e.errno, e.strerror))
-                    if pid == 0:
-                        # Bypass finally below
-                        active_builds = 0
-                        # Stop Ctrl+C being sent to children
-                        # signal.signal(signal.SIGINT, signal.SIG_IGN)
-                        # Make the child the process group leader
-                        os.setpgid(0, 0)
-                        sys.stdin = open('/dev/null', 'r')
-                        cooker.configuration.cmd = taskname[3:]
-                        try:
-                            cooker.tryBuild(fn, False)
-                        except bb.build.EventException:
-                            bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed")
-                            sys.exit(1)
-                        except:
-                            bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed")
-                            raise
-                        sys.exit(0)
-                    build_pids[pid] = task
-                    runq_running[task] = 1
-                    active_builds = active_builds + 1
-                    if active_builds < number_tasks:
-                        continue
-                if active_builds > 0:
-                    result = os.waitpid(-1, 0)
-                    active_builds = active_builds - 1
-                    task = build_pids[result[0]]
-                    if result[1] != 0:
-                        del build_pids[result[0]]
-                        bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed" % (task, self.get_user_idstring(task, taskData)))
-                        failed_fnids.append(self.runq_fnid[task])
-                        break
-                    task_complete(self, task)
-                    tasks_completed = tasks_completed + 1
-                    del build_pids[result[0]]
-                    continue
-                break
-        finally:
-            try:
-                while active_builds > 0:
-                    bb.msg.note(1, bb.msg.domain.RunQueue, "Waiting for %s active tasks to finish" % active_builds)
-                    tasknum = 1
-                    for k, v in build_pids.iteritems():
-                        bb.msg.note(1, bb.msg.domain.RunQueue, "%s: %s (%s)" % (tasknum, self.get_user_idstring(v, taskData), k))
-                        tasknum = tasknum + 1
-                    result = os.waitpid(-1, 0)
-                    task = build_pids[result[0]]
-                    if result[1] != 0:
-                        bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed" % (task, self.get_user_idstring(task, taskData)))
-                        failed_fnids.append(self.runq_fnid[task])
-                    del build_pids[result[0]]
-                    active_builds = active_builds - 1
-                if len(failed_fnids) > 0:
-                    return failed_fnids
-            except:
-                bb.msg.note(1, bb.msg.domain.RunQueue, "Sending SIGINT to remaining %s tasks" % active_builds)
-                for k, v in build_pids.iteritems():
-                    os.kill(-k, signal.SIGINT)
-                raise
+                    try:
+                        self.cooker.tryBuild(fn, False)
+                    except bb.build.EventException:
+                        bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed")
+                        sys.exit(1)
+                    except:
+                        bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed")
+                        raise
+                    sys.exit(0)
+                self.build_pids[pid] = task
+                self.runq_running[task] = 1
+                self.active_builds = self.active_builds + 1
+                if self.active_builds < self.number_tasks:
+                    continue
+            if self.active_builds > 0:
+                result = os.waitpid(-1, 0)
+                self.active_builds = self.active_builds - 1
+                task = self.build_pids[result[0]]
+                if result[1] != 0:
+                    del self.build_pids[result[0]]
+                    bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed" % (task, self.get_user_idstring(task)))
+                    self.failed_fnids.append(self.runq_fnid[task])
+                    self.stats.taskFailed()
+                    break
+                self.task_complete(task)
+                self.stats.taskCompleted()
+                del self.build_pids[result[0]]
+                continue
+            return
+
+    def finish_runqueue(self):
+        try:
+            while self.active_builds > 0:
+                bb.msg.note(1, bb.msg.domain.RunQueue, "Waiting for %s active tasks to finish" % self.active_builds)
+                tasknum = 1
+                for k, v in self.build_pids.iteritems():
+                    bb.msg.note(1, bb.msg.domain.RunQueue, "%s: %s (%s)" % (tasknum, self.get_user_idstring(v), k))
+                    tasknum = tasknum + 1
+                result = os.waitpid(-1, 0)
+                task = self.build_pids[result[0]]
+                if result[1] != 0:
+                    bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed" % (task, self.get_user_idstring(task)))
+                    self.failed_fnids.append(self.runq_fnid[task])
+                    self.stats.taskFailed()
+                del self.build_pids[result[0]]
+                self.active_builds = self.active_builds - 1
+            if len(self.failed_fnids) > 0:
+                return self.failed_fnids
+        except KeyboardInterrupt:
+            bb.msg.note(1, bb.msg.domain.RunQueue, "Sending SIGINT to remaining %s tasks" % self.active_builds)
+            for k, v in self.build_pids.iteritems():
+                try:
+                    os.kill(-k, signal.SIGINT)
+                except:
+                    pass
+            raise
 
         # Sanity Checks
         for task in range(len(self.runq_fnid)):
-            if runq_buildable[task] == 0:
+            if self.runq_buildable[task] == 0:
                 bb.msg.error(bb.msg.domain.RunQueue, "Task %s never buildable!" % task)
-            if runq_running[task] == 0:
+            if self.runq_running[task] == 0:
                 bb.msg.error(bb.msg.domain.RunQueue, "Task %s never ran!" % task)
-            if runq_complete[task] == 0:
+            if self.runq_complete[task] == 0:
                 bb.msg.error(bb.msg.domain.RunQueue, "Task %s never completed!" % task)
-        bb.msg.note(1, bb.msg.domain.RunQueue, "Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed." % (tasks_completed, tasks_skipped, len(failed_fnids)))
-        return failed_fnids
+        bb.msg.note(1, bb.msg.domain.RunQueue, "Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed." % (self.stats.completed, self.stats.skipped, self.stats.failed))
+        return self.failed_fnids
 
     def dump_data(self, taskQueue):
         """

View File

@@ -104,10 +104,11 @@ class BitBakeShellCommands:
     def _findProvider( self, item ):
         self._checkParsed()
+        # Need to use taskData for this information
         preferred = data.getVar( "PREFERRED_PROVIDER_%s" % item, cooker.configuration.data, 1 )
         if not preferred: preferred = item
         try:
-            lv, lf, pv, pf = Providers.findBestProvider(preferred, cooker.configuration.data, cooker.status, cooker.build_cache_fail)
+            lv, lf, pv, pf = Providers.findBestProvider(preferred, cooker.configuration.data, cooker.status)
         except KeyError:
             if item in cooker.status.providers:
                 pf = cooker.status.providers[item][0]
@@ -144,6 +145,7 @@ class BitBakeShellCommands:
     def build( self, params, cmd = "build" ):
         """Build a providee"""
+        global last_exception
         globexpr = params[0]
         self._checkParsed()
         names = globfilter( cooker.status.pkg_pn.keys(), globexpr )
@@ -152,8 +154,6 @@ class BitBakeShellCommands:
         oldcmd = cooker.configuration.cmd
         cooker.configuration.cmd = cmd
-        cooker.build_cache = []
-        cooker.build_cache_fail = []
         td = taskdata.TaskData(cooker.configuration.abort)
@@ -170,24 +170,21 @@ class BitBakeShellCommands:
             td.add_unresolved(cooker.configuration.data, cooker.status)
-            rq = runqueue.RunQueue()
-            rq.prepare_runqueue(cooker, cooker.configuration.data, cooker.status, td, tasks)
-            rq.execute_runqueue(cooker, cooker.configuration.data, cooker.status, td, tasks)
+            rq = runqueue.RunQueue(cooker, cooker.configuration.data, cooker.status, td, tasks)
+            rq.prepare_runqueue()
+            rq.execute_runqueue()
         except Providers.NoProvider:
             print "ERROR: No Provider"
-            global last_exception
             last_exception = Providers.NoProvider
         except runqueue.TaskFailure, fnids:
            for fnid in fnids:
                print "ERROR: '%s' failed" % td.fn_index[fnid]
-            global last_exception
            last_exception = runqueue.TaskFailure
        except build.EventException, e:
            print "ERROR: Couldn't build '%s'" % names
-            global last_exception
            last_exception = e
        cooker.configuration.cmd = oldcmd
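
Note: the RunQueue API change visible here is that the cooker, config data, status, taskdata and target list are now handed to the constructor rather than to every call. A hedged sketch of how a caller drives the new interface; the run_targets helper is hypothetical and only mirrors the shell code above:

    def run_targets(cooker, td, tasks):
        # New-style usage: construct once with its collaborators,
        # then prepare and execute.
        rq = runqueue.RunQueue(cooker, cooker.configuration.data, cooker.status, td, tasks)
        try:
            rq.prepare_runqueue()
            rq.execute_runqueue()
        except runqueue.TaskFailure, fnids:
            # As above, the exception carries the fnids of the failed recipes.
            return [td.fn_index[fnid] for fnid in fnids]
        return []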
@@ -236,14 +233,13 @@ class BitBakeShellCommands:
     def fileBuild( self, params, cmd = "build" ):
         """Parse and build a .bb file"""
+        global last_exception
         name = params[0]
         bf = completeFilePath( name )
         print "SHELL: Calling '%s' on '%s'" % ( cmd, bf )
         oldcmd = cooker.configuration.cmd
         cooker.configuration.cmd = cmd
-        cooker.build_cache = []
-        cooker.build_cache_fail = []
         thisdata = copy.deepcopy( initdata )
         # Caution: parse.handle modifies thisdata, hence it would
@@ -266,7 +262,6 @@ class BitBakeShellCommands:
             cooker.tryBuildPackage( os.path.abspath( bf ), item, cmd, bbfile_data, True )
         except build.EventException, e:
             print "ERROR: Couldn't build '%s'" % name
-            global last_exception
             last_exception = e
         cooker.configuration.cmd = oldcmd
@@ -537,8 +532,6 @@ SRC_URI = ""
     def status( self, params ):
         """<just for testing>"""
         print "-" * 78
-        print "build cache = '%s'" % cooker.build_cache
-        print "build cache fail = '%s'" % cooker.build_cache_fail
         print "building list = '%s'" % cooker.building_list
         print "build path = '%s'" % cooker.build_path
         print "consider_msgs_cache = '%s'" % cooker.consider_msgs_cache
@@ -557,6 +550,7 @@ SRC_URI = ""
     def which( self, params ):
         """Computes the providers for a given providee"""
+        # Need to use taskData for this information
         item = params[0]
         self._checkParsed()
@@ -565,8 +559,7 @@ SRC_URI = ""
         if not preferred: preferred = item
         try:
-            lv, lf, pv, pf = Providers.findBestProvider(preferred, cooker.configuration.data, cooker.status,
-                cooker.build_cache_fail)
+            lv, lf, pv, pf = Providers.findBestProvider(preferred, cooker.configuration.data, cooker.status)
         except KeyError:
             lv, lf, pv, pf = (None,)*4

View File

@@ -43,6 +43,7 @@ class TaskData:
         self.tasks_fnid = []
         self.tasks_name = []
         self.tasks_tdepends = []
+        self.tasks_idepends = []
         # Cache to speed up task ID lookups
         self.tasks_lookup = {}
@@ -108,6 +109,7 @@ class TaskData:
         self.tasks_name.append(task)
         self.tasks_fnid.append(fnid)
         self.tasks_tdepends.append([])
+        self.tasks_idepends.append([])
         listid = len(self.tasks_name) - 1
@@ -134,8 +136,9 @@ class TaskData:
         if fnid in self.tasks_fnid:
             return
-        # Work out task dependencies
         for task in task_graph.allnodes():
+            # Work out task dependencies
             parentids = []
             for dep in task_graph.getparents(task):
                 parentid = self.gettask_id(fn, dep)
@@ -143,6 +146,14 @@ class TaskData:
             taskid = self.gettask_id(fn, task)
             self.tasks_tdepends[taskid].extend(parentids)
+            # Touch all intertask dependencies
+            if 'depends' in task_deps and task in task_deps['depends']:
+                ids = []
+                for dep in task_deps['depends'][task].split(" "):
+                    if dep:
+                        ids.append(str(self.getbuild_id(dep.split(":")[0])) + ":" + dep.split(":")[1])
+                self.tasks_idepends[taskid].extend(ids)
         # Work out build dependencies
         if not fnid in self.depids:
             dependids = {}
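
Note: the new tasks_idepends entries record inter-task dependencies as "buildid:taskname" strings built from each task's space-separated [depends] flag. A small isolated sketch of that transformation; the helper name, example flag value and resulting ids are illustrative, not taken from this commit:

    def parse_idepends(flag_value, getbuild_id):
        # Mirrors the loop added to TaskData above: each entry in the
        # space-separated flag is "item:task"; the item is mapped to a
        # build id and the task name is kept verbatim.
        ids = []
        for dep in flag_value.split(" "):
            if dep:
                item, taskname = dep.split(":")
                ids.append(str(getbuild_id(item)) + ":" + taskname)
        return ids

    # For a (hypothetical) recipe flag such as
    #   do_patch[depends] = "quilt-native:do_populate_staging"
    # this yields something like ['12:do_populate_staging'], where 12 is
    # whatever build id getbuild_id() assigned to "quilt-native".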

View File

@@ -62,10 +62,12 @@ def vercmp_part(a, b):
         return -1
 def vercmp(ta, tb):
-    (va, ra) = ta
-    (vb, rb) = tb
-    r = vercmp_part(va, vb)
+    (ea, va, ra) = ta
+    (eb, vb, rb) = tb
+    r = int(ea)-int(eb)
+    if (r == 0):
+        r = vercmp_part(va, vb)
     if (r == 0):
         r = vercmp_part(ra, rb)
     return r
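
Note: version tuples now carry the package epoch as an extra leading field, (epoch, version, revision), and the epoch is compared first as a plain integer; only on a tie does the comparison fall through to version and then revision. A short worked example, assuming vercmp_part() keeps its existing convention of returning a positive value when its first argument is newer:

    # Epoch dominates: epoch 1 beats epoch 0 even though 0.9 < 1.0.
    assert vercmp(("1", "0.9", "r0"), ("0", "1.0", "r0")) > 0

    # Equal epochs fall through to the version comparison...
    assert vercmp(("0", "1.0", "r0"), ("0", "0.9", "r0")) > 0

    # ...and equal versions fall through to the revision.
    assert vercmp(("0", "1.0", "r1"), ("0", "1.0", "r0")) > 0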