bitbake: Update to 1.8.1 (inc. various bug fixes, epoch support)

git-svn-id: https://svn.o-hand.com/repos/poky/trunk@1419 311d38ba-8fff-0310-9ca6-ca027cbcb966
Richard Purdie 2007-04-01 15:04:49 +00:00
parent 8b36dc2174
commit 7371e6323c
18 changed files with 542 additions and 548 deletions

View File

@ -1,10 +1,52 @@
Changes in BitBake 1.7.3:
Changes in Bitbake 1.8.2:
- Catch truncated cache file errors
- Add PE (Package Epoch) support from Philipp Zabel (pH5)
- Add code to handle inter-task dependencies
- Allow operations other than assignment on flag variables
Changes in BitBake 1.7.1:
- Major updates of the dependency handling and execution
of tasks
- Change of the SVN Fetcher to keep the checkout around
courtesy of Paul Sokolovsky (#1367)
Changes in Bitbake 1.8.0:
- Release 1.7.x as a stable series
Changes in BitBake 1.7.x:
- Major updates of the dependency handling and execution
of tasks. Code from bin/bitbake replaced with runqueue.py
and taskdata.py
- New task execution code supports multithreading with a simplistic
threading algorithm controlled by BB_NUMBER_THREADS
- Change of the SVN Fetcher to keep the checkout around
courtesy of Paul Sokolovsky (#1367)
- PATH fix to bbimage (#1108)
- Allow debug domains to be specified on the commandline (-l)
- Allow 'interactive' tasks
- Logging message improvements
- Drop the now unneeded BUILD_ALL_DEPS variable
- Add support for wildcards to -b option
- Major overhaul of the fetchers making a large amount of code common
including mirroring code
- Fetchers now touch md5 stamps upon access (to show activity)
- Fix the -f force option when used without -b (long-standing bug)
- Add expand_cache to data_cache.py, caching expanded data (speedup)
- Allow version field in DEPENDS (ignored for now)
- Add abort flag support to the shell
- Make inherit fail if the class doesn't exist (#1478)
- Fix data.emit_env() to expand keynames as well as values
- Add ssh fetcher
- Add perforce fetcher
- Make PREFERRED_PROVIDER_foobar default to foobar if available
- Share the parser's mtime_cache, reducing the number of stat syscalls
- Compile all anonfuncs at once!
*** Anonfuncs must now use a common spacing format ***
- Memorise the list of handlers in __BBHANDLERS and tasks in __BBTASKS
This removes 2 million function calls resulting in a 5-10% speedup
- Add manpage
- Update generateDotGraph to use taskData/runQueue improving accuracy
and also adding a task dependency graph
- Fix/standardise on GPLv2 licence
- Move most functionality from bin/bitbake to cooker.py and split into
separate functions
- CVS fetcher: Added support for non-default port
- Add BBINCLUDELOGS_LINES, the number of lines to read from any logfile
- Drop shebangs from lib/bb scripts
Changes in Bitbake 1.6.0:
- Better msg handling

View File

@ -1,45 +1,49 @@
AUTHORS
COPYING
ChangeLog
MANIFEST
setup.py
bin/bitdoc
bin/bbimage
bin/bitbake
lib/bb/COW.py
lib/bb/__init__.py
lib/bb/build.py
lib/bb/cache.py
lib/bb/cooker.py
lib/bb/COW.py
lib/bb/data.py
lib/bb/data_smart.py
lib/bb/event.py
lib/bb/manifest.py
lib/bb/methodpool.py
lib/bb/msg.py
lib/bb/providers.py
lib/bb/runqueue.py
lib/bb/shell.py
lib/bb/taskdata.py
lib/bb/utils.py
lib/bb/fetch/__init__.py
lib/bb/fetch/cvs.py
lib/bb/fetch/git.py
lib/bb/fetch/__init__.py
lib/bb/fetch/local.py
lib/bb/fetch/perforce.py
lib/bb/fetch/ssh.py
lib/bb/fetch/svk.py
lib/bb/fetch/svn.py
lib/bb/fetch/wget.py
lib/bb/manifest.py
lib/bb/methodpool.py
lib/bb/msg.py
lib/bb/parse/__init__.py
lib/bb/parse/parse_py/__init__.py
lib/bb/parse/parse_py/BBHandler.py
lib/bb/parse/parse_py/ConfHandler.py
lib/bb/parse/parse_py/__init__.py
lib/bb/providers.py
lib/bb/runqueue.py
lib/bb/shell.py
lib/bb/taskdata.py
lib/bb/utils.py
setup.py
doc/COPYING.GPL
doc/COPYING.MIT
doc/bitbake.1
doc/manual/html.css
doc/manual/Makefile
doc/manual/usermanual.xml
contrib/bbdev.sh
contrib/vim/syntax/bitbake.vim
contrib/vim/ftdetect/bitbake.vim
conf/bitbake.conf
classes/base.bbclass

View File

@ -27,7 +27,7 @@ sys.path.insert(0,os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'l
import bb
from bb import cooker
__version__ = "1.7.4"
__version__ = "1.8.1"
#============================================================================#
# BBOptions
@ -109,15 +109,9 @@ Default BBFILES are the .bb files in the current directory.""" )
configuration.pkgs_to_build = []
configuration.pkgs_to_build.extend(args[1:])
bb.cooker.BBCooker().cook(configuration)
cooker = bb.cooker.BBCooker(configuration)
cooker.cook()
if __name__ == "__main__":
main()
sys.exit(0)
import profile
profile.run('main()', "profile.log")
import pstats
p = pstats.Stats('profile.log')
p.sort_stats('time')
p.print_stats()
p.print_callers()

View File

@ -1,79 +0,0 @@
# Copyright (C) 2003 Chris Larson
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
die() {
bbfatal "$*"
}
bbnote() {
echo "NOTE:" "$*"
}
bbwarn() {
echo "WARNING:" "$*"
}
bbfatal() {
echo "FATAL:" "$*"
exit 1
}
bbdebug() {
test $# -ge 2 || {
echo "Usage: bbdebug level \"message\""
exit 1
}
test ${@bb.msg.debug_level} -ge $1 && {
shift
echo "DEBUG:" $*
}
}
addtask showdata
do_showdata[nostamp] = "1"
python do_showdata() {
import sys
# emit variables and shell functions
bb.data.emit_env(sys.__stdout__, d, True)
# emit the metadata which isnt valid shell
for e in bb.data.keys(d):
if bb.data.getVarFlag(e, 'python', d):
sys.__stdout__.write("\npython %s () {\n%s}\n" % (e, bb.data.getVar(e, d, 1)))
}
addtask listtasks
do_listtasks[nostamp] = "1"
python do_listtasks() {
import sys
for e in bb.data.keys(d):
if bb.data.getVarFlag(e, 'task', d):
sys.__stdout__.write("%s\n" % e)
}
addtask build
do_build[dirs] = "${TOPDIR}"
do_build[nostamp] = "1"
python base_do_build () {
bb.note("The included, default BB base.bbclass does not define a useful default task.")
bb.note("Try running the 'listtasks' task against a .bb to see what tasks are defined.")
}
EXPORT_FUNCTIONS do_clean do_mrproper do_build

View File

@ -1,58 +0,0 @@
# Copyright (C) 2003 Chris Larson
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
B = "${S}"
CVSDIR = "${DL_DIR}/cvs"
DEPENDS = ""
DEPLOY_DIR = "${TMPDIR}/deploy"
DEPLOY_DIR_IMAGE = "${DEPLOY_DIR}/images"
DL_DIR = "${TMPDIR}/downloads"
FETCHCOMMAND = ""
FETCHCOMMAND_cvs = "/usr/bin/env cvs -d${CVSROOT} co ${CVSCOOPTS} ${CVSMODULE}"
FETCHCOMMAND_svn = "/usr/bin/env svn co ${SVNCOOPTS} ${SVNROOT} ${SVNMODULE}"
FETCHCOMMAND_wget = "/usr/bin/env wget -t 5 --passive-ftp -P ${DL_DIR} ${URI}"
FILESDIR = "${@bb.which(bb.data.getVar('FILESPATH', d, 1), '.')}"
FILESPATH = "${FILE_DIRNAME}/${PF}:${FILE_DIRNAME}/${P}:${FILE_DIRNAME}/${PN}:${FILE_DIRNAME}/files:${FILE_DIRNAME}"
FILE_DIRNAME = "${@os.path.dirname(bb.data.getVar('FILE', d))}"
GITDIR = "${DL_DIR}/git"
IMAGE_CMD = "_NO_DEFINED_IMAGE_TYPES_"
IMAGE_ROOTFS = "${TMPDIR}/rootfs"
MKTEMPCMD = "mktemp -q ${TMPBASE}"
MKTEMPDIRCMD = "mktemp -d -q ${TMPBASE}"
OVERRIDES = "local:${MACHINE}:${TARGET_OS}:${TARGET_ARCH}"
P = "${PN}-${PV}"
PF = "${PN}-${PV}-${PR}"
PN = "${@bb.parse.BBHandler.vars_from_file(bb.data.getVar('FILE',d),d)[0] or 'defaultpkgname'}"
PR = "${@bb.parse.BBHandler.vars_from_file(bb.data.getVar('FILE',d),d)[2] or 'r0'}"
PROVIDES = ""
PV = "${@bb.parse.BBHandler.vars_from_file(bb.data.getVar('FILE',d),d)[1] or '1.0'}"
RESUMECOMMAND = ""
RESUMECOMMAND_wget = "/usr/bin/env wget -c -t 5 --passive-ftp -P ${DL_DIR} ${URI}"
S = "${WORKDIR}/${P}"
SRC_URI = "file://${FILE}"
STAMP = "${TMPDIR}/stamps/${PF}"
SVNDIR = "${DL_DIR}/svn"
T = "${WORKDIR}/temp"
TARGET_ARCH = "${BUILD_ARCH}"
TMPDIR = "${TOPDIR}/tmp"
UPDATECOMMAND = ""
UPDATECOMMAND_cvs = "/usr/bin/env cvs -d${CVSROOT} update ${CVSCOOPTS}"
UPDATECOMMAND_svn = "/usr/bin/env svn update ${SVNCOOPTS}"
WORKDIR = "${TMPDIR}/work/${PF}"

View File

@ -21,7 +21,7 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
__version__ = "1.7.4"
__version__ = "1.8.1"
__all__ = [

View File

@ -188,7 +188,8 @@ def exec_func_shell(func, d):
maybe_fakeroot = "PATH=\"%s\" fakeroot " % bb.data.getVar("PATH", d, 1)
else:
maybe_fakeroot = ''
ret = os.system('%ssh -e %s' % (maybe_fakeroot, runfile))
lang_environment = "LC_ALL=C "
ret = os.system('%s%ssh -e %s' % (lang_environment, maybe_fakeroot, runfile))
try:
os.chdir(prevdir)
except:
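
For context, a minimal sketch of the command string this change produces, so shell tasks always run under the predictable C locale (the runfile path below is hypothetical, not taken from the commit):

lang_environment = "LC_ALL=C "
maybe_fakeroot = ""                        # or 'PATH="..." fakeroot ' for fakeroot tasks
runfile = "/tmp/run.do_compile"            # illustrative path only
command = '%s%ssh -e %s' % (lang_environment, maybe_fakeroot, runfile)
# command == 'LC_ALL=C sh -e /tmp/run.do_compile', which os.system() then executes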

View File

@ -39,7 +39,7 @@ except ImportError:
import pickle
bb.msg.note(1, bb.msg.domain.Cache, "Importing cPickle failed. Falling back to a very slow implementation.")
__cache_version__ = "125"
__cache_version__ = "126"
class Cache:
"""
@ -75,6 +75,9 @@ class Cache:
raise ValueError, 'Cache Version Mismatch'
if version_data['BITBAKE_VER'] != bb.__version__:
raise ValueError, 'Bitbake Version Mismatch'
except EOFError:
bb.msg.note(1, bb.msg.domain.Cache, "Truncated cache found, rebuilding...")
self.depends_cache = {}
except (ValueError, KeyError):
bb.msg.note(1, bb.msg.domain.Cache, "Invalid cache found, rebuilding...")
self.depends_cache = {}
@ -251,6 +254,7 @@ class Cache:
"""
pn = self.getVar('PN', file_name, True)
pe = self.getVar('PE', file_name, True) or "0"
pv = self.getVar('PV', file_name, True)
pr = self.getVar('PR', file_name, True)
dp = int(self.getVar('DEFAULT_PREFERENCE', file_name, True) or "0")
@ -272,7 +276,7 @@ class Cache:
# build FileName to PackageName lookup table
cacheData.pkg_fn[file_name] = pn
cacheData.pkg_pvpr[file_name] = (pv,pr)
cacheData.pkg_pepvpr[file_name] = (pe,pv,pr)
cacheData.pkg_dp[file_name] = dp
# Build forward and reverse provider hashes
@ -407,7 +411,7 @@ class CacheData:
self.possible_world = []
self.pkg_pn = {}
self.pkg_fn = {}
self.pkg_pvpr = {}
self.pkg_pepvpr = {}
self.pkg_dp = {}
self.pn_provides = {}
self.all_depends = Set()

View File

@ -30,29 +30,6 @@ import itertools
parsespin = itertools.cycle( r'|/-\\' )
#============================================================================#
# BBStatistics
#============================================================================#
class BBStatistics:
"""
Manage build statistics for one run
"""
def __init__(self ):
self.attempt = 0
self.success = 0
self.fail = 0
self.deps = 0
def show( self ):
print "Build statistics:"
print " Attempted builds: %d" % self.attempt
if self.fail:
print " Failed builds: %d" % self.fail
if self.deps:
print " Dependencies not satisfied: %d" % self.deps
if self.fail or self.deps: return 1
else: return 0
#============================================================================#
# BBCooker
#============================================================================#
@ -61,43 +38,61 @@ class BBCooker:
Manages one bitbake build run
"""
Statistics = BBStatistics # make it visible from the shell
def __init__( self ):
self.build_cache_fail = []
self.build_cache = []
self.stats = BBStatistics()
def __init__(self, configuration):
self.status = None
self.cache = None
self.bb_cache = None
self.configuration = configuration
if self.configuration.verbose:
bb.msg.set_verbose(True)
if self.configuration.debug:
bb.msg.set_debug_level(self.configuration.debug)
else:
bb.msg.set_debug_level(0)
if self.configuration.debug_domains:
bb.msg.set_debug_domains(self.configuration.debug_domains)
self.configuration.data = bb.data.init()
for f in self.configuration.file:
self.parseConfigurationFile( f )
self.parseConfigurationFile( os.path.join( "conf", "bitbake.conf" ) )
if not self.configuration.cmd:
self.configuration.cmd = bb.data.getVar("BB_DEFAULT_TASK", self.configuration.data) or "build"
#
# Special updated configuration we use for firing events
#
self.configuration.event_data = bb.data.createCopy(self.configuration.data)
bb.data.update_data(self.configuration.event_data)
def tryBuildPackage(self, fn, item, task, the_data, build_depends):
"""
Build one task of a package, optionally build following task depends
"""
bb.event.fire(bb.event.PkgStarted(item, the_data))
try:
self.stats.attempt += 1
if not build_depends:
bb.data.setVarFlag('do_%s' % task, 'dontrundeps', 1, the_data)
if not self.configuration.dry_run:
bb.build.exec_task('do_%s' % task, the_data)
bb.event.fire(bb.event.PkgSucceeded(item, the_data))
self.build_cache.append(fn)
return True
except bb.build.FuncFailed:
self.stats.fail += 1
bb.msg.error(bb.msg.domain.Build, "task stack execution failed")
bb.event.fire(bb.event.PkgFailed(item, the_data))
self.build_cache_fail.append(fn)
raise
except bb.build.EventException, e:
self.stats.fail += 1
event = e.args[1]
bb.msg.error(bb.msg.domain.Build, "%s event exception, aborting" % bb.event.getName(event))
bb.event.fire(bb.event.PkgFailed(item, the_data))
self.build_cache_fail.append(fn)
raise
def tryBuild( self, fn, build_depends):
@ -112,12 +107,11 @@ class BBCooker:
item = self.status.pkg_fn[fn]
if bb.build.stamp_is_current('do_%s' % self.configuration.cmd, the_data):
self.build_cache.append(fn)
return True
return self.tryBuildPackage(fn, item, self.configuration.cmd, the_data, build_depends)
def showVersions( self ):
def showVersions(self):
pkg_pn = self.status.pkg_pn
preferred_versions = {}
latest_versions = {}
@ -136,11 +130,11 @@ class BBCooker:
latest = latest_versions[p]
if pref != latest:
prefstr = pref[0][0] + "-" + pref[0][1]
prefstr = pref[0][0] + ":" + pref[0][1] + '-' + pref[0][2]
else:
prefstr = ""
print "%-30s %20s %20s" % (p, latest[0][0] + "-" + latest[0][1],
print "%-30s %20s %20s" % (p, latest[0][0] + ":" + latest[0][1] + "-" + latest[0][2],
prefstr)
@ -192,8 +186,8 @@ class BBCooker:
taskdata.add_unresolved(localdata, self.status)
except bb.providers.NoProvider:
sys.exit(1)
rq = bb.runqueue.RunQueue()
rq.prepare_runqueue(self, self.configuration.data, self.status, taskdata, runlist)
rq = bb.runqueue.RunQueue(self, self.configuration.data, self.status, taskdata, runlist)
rq.prepare_runqueue()
seen_fnids = []
depends_file = file('depends.dot', 'w' )
@ -371,92 +365,99 @@ class BBCooker:
except ValueError:
bb.msg.error(bb.msg.domain.Parsing, "invalid value for BBFILE_PRIORITY_%s: \"%s\"" % (c, priority))
def cook(self, configuration):
def buildSetVars(self):
"""
We are building stuff here. We do the building
from here. By default we try to execute task
build.
Setup any variables needed before starting a build
"""
self.configuration = configuration
if self.configuration.verbose:
bb.msg.set_verbose(True)
if self.configuration.debug:
bb.msg.set_debug_level(self.configuration.debug)
else:
bb.msg.set_debug_level(0)
if self.configuration.debug_domains:
bb.msg.set_debug_domains(self.configuration.debug_domains)
self.configuration.data = bb.data.init()
for f in self.configuration.file:
self.parseConfigurationFile( f )
self.parseConfigurationFile( os.path.join( "conf", "bitbake.conf" ) )
if not self.configuration.cmd:
self.configuration.cmd = bb.data.getVar("BB_DEFAULT_TASK", self.configuration.data) or "build"
#
# Special updated configuration we use for firing events
#
self.configuration.event_data = bb.data.createCopy(self.configuration.data)
bb.data.update_data(self.configuration.event_data)
if self.configuration.show_environment:
self.showEnvironment()
sys.exit( 0 )
# inject custom variables
if not bb.data.getVar("BUILDNAME", self.configuration.data):
bb.data.setVar("BUILDNAME", os.popen('date +%Y%m%d%H%M').readline().strip(), self.configuration.data)
bb.data.setVar("BUILDSTART", time.strftime('%m/%d/%Y %H:%M:%S',time.gmtime()),self.configuration.data)
def buildFile(self, buildfile):
"""
Build the file matching regexp buildfile
"""
bf = os.path.abspath(buildfile)
try:
os.stat(bf)
except OSError:
(filelist, masked) = self.collect_bbfiles()
regexp = re.compile(buildfile)
matches = []
for f in filelist:
if regexp.search(f) and os.path.isfile(f):
bf = f
matches.append(f)
if len(matches) != 1:
bb.msg.error(bb.msg.domain.Parsing, "Unable to match %s (%s matches found):" % (buildfile, len(matches)))
for f in matches:
bb.msg.error(bb.msg.domain.Parsing, " %s" % f)
sys.exit(1)
bf = matches[0]
bbfile_data = bb.parse.handle(bf, self.configuration.data)
# Remove stamp for target if force mode active
if self.configuration.force:
bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (self.configuration.cmd, bf))
bb.build.del_stamp('do_%s' % self.configuration.cmd, bbfile_data)
item = bb.data.getVar('PN', bbfile_data, 1)
try:
self.tryBuildPackage(bf, item, self.configuration.cmd, bbfile_data, True)
except bb.build.EventException:
bb.msg.error(bb.msg.domain.Build, "Build of '%s' failed" % item )
sys.exit(0)
def buildTargets(self, targets):
"""
Attempt to build the targets specified
"""
buildname = bb.data.getVar("BUILDNAME", self.configuration.data)
bb.event.fire(bb.event.BuildStarted(buildname, targets, self.configuration.event_data))
if self.configuration.interactive:
self.interactiveMode()
localdata = data.createCopy(self.configuration.data)
bb.data.update_data(localdata)
bb.data.expandKeys(localdata)
if self.configuration.buildfile is not None:
bf = os.path.abspath( self.configuration.buildfile )
taskdata = bb.taskdata.TaskData(self.configuration.abort)
runlist = []
try:
for k in targets:
taskdata.add_provider(localdata, self.status, k)
runlist.append([k, "do_%s" % self.configuration.cmd])
taskdata.add_unresolved(localdata, self.status)
except bb.providers.NoProvider:
sys.exit(1)
rq = bb.runqueue.RunQueue(self, self.configuration.data, self.status, taskdata, runlist)
rq.prepare_runqueue()
try:
failures = rq.execute_runqueue()
except runqueue.TaskFailure, fnids:
for fnid in fnids:
bb.msg.error(bb.msg.domain.Build, "'%s' failed" % taskdata.fn_index[fnid])
sys.exit(1)
bb.event.fire(bb.event.BuildCompleted(buildname, targets, self.configuration.event_data, failures))
sys.exit(0)
def updateCache(self):
# Import Psyco if available and not disabled
if not self.configuration.disable_psyco:
try:
os.stat(bf)
except OSError:
(filelist, masked) = self.collect_bbfiles()
regexp = re.compile(self.configuration.buildfile)
matches = []
for f in filelist:
if regexp.search(f) and os.path.isfile(f):
bf = f
matches.append(f)
if len(matches) != 1:
bb.msg.error(bb.msg.domain.Parsing, "Unable to match %s (%s matches found):" % (self.configuration.buildfile, len(matches)))
for f in matches:
bb.msg.error(bb.msg.domain.Parsing, " %s" % f)
sys.exit(1)
bf = matches[0]
import psyco
except ImportError:
bb.msg.note(1, bb.msg.domain.Collection, "Psyco JIT Compiler (http://psyco.sf.net) not available. Install it to increase performance.")
else:
psyco.bind( self.parse_bbfiles )
else:
bb.msg.note(1, bb.msg.domain.Collection, "You have disabled Psyco. This decreases performance.")
bbfile_data = bb.parse.handle(bf, self.configuration.data)
# Remove stamp for target if force mode active
if self.configuration.force:
bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (self.configuration.cmd, bf))
bb.build.del_stamp('do_%s' % self.configuration.cmd, bbfile_data)
item = bb.data.getVar('PN', bbfile_data, 1)
try:
self.tryBuildPackage(bf, item, self.configuration.cmd, bbfile_data, True)
except bb.build.EventException:
bb.msg.error(bb.msg.domain.Build, "Build of '%s' failed" % item )
sys.exit( self.stats.show() )
# initialise the parsing status now we know we will need deps
self.status = bb.cache.CacheData()
ignore = bb.data.getVar("ASSUME_PROVIDED", self.configuration.data, 1) or ""
@ -464,6 +465,39 @@ class BBCooker:
self.handleCollections( bb.data.getVar("BBFILE_COLLECTIONS", self.configuration.data, 1) )
bb.msg.debug(1, bb.msg.domain.Collection, "collecting .bb files")
(filelist, masked) = self.collect_bbfiles()
self.parse_bbfiles(filelist, masked, self.myProgressCallback)
bb.msg.debug(1, bb.msg.domain.Collection, "parsing complete")
self.buildDepgraph()
def cook(self):
"""
We are building stuff here. We do the building
from here. By default we try to execute task
build.
"""
if self.configuration.show_environment:
self.showEnvironment()
sys.exit( 0 )
self.buildSetVars()
if self.configuration.interactive:
self.interactiveMode()
if self.configuration.buildfile is not None:
return self.buildFile(self.configuration.buildfile)
# initialise the parsing status now we know we will need deps
self.updateCache()
if self.configuration.parse_only:
bb.msg.note(1, bb.msg.domain.Collection, "Requested parsing .bb files only. Exiting.")
return 0
pkgs_to_build = self.configuration.pkgs_to_build
bbpkgs = bb.data.getVar('BBPKGS', self.configuration.data, 1)
@ -475,30 +509,7 @@ class BBCooker:
print "for usage information."
sys.exit(0)
# Import Psyco if available and not disabled
if not self.configuration.disable_psyco:
try:
import psyco
except ImportError:
bb.msg.note(1, bb.msg.domain.Collection, "Psyco JIT Compiler (http://psyco.sf.net) not available. Install it to increase performance.")
else:
psyco.bind( self.parse_bbfiles )
else:
bb.msg.note(1, bb.msg.domain.Collection, "You have disabled Psyco. This decreases performance.")
try:
bb.msg.debug(1, bb.msg.domain.Collection, "collecting .bb files")
(filelist, masked) = self.collect_bbfiles()
self.parse_bbfiles(filelist, masked, self.myProgressCallback)
bb.msg.debug(1, bb.msg.domain.Collection, "parsing complete")
print
if self.configuration.parse_only:
bb.msg.note(1, bb.msg.domain.Collection, "Requested parsing .bb files only. Exiting.")
return
self.buildDepgraph()
if self.configuration.show_versions:
self.showVersions()
sys.exit( 0 )
@ -512,34 +523,7 @@ class BBCooker:
self.generateDotGraph( pkgs_to_build, self.configuration.ignored_dot_deps )
sys.exit( 0 )
bb.event.fire(bb.event.BuildStarted(buildname, pkgs_to_build, self.configuration.event_data))
localdata = data.createCopy(self.configuration.data)
bb.data.update_data(localdata)
bb.data.expandKeys(localdata)
taskdata = bb.taskdata.TaskData(self.configuration.abort)
runlist = []
try:
for k in pkgs_to_build:
taskdata.add_provider(localdata, self.status, k)
runlist.append([k, "do_%s" % self.configuration.cmd])
taskdata.add_unresolved(localdata, self.status)
except bb.providers.NoProvider:
sys.exit(1)
rq = bb.runqueue.RunQueue()
rq.prepare_runqueue(self, self.configuration.data, self.status, taskdata, runlist)
try:
failures = rq.execute_runqueue(self, self.configuration.data, self.status, taskdata, runlist)
except runqueue.TaskFailure, fnids:
for fnid in fnids:
bb.msg.error(bb.msg.domain.Build, "'%s' failed" % taskdata.fn_index[fnid])
sys.exit(1)
bb.event.fire(bb.event.BuildCompleted(buildname, pkgs_to_build, self.configuration.event_data, failures))
sys.exit( self.stats.show() )
return self.buildTargets(pkgs_to_build)
except KeyboardInterrupt:
bb.msg.note(1, bb.msg.domain.Collection, "KeyboardInterrupt - Build not completed.")
@ -556,13 +540,17 @@ class BBCooker:
return bbfiles
def find_bbfiles( self, path ):
"""Find all the .bb files in a directory (uses find)"""
findcmd = 'find ' + path + ' -name *.bb | grep -v SCCS/'
try:
finddata = os.popen(findcmd)
except OSError:
return []
return finddata.readlines()
"""Find all the .bb files in a directory"""
from os.path import join
found = []
for dir, dirs, files in os.walk(path):
for ignored in ('SCCS', 'CVS', '.svn'):
if ignored in dirs:
dirs.remove(ignored)
found += [join(dir,f) for f in files if f.endswith('.bb')]
return found
def collect_bbfiles( self ):
"""Collect all available .bb build files"""

View File

@ -23,14 +23,13 @@ BitBake build tools.
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os, re
import bb.data
import bb.utils
class Event:
"""Base class for events"""
type = "Event"
def __init__(self, d = bb.data.init()):
def __init__(self, d):
self._data = d
def getData(self):
@ -129,7 +128,7 @@ def getName(e):
class PkgBase(Event):
"""Base class for package events"""
def __init__(self, t, d = bb.data.init()):
def __init__(self, t, d):
self._pkg = t
Event.__init__(self, d)

View File

@ -91,6 +91,12 @@ class Svn(Fetch):
elif ud.date != "now":
options.append("-r {%s}" % ud.date)
if ud.user:
options.append("--username %s" % ud.user)
if ud.pswd:
options.append("--password %s" % ud.pswd)
localdata = data.createCopy(d)
data.setVar('OVERRIDES', "svn:%s" % data.getVar('OVERRIDES', localdata), localdata)
data.update_data(localdata)

View File

@ -23,7 +23,7 @@ Message handling infrastructure for bitbake
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys, os, re, bb
from bb import utils
from bb import utils, event
debug_level = {}
@ -42,6 +42,29 @@ domain = bb.utils.Enum(
'TaskData',
'Util')
class MsgBase(bb.event.Event):
"""Base class for messages"""
def __init__(self, msg, d ):
self._message = msg
event.Event.__init__(self, d)
class MsgDebug(MsgBase):
"""Debug Message"""
class MsgNote(MsgBase):
"""Note Message"""
class MsgWarn(MsgBase):
"""Warning Message"""
class MsgError(MsgBase):
"""Error Message"""
class MsgFatal(MsgBase):
"""Fatal Message"""
#
# Message control functions
#
@ -71,6 +94,7 @@ def set_debug_domains(domains):
def debug(level, domain, msg, fn = None):
if debug_level[domain] >= level:
bb.event.fire(MsgDebug(msg, None))
print 'DEBUG: ' + msg
def note(level, domain, msg, fn = None):
@ -91,17 +115,22 @@ def fatal(domain, msg, fn = None):
#
def std_debug(lvl, msg):
if debug_level['default'] >= lvl:
bb.event.fire(MsgDebug(msg, None))
print 'DEBUG: ' + msg
def std_note(msg):
bb.event.fire(MsgNote(msg, None))
print 'NOTE: ' + msg
def std_warn(msg):
bb.event.fire(MsgWarn(msg, None))
print 'WARNING: ' + msg
def std_error(msg):
bb.event.fire(MsgError(msg, None))
print 'ERROR: ' + msg
def std_fatal(msg):
bb.event.fire(MsgFatal(msg, None))
print 'ERROR: ' + msg
sys.exit(1)

View File

@ -161,6 +161,12 @@ def handle(fn, data, include = 0):
return data
def feeder(lineno, s, fn, data):
def getFunc(groupd, key, data):
if 'flag' in groupd and groupd['flag'] != None:
return bb.data.getVarFlag(key, groupd['flag'], data)
else:
return bb.data.getVar(key, data)
m = __config_regexp__.match(s)
if m:
groupd = m.groupdict()
@ -168,19 +174,19 @@ def feeder(lineno, s, fn, data):
if "exp" in groupd and groupd["exp"] != None:
bb.data.setVarFlag(key, "export", 1, data)
if "ques" in groupd and groupd["ques"] != None:
val = bb.data.getVar(key, data)
val = getFunc(groupd, key, data)
if val == None:
val = groupd["value"]
elif "colon" in groupd and groupd["colon"] != None:
val = bb.data.expand(groupd["value"], data)
elif "append" in groupd and groupd["append"] != None:
val = "%s %s" % ((bb.data.getVar(key, data) or ""), groupd["value"])
val = "%s %s" % ((getFunc(groupd, key, data) or ""), groupd["value"])
elif "prepend" in groupd and groupd["prepend"] != None:
val = "%s %s" % (groupd["value"], (bb.data.getVar(key, data) or ""))
val = "%s %s" % (groupd["value"], (getFunc(groupd, key, data) or ""))
elif "postdot" in groupd and groupd["postdot"] != None:
val = "%s%s" % ((bb.data.getVar(key, data) or ""), groupd["value"])
val = "%s%s" % ((getFunc(groupd, key, data) or ""), groupd["value"])
elif "predot" in groupd and groupd["predot"] != None:
val = "%s%s" % (groupd["value"], (bb.data.getVar(key, data) or ""))
val = "%s%s" % (groupd["value"], (getFunc(groupd, key, data) or ""))
else:
val = groupd["value"]
if 'flag' in groupd and groupd['flag'] != None:

View File

@ -61,19 +61,27 @@ def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
preferred_v = bb.data.getVar('PREFERRED_VERSION_%s' % pn, localdata, True)
if preferred_v:
m = re.match('(.*)_(.*)', preferred_v)
m = re.match('(\d+:)*(.*)(_.*)*', preferred_v)
if m:
preferred_v = m.group(1)
preferred_r = m.group(2)
if m.group(1):
preferred_e = int(m.group(1)[:-1])
else:
preferred_e = None
preferred_v = m.group(2)
if m.group(3):
preferred_r = m.group(3)[1:]
else:
preferred_r = None
else:
preferred_e = None
preferred_r = None
for file_set in tmp_pn:
for f in file_set:
pv,pr = dataCache.pkg_pvpr[f]
if preferred_v == pv and (preferred_r == pr or preferred_r == None):
pe,pv,pr = dataCache.pkg_pepvpr[f]
if preferred_v == pv and (preferred_r == pr or preferred_r == None) and (preferred_e == pe or preferred_e == None):
preferred_file = f
preferred_ver = (pv, pr)
preferred_ver = (pe, pv, pr)
break
if preferred_file:
break;
@ -81,6 +89,8 @@ def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
pv_str = '%s-%s' % (preferred_v, preferred_r)
else:
pv_str = preferred_v
if not (preferred_e is None):
pv_str = '%s:%s' % (preferred_e, pv_str)
itemstr = ""
if item:
itemstr = " (for item %s)" % item
@ -97,11 +107,11 @@ def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
latest_p = 0
latest_f = None
for file_name in files:
pv,pr = dataCache.pkg_pvpr[file_name]
pe,pv,pr = dataCache.pkg_pepvpr[file_name]
dp = dataCache.pkg_dp[file_name]
if (latest is None) or ((latest_p == dp) and (utils.vercmp(latest, (pv, pr)) < 0)) or (dp > latest_p):
latest = (pv, pr)
if (latest is None) or ((latest_p == dp) and (utils.vercmp(latest, (pe, pv, pr)) < 0)) or (dp > latest_p):
latest = (pe, pv, pr)
latest_f = file_name
latest_p = dp
if preferred_file is None:
@ -110,10 +120,7 @@ def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
return (latest,latest_f,preferred_ver, preferred_file)
#
# RP - build_cache_fail needs to move elsewhere
#
def filterProviders(providers, item, cfgData, dataCache, build_cache_fail = {}):
def filterProviders(providers, item, cfgData, dataCache):
"""
Take a list of providers and filter/reorder according to the
environment variables and previous build results
@ -135,12 +142,6 @@ def filterProviders(providers, item, cfgData, dataCache, build_cache_fail = {}):
preferred_versions[pn] = bb.providers.findBestProvider(pn, cfgData, dataCache, pkg_pn, item)[2:4]
eligible.append(preferred_versions[pn][1])
for p in eligible:
if p in build_cache_fail:
bb.msg.debug(1, bb.msg.domain.Provider, "rejecting already-failed %s" % p)
eligible.remove(p)
if len(eligible) == 0:
bb.msg.error(bb.msg.domain.Provider, "no eligible providers for %s" % item)
return 0
@ -162,7 +163,7 @@ def filterProviders(providers, item, cfgData, dataCache, build_cache_fail = {}):
# if so, bump it to the head of the queue
for p in providers:
pn = dataCache.pkg_fn[p]
pv, pr = dataCache.pkg_pvpr[p]
pe, pv, pr = dataCache.pkg_pepvpr[p]
stamp = '%s.do_populate_staging' % dataCache.stamp[p]
if os.path.exists(stamp):
@ -171,7 +172,11 @@ def filterProviders(providers, item, cfgData, dataCache, build_cache_fail = {}):
# package was made ineligible by already-failed check
continue
oldver = "%s-%s" % (pv, pr)
newver = '-'.join(newvers)
if pe > 0:
oldver = "%s:%s" % (pe, oldver)
newver = "%s-%s" % (newvers[1], newvers[2])
if newvers[0] > 0:
newver = "%s:%s" % (newvers[0], newver)
if (newver != oldver):
extra_chat = "%s (%s) already staged but upgrading to %s to satisfy %s" % (pn, oldver, newver, item)
else:
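
To make the new epoch handling easier to follow, here is a small standalone sketch (helper name and sample values are illustrative, not from this commit) of how a PREFERRED_VERSION of the assumed form "epoch:version_revision" decomposes into the (preferred_e, preferred_v, preferred_r) triple used above; fields left as None match any candidate (pe, pv, pr) taken from pkg_pepvpr:

def split_preferred_version(pref):
    # Sketch only: "1:2.3_r4" -> (1, "2.3", "r4"); missing parts stay None ("don't care").
    epoch, ver, rev = None, pref, None
    if ":" in ver:
        e, ver = ver.split(":", 1)
        epoch = int(e)
    if "_" in ver:
        # naive split; a version that itself contains '_' would need the regex form used above
        ver, rev = ver.rsplit("_", 1)
    return epoch, ver, rev

# split_preferred_version("1:2.3_r4") == (1, "2.3", "r4")
# split_preferred_version("2.3")      == (None, "2.3", None)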

View File

@ -25,20 +25,47 @@ Handles preparation and execution of a queue of tasks
from bb import msg, data, fetch, event, mkdirhier, utils
from sets import Set
import bb, os, sys
import signal
class TaskFailure(Exception):
"""Exception raised when a task in a runqueue fails"""
def __init__(self, x):
self.args = x
class RunQueueStats:
"""
Holds statistics on the tasks handled by the associated runQueue
"""
def __init__(self):
self.completed = 0
self.skipped = 0
self.failed = 0
def taskFailed(self):
self.failed = self.failed + 1
def taskCompleted(self):
self.completed = self.completed + 1
def taskSkipped(self):
self.skipped = self.skipped + 1
class RunQueue:
"""
BitBake Run Queue implementation
"""
def __init__(self):
def __init__(self, cooker, cfgData, dataCache, taskData, targets):
self.reset_runqueue()
self.cooker = cooker
self.dataCache = dataCache
self.taskData = taskData
self.targets = targets
self.number_tasks = int(bb.data.getVar("BB_NUMBER_THREADS", cfgData) or 1)
def reset_runqueue(self):
self.runq_fnid = []
self.runq_task = []
self.runq_depends = []
@ -46,16 +73,15 @@ class RunQueue:
self.runq_weight = []
self.prio_map = []
def get_user_idstring(self, task, taskData):
fn = taskData.fn_index[self.runq_fnid[task]]
def get_user_idstring(self, task):
fn = self.taskData.fn_index[self.runq_fnid[task]]
taskname = self.runq_task[task]
return "%s, %s" % (fn, taskname)
def prepare_runqueue(self, cooker, cfgData, dataCache, taskData, targets):
def prepare_runqueue(self):
"""
Turn a set of taskData into a RunQueue and compute data needed
to optimise the execution order.
targets is list of paired values - a provider name and the task to run
"""
depends = []
@ -63,12 +89,14 @@ class RunQueue:
runq_build = []
runq_done = []
taskData = self.taskData
bb.msg.note(1, bb.msg.domain.RunQueue, "Preparing Runqueue")
for task in range(len(taskData.tasks_name)):
fnid = taskData.tasks_fnid[task]
fn = taskData.fn_index[fnid]
task_deps = dataCache.task_deps[fn]
task_deps = self.dataCache.task_deps[fn]
if fnid not in taskData.failed_fnids:
@ -94,6 +122,15 @@ class RunQueue:
dep = taskData.fn_index[depdata]
depends.append(taskData.gettask_id(dep, taskname))
idepends = taskData.tasks_idepends[task]
for idepend in idepends:
depid = int(idepend.split(":")[0])
if depid in taskData.build_targets:
depdata = taskData.build_targets[depid][0]
if depdata:
dep = taskData.fn_index[depdata]
depends.append(taskData.gettask_id(dep, idepend.split(":")[1]))
def add_recursive_build(depid):
"""
Add build depends of depid to depends
@ -197,7 +234,7 @@ class RunQueue:
for depend in depends:
mark_active(depend, depth+1)
for target in targets:
for target in self.targets:
targetid = taskData.getbuild_id(target[0])
if targetid not in taskData.build_targets:
@ -209,10 +246,10 @@ class RunQueue:
fnid = taskData.build_targets[targetid][0]
# Remove stamps for targets if force mode active
if cooker.configuration.force:
if self.cooker.configuration.force:
fn = taskData.fn_index[fnid]
bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (target[1], fn))
bb.build.del_stamp(target[1], dataCache, fn)
bb.build.del_stamp(target[1], self.dataCache, fn)
if fnid in taskData.failed_fnids:
continue
@ -299,18 +336,18 @@ class RunQueue:
seen.append(taskid)
for revdep in self.runq_revdeps[taskid]:
if runq_done[revdep] == 0 and revdep not in seen and not finish:
bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) (depends: %s)" % (revdep, self.get_user_idstring(revdep, taskData), self.runq_depends[revdep]))
bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) (depends: %s)" % (revdep, self.get_user_idstring(revdep), self.runq_depends[revdep]))
if revdep in deps_seen:
bb.msg.error(bb.msg.domain.RunQueue, "Chain ends at Task %s (%s)" % (revdep, self.get_user_idstring(revdep, taskData)))
bb.msg.error(bb.msg.domain.RunQueue, "Chain ends at Task %s (%s)" % (revdep, self.get_user_idstring(revdep)))
finish = True
return
for dep in self.runq_depends[revdep]:
deps_seen.append(dep)
print_chain(revdep, finish)
print_chain(task, False)
bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) not processed!\nThis is probably a circular dependency (the chain might be printed above)." % (task, self.get_user_idstring(task, taskData)))
bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) not processed!\nThis is probably a circular dependency (the chain might be printed above)." % (task, self.get_user_idstring(task)))
if runq_weight1[task] != 0:
bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) count not zero!" % (task, self.get_user_idstring(task, taskData)))
bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) count not zero!" % (task, self.get_user_idstring(task)))
# Make a weight sorted map
from copy import deepcopy
@ -328,7 +365,7 @@ class RunQueue:
#self.dump_data(taskData)
def execute_runqueue(self, cooker, cfgData, dataCache, taskData, runlist):
def execute_runqueue(self):
"""
Run the tasks in a queue prepared by prepare_runqueue
Upon failure, optionally try to recover the build using any alternate providers
@ -337,35 +374,86 @@ class RunQueue:
failures = 0
while 1:
failed_fnids = self.execute_runqueue_internal(cooker, cfgData, dataCache, taskData)
failed_fnids = []
try:
self.execute_runqueue_internal()
finally:
if self.master_process:
failed_fnids = self.finish_runqueue()
if len(failed_fnids) == 0:
return failures
if taskData.abort:
if self.taskData.abort:
raise bb.runqueue.TaskFailure(failed_fnids)
for fnid in failed_fnids:
#print "Failure: %s %s %s" % (fnid, taskData.fn_index[fnid], self.runq_task[fnid])
taskData.fail_fnid(fnid)
#print "Failure: %s %s %s" % (fnid, self.taskData.fn_index[fnid], self.runq_task[fnid])
self.taskData.fail_fnid(fnid)
failures = failures + 1
self.reset_runqueue()
self.prepare_runqueue(cooker, cfgData, dataCache, taskData, runlist)
self.prepare_runqueue()
def execute_runqueue_internal(self, cooker, cfgData, dataCache, taskData):
def execute_runqueue_initVars(self):
self.stats = RunQueueStats()
self.active_builds = 0
self.runq_buildable = []
self.runq_running = []
self.runq_complete = []
self.build_pids = {}
self.failed_fnids = []
self.master_process = True
# Mark initial buildable tasks
for task in range(len(self.runq_fnid)):
self.runq_running.append(0)
self.runq_complete.append(0)
if len(self.runq_depends[task]) == 0:
self.runq_buildable.append(1)
else:
self.runq_buildable.append(0)
def task_complete(self, task):
"""
Mark a task as completed
Look at the reverse dependencies and mark any task with
completed dependencies as buildable
"""
self.runq_complete[task] = 1
for revdep in self.runq_revdeps[task]:
if self.runq_running[revdep] == 1:
continue
if self.runq_buildable[revdep] == 1:
continue
alldeps = 1
for dep in self.runq_depends[revdep]:
if self.runq_complete[dep] != 1:
alldeps = 0
if alldeps == 1:
self.runq_buildable[revdep] = 1
fn = self.taskData.fn_index[self.runq_fnid[revdep]]
taskname = self.runq_task[revdep]
bb.msg.debug(1, bb.msg.domain.RunQueue, "Marking task %s (%s, %s) as buildable" % (revdep, fn, taskname))
def get_next_task(self):
"""
Return the id of the highest priority task that is buildable
"""
for task1 in range(len(self.runq_fnid)):
task = self.prio_map[task1]
if self.runq_running[task] == 1:
continue
if self.runq_buildable[task] == 1:
return task
return None
def execute_runqueue_internal(self):
"""
Run the tasks in a queue prepared by prepare_runqueue
"""
import signal
bb.msg.note(1, bb.msg.domain.RunQueue, "Executing runqueue")
active_builds = 0
tasks_completed = 0
tasks_skipped = 0
runq_buildable = []
runq_running = []
runq_complete = []
build_pids = {}
failed_fnids = []
self.execute_runqueue_initVars()
if len(self.runq_fnid) == 0:
# nothing to do
@ -374,144 +462,103 @@ class RunQueue:
def sigint_handler(signum, frame):
raise KeyboardInterrupt
def get_next_task(data):
"""
Return the id of the highest priority task that is buildable
"""
for task1 in range(len(data.runq_fnid)):
task = data.prio_map[task1]
if runq_running[task] == 1:
while True:
task = self.get_next_task()
if task is not None:
fn = self.taskData.fn_index[self.runq_fnid[task]]
taskname = self.runq_task[task]
if bb.build.stamp_is_current(taskname, self.dataCache, fn):
bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task)))
self.runq_running[task] = 1
self.task_complete(task)
self.stats.taskCompleted()
self.stats.taskSkipped()
continue
if runq_buildable[task] == 1:
return task
return None
def task_complete(data, task):
"""
Mark a task as completed
Look at the reverse dependencies and mark any task with
completed dependencies as buildable
"""
runq_complete[task] = 1
for revdep in data.runq_revdeps[task]:
if runq_running[revdep] == 1:
continue
if runq_buildable[revdep] == 1:
continue
alldeps = 1
for dep in data.runq_depends[revdep]:
if runq_complete[dep] != 1:
alldeps = 0
if alldeps == 1:
runq_buildable[revdep] = 1
fn = taskData.fn_index[self.runq_fnid[revdep]]
taskname = self.runq_task[revdep]
bb.msg.debug(1, bb.msg.domain.RunQueue, "Marking task %s (%s, %s) as buildable" % (revdep, fn, taskname))
# Mark initial buildable tasks
for task in range(len(self.runq_fnid)):
runq_running.append(0)
runq_complete.append(0)
if len(self.runq_depends[task]) == 0:
runq_buildable.append(1)
else:
runq_buildable.append(0)
number_tasks = int(bb.data.getVar("BB_NUMBER_THREADS", cfgData) or 1)
try:
while 1:
task = get_next_task(self)
if task is not None:
fn = taskData.fn_index[self.runq_fnid[task]]
taskname = self.runq_task[task]
if bb.build.stamp_is_current(taskname, dataCache, fn):
bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task, taskData)))
runq_running[task] = 1
task_complete(self, task)
tasks_completed = tasks_completed + 1
tasks_skipped = tasks_skipped + 1
continue
bb.msg.note(1, bb.msg.domain.RunQueue, "Running task %d of %d (ID: %s, %s)" % (tasks_completed + active_builds + 1, len(self.runq_fnid), task, self.get_user_idstring(task, taskData)))
bb.msg.note(1, bb.msg.domain.RunQueue, "Running task %d of %d (ID: %s, %s)" % (self.stats.completed + self.active_builds + 1, len(self.runq_fnid), task, self.get_user_idstring(task)))
try:
pid = os.fork()
except OSError, e:
bb.msg.fatal(bb.msg.domain.RunQueue, "fork failed: %d (%s)" % (e.errno, e.strerror))
if pid == 0:
# Bypass master process' handling
self.master_process = False
# Stop Ctrl+C being sent to children
# signal.signal(signal.SIGINT, signal.SIG_IGN)
# Make the child the process group leader
os.setpgid(0, 0)
sys.stdin = open('/dev/null', 'r')
self.cooker.configuration.cmd = taskname[3:]
try:
pid = os.fork()
except OSError, e:
bb.msg.fatal(bb.msg.domain.RunQueue, "fork failed: %d (%s)" % (e.errno, e.strerror))
if pid == 0:
# Bypass finally below
active_builds = 0
# Stop Ctrl+C being sent to children
# signal.signal(signal.SIGINT, signal.SIG_IGN)
# Make the child the process group leader
os.setpgid(0, 0)
sys.stdin = open('/dev/null', 'r')
cooker.configuration.cmd = taskname[3:]
try:
cooker.tryBuild(fn, False)
except bb.build.EventException:
bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed")
sys.exit(1)
except:
bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed")
raise
sys.exit(0)
build_pids[pid] = task
runq_running[task] = 1
active_builds = active_builds + 1
if active_builds < number_tasks:
continue
if active_builds > 0:
result = os.waitpid(-1, 0)
active_builds = active_builds - 1
task = build_pids[result[0]]
if result[1] != 0:
del build_pids[result[0]]
bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed" % (task, self.get_user_idstring(task, taskData)))
failed_fnids.append(self.runq_fnid[task])
break
task_complete(self, task)
tasks_completed = tasks_completed + 1
del build_pids[result[0]]
self.cooker.tryBuild(fn, False)
except bb.build.EventException:
bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed")
sys.exit(1)
except:
bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed")
raise
sys.exit(0)
self.build_pids[pid] = task
self.runq_running[task] = 1
self.active_builds = self.active_builds + 1
if self.active_builds < self.number_tasks:
continue
break
finally:
try:
while active_builds > 0:
bb.msg.note(1, bb.msg.domain.RunQueue, "Waiting for %s active tasks to finish" % active_builds)
tasknum = 1
for k, v in build_pids.iteritems():
bb.msg.note(1, bb.msg.domain.RunQueue, "%s: %s (%s)" % (tasknum, self.get_user_idstring(v, taskData), k))
tasknum = tasknum + 1
result = os.waitpid(-1, 0)
task = build_pids[result[0]]
if result[1] != 0:
bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed" % (task, self.get_user_idstring(task, taskData)))
failed_fnids.append(self.runq_fnid[task])
del build_pids[result[0]]
active_builds = active_builds - 1
if len(failed_fnids) > 0:
return failed_fnids
except:
bb.msg.note(1, bb.msg.domain.RunQueue, "Sending SIGINT to remaining %s tasks" % active_builds)
for k, v in build_pids.iteritems():
if self.active_builds > 0:
result = os.waitpid(-1, 0)
self.active_builds = self.active_builds - 1
task = self.build_pids[result[0]]
if result[1] != 0:
del self.build_pids[result[0]]
bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed" % (task, self.get_user_idstring(task)))
self.failed_fnids.append(self.runq_fnid[task])
self.stats.taskFailed()
break
self.task_complete(task)
self.stats.taskCompleted()
del self.build_pids[result[0]]
continue
return
def finish_runqueue(self):
try:
while self.active_builds > 0:
bb.msg.note(1, bb.msg.domain.RunQueue, "Waiting for %s active tasks to finish" % self.active_builds)
tasknum = 1
for k, v in self.build_pids.iteritems():
bb.msg.note(1, bb.msg.domain.RunQueue, "%s: %s (%s)" % (tasknum, self.get_user_idstring(v), k))
tasknum = tasknum + 1
result = os.waitpid(-1, 0)
task = self.build_pids[result[0]]
if result[1] != 0:
bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed" % (task, self.get_user_idstring(task)))
self.failed_fnids.append(self.runq_fnid[task])
self.stats.taskFailed()
del self.build_pids[result[0]]
self.active_builds = self.active_builds - 1
if len(self.failed_fnids) > 0:
return self.failed_fnids
except KeyboardInterrupt:
bb.msg.note(1, bb.msg.domain.RunQueue, "Sending SIGINT to remaining %s tasks" % self.active_builds)
for k, v in self.build_pids.iteritems():
try:
os.kill(-k, signal.SIGINT)
raise
except:
pass
raise
# Sanity Checks
for task in range(len(self.runq_fnid)):
if runq_buildable[task] == 0:
if self.runq_buildable[task] == 0:
bb.msg.error(bb.msg.domain.RunQueue, "Task %s never buildable!" % task)
if runq_running[task] == 0:
if self.runq_running[task] == 0:
bb.msg.error(bb.msg.domain.RunQueue, "Task %s never ran!" % task)
if runq_complete[task] == 0:
if self.runq_complete[task] == 0:
bb.msg.error(bb.msg.domain.RunQueue, "Task %s never completed!" % task)
bb.msg.note(1, bb.msg.domain.RunQueue, "Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed." % (tasks_completed, tasks_skipped, len(failed_fnids)))
bb.msg.note(1, bb.msg.domain.RunQueue, "Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed." % (self.stats.completed, self.stats.skipped, self.stats.failed))
return failed_fnids
return self.failed_fnids
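
The scheduling logic is spread across several hunks above, so here is a minimal, self-contained sketch (simplified names, not the commit's code) of the fork/waitpid pattern the run queue now uses: fork children up to the BB_NUMBER_THREADS limit, reap one before forking more, and drain the rest at the end.

import os

def run_tasks(tasks, number_tasks):
    # 'tasks' is a list of callables returning True on success (illustrative only).
    active = {}                            # pid -> task index
    failed = []
    for i, task in enumerate(tasks):
        pid = os.fork()
        if pid == 0:                       # child: run one task and never return to this loop
            try:
                ok = task()
            except Exception:
                ok = False
            os._exit(0 if ok else 1)
        active[pid] = i
        if len(active) < number_tasks:
            continue                       # room for another parallel child
        pid, status = os.waitpid(-1, 0)    # at the limit: reap one child first
        if status != 0:
            failed.append(active[pid])
        del active[pid]
    while active:                          # drain remaining children
        pid, status = os.waitpid(-1, 0)
        if status != 0:
            failed.append(active[pid])
        del active[pid]
    return failed
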
def dump_data(self, taskQueue):
"""

View File

@ -104,10 +104,11 @@ class BitBakeShellCommands:
def _findProvider( self, item ):
self._checkParsed()
# Need to use taskData for this information
preferred = data.getVar( "PREFERRED_PROVIDER_%s" % item, cooker.configuration.data, 1 )
if not preferred: preferred = item
try:
lv, lf, pv, pf = Providers.findBestProvider(preferred, cooker.configuration.data, cooker.status, cooker.build_cache_fail)
lv, lf, pv, pf = Providers.findBestProvider(preferred, cooker.configuration.data, cooker.status)
except KeyError:
if item in cooker.status.providers:
pf = cooker.status.providers[item][0]
@ -144,6 +145,7 @@ class BitBakeShellCommands:
def build( self, params, cmd = "build" ):
"""Build a providee"""
global last_exception
globexpr = params[0]
self._checkParsed()
names = globfilter( cooker.status.pkg_pn.keys(), globexpr )
@ -152,8 +154,6 @@ class BitBakeShellCommands:
oldcmd = cooker.configuration.cmd
cooker.configuration.cmd = cmd
cooker.build_cache = []
cooker.build_cache_fail = []
td = taskdata.TaskData(cooker.configuration.abort)
@ -170,24 +170,21 @@ class BitBakeShellCommands:
td.add_unresolved(cooker.configuration.data, cooker.status)
rq = runqueue.RunQueue()
rq.prepare_runqueue(cooker, cooker.configuration.data, cooker.status, td, tasks)
rq.execute_runqueue(cooker, cooker.configuration.data, cooker.status, td, tasks)
rq = runqueue.RunQueue(cooker, cooker.configuration.data, cooker.status, td, tasks)
rq.prepare_runqueue()
rq.execute_runqueue()
except Providers.NoProvider:
print "ERROR: No Provider"
global last_exception
last_exception = Providers.NoProvider
except runqueue.TaskFailure, fnids:
for fnid in fnids:
print "ERROR: '%s' failed" % td.fn_index[fnid]
global last_exception
last_exception = runqueue.TaskFailure
except build.EventException, e:
print "ERROR: Couldn't build '%s'" % names
global last_exception
last_exception = e
cooker.configuration.cmd = oldcmd
@ -236,14 +233,13 @@ class BitBakeShellCommands:
def fileBuild( self, params, cmd = "build" ):
"""Parse and build a .bb file"""
global last_exception
name = params[0]
bf = completeFilePath( name )
print "SHELL: Calling '%s' on '%s'" % ( cmd, bf )
oldcmd = cooker.configuration.cmd
cooker.configuration.cmd = cmd
cooker.build_cache = []
cooker.build_cache_fail = []
thisdata = copy.deepcopy( initdata )
# Caution: parse.handle modifies thisdata, hence it would
@ -266,7 +262,6 @@ class BitBakeShellCommands:
cooker.tryBuildPackage( os.path.abspath( bf ), item, cmd, bbfile_data, True )
except build.EventException, e:
print "ERROR: Couldn't build '%s'" % name
global last_exception
last_exception = e
cooker.configuration.cmd = oldcmd
@ -537,8 +532,6 @@ SRC_URI = ""
def status( self, params ):
"""<just for testing>"""
print "-" * 78
print "build cache = '%s'" % cooker.build_cache
print "build cache fail = '%s'" % cooker.build_cache_fail
print "building list = '%s'" % cooker.building_list
print "build path = '%s'" % cooker.build_path
print "consider_msgs_cache = '%s'" % cooker.consider_msgs_cache
@ -557,6 +550,7 @@ SRC_URI = ""
def which( self, params ):
"""Computes the providers for a given providee"""
# Need to use taskData for this information
item = params[0]
self._checkParsed()
@ -565,8 +559,7 @@ SRC_URI = ""
if not preferred: preferred = item
try:
lv, lf, pv, pf = Providers.findBestProvider(preferred, cooker.configuration.data, cooker.status,
cooker.build_cache_fail)
lv, lf, pv, pf = Providers.findBestProvider(preferred, cooker.configuration.data, cooker.status)
except KeyError:
lv, lf, pv, pf = (None,)*4

View File

@ -43,6 +43,7 @@ class TaskData:
self.tasks_fnid = []
self.tasks_name = []
self.tasks_tdepends = []
self.tasks_idepends = []
# Cache to speed up task ID lookups
self.tasks_lookup = {}
@ -108,6 +109,7 @@ class TaskData:
self.tasks_name.append(task)
self.tasks_fnid.append(fnid)
self.tasks_tdepends.append([])
self.tasks_idepends.append([])
listid = len(self.tasks_name) - 1
@ -134,8 +136,9 @@ class TaskData:
if fnid in self.tasks_fnid:
return
# Work out task dependencies
for task in task_graph.allnodes():
# Work out task dependencies
parentids = []
for dep in task_graph.getparents(task):
parentid = self.gettask_id(fn, dep)
@ -143,6 +146,14 @@ class TaskData:
taskid = self.gettask_id(fn, task)
self.tasks_tdepends[taskid].extend(parentids)
# Touch all intertask dependencies
if 'depends' in task_deps and task in task_deps['depends']:
ids = []
for dep in task_deps['depends'][task].split(" "):
if dep:
ids.append(str(self.getbuild_id(dep.split(":")[0])) + ":" + dep.split(":")[1])
self.tasks_idepends[taskid].extend(ids)
# Work out build dependencies
if not fnid in self.depids:
dependids = {}
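
As a short illustration of the new inter-task dependency support: a task's 'depends' flag is assumed to hold space-separated "providee:taskname" pairs, and the loop above splits each pair and records it in tasks_idepends so the run queue can later turn it into a concrete task-to-task edge. A hedged sketch with made-up values:

# Illustrative flag value only; the providees and task names are not from this commit.
flag = "virtual/libc:do_populate_staging quilt-native:do_populate_staging"
ids = []
for dep in flag.split(" "):
    if dep:
        providee, taskname = dep.split(":")
        ids.append((providee, taskname))   # the real code stores getbuild_id(providee) here
# ids == [('virtual/libc', 'do_populate_staging'), ('quilt-native', 'do_populate_staging')]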

View File

@ -62,10 +62,12 @@ def vercmp_part(a, b):
return -1
def vercmp(ta, tb):
(va, ra) = ta
(vb, rb) = tb
(ea, va, ra) = ta
(eb, vb, rb) = tb
r = vercmp_part(va, vb)
r = int(ea)-int(eb)
if (r == 0):
r = vercmp_part(va, vb)
if (r == 0):
r = vercmp_part(ra, rb)
return r
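
A brief illustration of the updated comparison: version tuples now carry an epoch as (epoch, version, revision), and the epoch is compared numerically before the version and revision strings (the values below are examples, not taken from the commit):

from bb.utils import vercmp                # the function shown above

assert vercmp(("0", "1.2", "r3"), ("1", "0.9", "r0")) < 0   # any epoch-1 version beats epoch 0
assert vercmp(("0", "1.2", "r3"), ("0", "1.2", "r4")) < 0   # same epoch and version: r4 is newer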