2006-11-29 22:52:37 +00:00
#!/usr/bin/env python
2006-11-16 15:02:15 +00:00
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'RunQueue' implementation
Handles preparation and execution of a queue of tasks
"""
2007-09-02 14:10:08 +00:00
# Copyright (C) 2006-2007 Richard Purdie
2007-01-08 23:53:01 +00:00
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
2007-08-16 09:55:21 +00:00
from bb import msg , data , event , mkdirhier , utils
2006-11-16 15:02:15 +00:00
from sets import Set
import bb , os , sys
2007-04-01 15:04:49 +00:00
import signal
2006-11-16 15:02:15 +00:00
class TaskFailure(Exception):
    """Exception raised when a task in a runqueue fails"""
    def __init__(self, x):
        # Store the failing ids directly as the exception arguments so
        # callers can retrieve them via e.args.
        self.args = x
2006-11-16 15:02:15 +00:00
2007-04-01 15:04:49 +00:00
class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self):
        # Counters for the three possible outcomes of a task.
        self.completed = 0
        self.skipped = 0
        self.failed = 0

    def taskFailed(self):
        self.failed += 1

    def taskCompleted(self):
        self.completed += 1

    def taskSkipped(self):
        self.skipped += 1
2008-01-06 16:51:51 +00:00
class RunQueueScheduler:
    """
    Control the order tasks are scheduled in.
    """
    def __init__(self, runqueue):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        # Identity mapping: priority order is simply task id order.
        self.prio_map = list(range(len(self.rq.runq_fnid)))

    def next(self):
        """
        Return the id of the first task we find that is buildable
        """
        for task in self.prio_map:
            if self.rq.runq_running[task] == 1:
                continue
            if self.rq.runq_buildable[task] == 1:
                return task
class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    def __init__(self, runqueue):
        """
        The priority map is sorted by task weight.
        """
        self.rq = runqueue
        weights = self.rq.runq_weight
        # Sort task ids by ascending weight. Python's sort is stable, so
        # equal-weight tasks keep increasing-id order here; after the
        # reverse below ties end up in decreasing-id order, which is
        # exactly what the previous O(n^2) "find index, mark consumed"
        # implementation produced.
        self.prio_map = sorted(range(len(weights)), key=lambda task: weights[task])
        # Heaviest tasks first.
        self.prio_map.reverse()
class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, its completed as quickly as possible. This works
    well where disk space is at a premium and classes like OE's rm_work are in
    force.
    """
    def __init__(self, runqueue):
        RunQueueSchedulerSpeed.__init__(self, runqueue)
        #FIXME - whilst this groups all fnids together it does not reorder the
        #fnid groups optimally.
        remaining = list(self.prio_map)
        self.prio_map = []
        # Pull tasks off the speed-sorted list one at a time; whenever a
        # task is emitted, immediately emit every other queued task from
        # the same .bb file (same fnid), preserving their relative order.
        while remaining:
            first = remaining.pop(0)
            self.prio_map.append(first)
            fnid = self.rq.runq_fnid[first]
            same_fn = [tid for tid in remaining if self.rq.runq_fnid[tid] == fnid]
            self.prio_map.extend(same_fn)
            remaining = [tid for tid in remaining if self.rq.runq_fnid[tid] != fnid]
2006-11-16 15:02:15 +00:00
class RunQueue :
"""
BitBake Run Queue implementation
"""
2007-04-01 15:04:49 +00:00
def __init__(self, cooker, cfgData, dataCache, taskData, targets):
    """
    Set up a runqueue for the given targets.

    cooker    -- the cooker driving the build (configuration and task execution)
    cfgData   -- configuration datastore; read for BB_NUMBER_THREADS and
                 MULTI_PROVIDER_WHITELIST
    dataCache -- parsed recipe metadata cache
    taskData  -- the tasks/providers information the queue is built from
    targets   -- list of (target, taskname) pairs to build
    """
    self.reset_runqueue()

    self.cooker = cooker
    self.dataCache = dataCache
    self.taskData = taskData
    self.targets = targets

    # How many tasks to run in parallel (defaults to 1).
    self.number_tasks = int(bb.data.getVar("BB_NUMBER_THREADS", cfgData) or 1)
    self.multi_provider_whitelist = (bb.data.getVar("MULTI_PROVIDER_WHITELIST", cfgData) or "").split()
2006-11-16 15:02:15 +00:00
def reset_runqueue ( self ) :
2007-04-01 15:04:49 +00:00
2006-11-16 15:02:15 +00:00
self . runq_fnid = [ ]
self . runq_task = [ ]
self . runq_depends = [ ]
self . runq_revdeps = [ ]
2007-04-01 15:04:49 +00:00
def get_user_idstring ( self , task ) :
fn = self . taskData . fn_index [ self . runq_fnid [ task ] ]
2006-11-16 15:02:15 +00:00
taskname = self . runq_task [ task ]
return " %s , %s " % ( fn , taskname )
2008-01-06 16:51:51 +00:00
def circular_depchains_handler ( self , tasks ) :
"""
Some tasks aren ' t buildable, likely due to circular dependency issues.
Identify the circular dependencies and print them in a user readable format .
"""
from copy import deepcopy
valid_chains = [ ]
explored_deps = { }
msgs = [ ]
def chain_reorder ( chain ) :
"""
Reorder a dependency chain so the lowest task id is first
"""
lowest = 0
new_chain = [ ]
for entry in range ( len ( chain ) ) :
if chain [ entry ] < chain [ lowest ] :
lowest = entry
new_chain . extend ( chain [ lowest : ] )
new_chain . extend ( chain [ : lowest ] )
return new_chain
def chain_compare_equal ( chain1 , chain2 ) :
"""
Compare two dependency chains and see if they ' re the same
"""
if len ( chain1 ) != len ( chain2 ) :
return False
for index in range ( len ( chain1 ) ) :
if chain1 [ index ] != chain2 [ index ] :
return False
return True
def chain_array_contains ( chain , chain_array ) :
"""
Return True if chain_array contains chain
"""
for ch in chain_array :
if chain_compare_equal ( ch , chain ) :
return True
return False
def find_chains ( taskid , prev_chain ) :
prev_chain . append ( taskid )
total_deps = [ ]
total_deps . extend ( self . runq_revdeps [ taskid ] )
for revdep in self . runq_revdeps [ taskid ] :
if revdep in prev_chain :
idx = prev_chain . index ( revdep )
# To prevent duplicates, reorder the chain to start with the lowest taskid
# and search through an array of those we've already printed
chain = prev_chain [ idx : ]
new_chain = chain_reorder ( chain )
if not chain_array_contains ( new_chain , valid_chains ) :
valid_chains . append ( new_chain )
msgs . append ( " Dependency loop # %d found: \n " % len ( valid_chains ) )
for dep in new_chain :
msgs . append ( " Task %s ( %s ) (depends: %s ) \n " % ( dep , self . get_user_idstring ( dep ) , self . runq_depends [ dep ] ) )
msgs . append ( " \n " )
if len ( valid_chains ) > 10 :
msgs . append ( " Aborted dependency loops search after 10 matches. \n " )
return msgs
continue
scan = False
if revdep not in explored_deps :
scan = True
elif revdep in explored_deps [ revdep ] :
scan = True
else :
for dep in prev_chain :
if dep in explored_deps [ revdep ] :
scan = True
if scan :
find_chains ( revdep , deepcopy ( prev_chain ) )
for dep in explored_deps [ revdep ] :
if dep not in total_deps :
total_deps . append ( dep )
explored_deps [ taskid ] = total_deps
for task in tasks :
find_chains ( task , [ ] )
return msgs
def calculate_task_weights ( self , endpoints ) :
"""
Calculate a number representing the " weight " of each task . Heavier weighted tasks
have more dependencies and hence should be executed sooner for maximum speed .
This function also sanity checks the task list finding tasks that its not
possible to execute due to circular dependencies .
"""
numTasks = len ( self . runq_fnid )
weight = [ ]
deps_left = [ ]
task_done = [ ]
for listid in range ( numTasks ) :
task_done . append ( False )
weight . append ( 0 )
deps_left . append ( len ( self . runq_revdeps [ listid ] ) )
for listid in endpoints :
weight [ listid ] = 1
task_done [ listid ] = True
while 1 :
next_points = [ ]
for listid in endpoints :
for revdep in self . runq_depends [ listid ] :
weight [ revdep ] = weight [ revdep ] + weight [ listid ]
deps_left [ revdep ] = deps_left [ revdep ] - 1
if deps_left [ revdep ] == 0 :
next_points . append ( revdep )
task_done [ revdep ] = True
endpoints = next_points
if len ( next_points ) == 0 :
break
# Circular dependency sanity check
problem_tasks = [ ]
for task in range ( numTasks ) :
if task_done [ task ] is False or deps_left [ task ] != 0 :
problem_tasks . append ( task )
bb . msg . debug ( 2 , bb . msg . domain . RunQueue , " Task %s ( %s ) is not buildable \n " % ( task , self . get_user_idstring ( task ) ) )
bb . msg . debug ( 2 , bb . msg . domain . RunQueue , " (Complete marker was %s and the remaining dependency count was %s ) \n \n " % ( task_done [ task ] , deps_left [ task ] ) )
if problem_tasks :
message = " Unbuildable tasks were found. \n "
message = message + " These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks. \n \n "
message = message + " Identifying dependency loops (this may take a short while)... \n "
bb . msg . error ( bb . msg . domain . RunQueue , message )
msgs = self . circular_depchains_handler ( problem_tasks )
message = " \n "
for msg in msgs :
message = message + msg
bb . msg . fatal ( bb . msg . domain . RunQueue , message )
return weight
2007-04-01 15:04:49 +00:00
def prepare_runqueue(self):
    """
    Turn a set of taskData into a RunQueue and compute data needed
    to optimise the execution order.

    Populates self.runq_fnid/runq_task/runq_depends/runq_revdeps and
    self.runq_weight, and picks self.sched. Calls bb.msg.fatal() on
    unresolvable dependency problems.
    """
    depends = []
    runq_build = []

    taskData = self.taskData

    if len(taskData.tasks_name) == 0:
        # Nothing to do
        return

    bb.msg.note(1, bb.msg.domain.RunQueue, "Preparing runqueue")

    # Step A - Work out a list of tasks to run
    #
    # Taskdata gives us a list of possible providers for a every target
    # ordered by priority (build_targets, run_targets). It also gives
    # information on each of those providers.
    #
    # To create the actual list of tasks to execute we fix the list of
    # providers and then resolve the dependencies into task IDs. This
    # process is repeated for each type of dependency (tdepends, deptask,
    # rdeptast, recrdeptask, idepends).

    for task in range(len(taskData.tasks_name)):
        fnid = taskData.tasks_fnid[task]
        fn = taskData.fn_index[fnid]
        task_deps = self.dataCache.task_deps[fn]

        if fnid not in taskData.failed_fnids:

            # Resolve task internal dependencies
            #
            # e.g. addtask before X after Y
            # NOTE(review): this aliases the taskData list, so the appends
            # below also mutate taskData.tasks_tdepends[task] - confirm
            # that is intended before changing.
            depends = taskData.tasks_tdepends[task]

            # Resolve 'deptask' dependencies
            #
            # e.g. do_sometask[deptask] = "do_someothertask"
            # (makes sure sometask runs after someothertask of all DEPENDS)
            if 'deptask' in task_deps and taskData.tasks_name[task] in task_deps['deptask']:
                tasknames = task_deps['deptask'][taskData.tasks_name[task]].split()
                for depid in taskData.depids[fnid]:
                    # Won't be in build_targets if ASSUME_PROVIDED
                    if depid in taskData.build_targets:
                        depdata = taskData.build_targets[depid][0]
                        if depdata is not None:
                            dep = taskData.fn_index[depdata]
                            for taskname in tasknames:
                                depends.append(taskData.gettask_id(dep, taskname))

            # Resolve 'rdeptask' dependencies
            #
            # e.g. do_sometask[rdeptask] = "do_someothertask"
            # (makes sure sometask runs after someothertask of all RDEPENDS)
            if 'rdeptask' in task_deps and taskData.tasks_name[task] in task_deps['rdeptask']:
                taskname = task_deps['rdeptask'][taskData.tasks_name[task]]
                for depid in taskData.rdepids[fnid]:
                    if depid in taskData.run_targets:
                        depdata = taskData.run_targets[depid][0]
                        if depdata is not None:
                            dep = taskData.fn_index[depdata]
                            depends.append(taskData.gettask_id(dep, taskname))

            # Resolve inter-task dependencies
            #
            # e.g. do_sometask[depends] = "targetname:do_someothertask"
            # (makes sure sometask runs after targetname's someothertask)
            idepends = taskData.tasks_idepends[task]
            for idepend in idepends:
                depid = int(idepend.split(":")[0])
                if depid in taskData.build_targets:
                    # Won't be in build_targets if ASSUME_PROVIDED
                    depdata = taskData.build_targets[depid][0]
                    if depdata is not None:
                        dep = taskData.fn_index[depdata]
                        depends.append(taskData.gettask_id(dep, idepend.split(":")[1]))

            def add_recursive_build(depid, depfnid):
                """
                Add build depends of depid to depends
                (if we've not see it before)
                (calls itself recursively)
                """
                if str(depid) in dep_seen:
                    return
                dep_seen.append(depid)
                if depid in taskData.build_targets:
                    depdata = taskData.build_targets[depid][0]
                    if depdata is not None:
                        dep = taskData.fn_index[depdata]
                        idepends = []
                        # Need to avoid creating new tasks here
                        taskid = taskData.gettask_id(dep, taskname, False)
                        if taskid is not None:
                            depends.append(taskid)
                            fnid = taskData.tasks_fnid[taskid]
                            idepends = taskData.tasks_idepends[taskid]
                            #print "Added %s (%s) due to %s" % (taskid, taskData.fn_index[fnid], taskData.fn_index[depfnid])
                        else:
                            fnid = taskData.getfn_id(dep)
                        for nextdepid in taskData.depids[fnid]:
                            if nextdepid not in dep_seen:
                                add_recursive_build(nextdepid, fnid)
                        for nextdepid in taskData.rdepids[fnid]:
                            if nextdepid not in rdep_seen:
                                add_recursive_run(nextdepid, fnid)
                        for idepend in idepends:
                            nextdepid = int(idepend.split(":")[0])
                            if nextdepid not in dep_seen:
                                add_recursive_build(nextdepid, fnid)

            def add_recursive_run(rdepid, depfnid):
                """
                Add runtime depends of rdepid to depends
                (if we've not see it before)
                (calls itself recursively)
                """
                if str(rdepid) in rdep_seen:
                    return
                rdep_seen.append(rdepid)
                if rdepid in taskData.run_targets:
                    depdata = taskData.run_targets[rdepid][0]
                    if depdata is not None:
                        dep = taskData.fn_index[depdata]
                        idepends = []
                        # Need to avoid creating new tasks here
                        taskid = taskData.gettask_id(dep, taskname, False)
                        if taskid is not None:
                            depends.append(taskid)
                            fnid = taskData.tasks_fnid[taskid]
                            idepends = taskData.tasks_idepends[taskid]
                            #print "Added %s (%s) due to %s" % (taskid, taskData.fn_index[fnid], taskData.fn_index[depfnid])
                        else:
                            fnid = taskData.getfn_id(dep)
                        for nextdepid in taskData.depids[fnid]:
                            if nextdepid not in dep_seen:
                                add_recursive_build(nextdepid, fnid)
                        for nextdepid in taskData.rdepids[fnid]:
                            if nextdepid not in rdep_seen:
                                add_recursive_run(nextdepid, fnid)
                        for idepend in idepends:
                            nextdepid = int(idepend.split(":")[0])
                            if nextdepid not in dep_seen:
                                add_recursive_build(nextdepid, fnid)

            # Resolve recursive 'recrdeptask' dependencies
            #
            # e.g. do_sometask[recrdeptask] = "do_someothertask"
            # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
            if 'recrdeptask' in task_deps and taskData.tasks_name[task] in task_deps['recrdeptask']:
                for taskname in task_deps['recrdeptask'][taskData.tasks_name[task]].split():
                    dep_seen = []
                    rdep_seen = []
                    idep_seen = []
                    for depid in taskData.depids[fnid]:
                        add_recursive_build(depid, fnid)
                    for rdepid in taskData.rdepids[fnid]:
                        add_recursive_run(rdepid, fnid)
                    for idepend in idepends:
                        depid = int(idepend.split(":")[0])
                        add_recursive_build(depid, fnid)

            # Remove all self references
            if task in depends:
                newdep = []
                bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s %s) contains self reference! %s" % (task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], depends))
                for dep in depends:
                    if task != dep:
                        newdep.append(dep)
                depends = newdep

        self.runq_fnid.append(taskData.tasks_fnid[task])
        self.runq_task.append(taskData.tasks_name[task])
        self.runq_depends.append(Set(depends))
        self.runq_revdeps.append(Set())

        runq_build.append(0)

    # Step B - Mark all active tasks
    #
    # Start with the tasks we were asked to run and mark all dependencies
    # as active too. If the task is to be 'forced', clear its stamp. Once
    # all active tasks are marked, prune the ones we don't need.

    bb.msg.note(2, bb.msg.domain.RunQueue, "Marking Active Tasks")

    def mark_active(listid, depth):
        """
        Mark an item as active along with its depends
        (calls itself recursively)
        """
        if runq_build[listid] == 1:
            return
        runq_build[listid] = 1
        depends = self.runq_depends[listid]
        for depend in depends:
            mark_active(depend, depth+1)

    for target in self.targets:
        targetid = taskData.getbuild_id(target[0])

        if targetid not in taskData.build_targets:
            continue

        if targetid in taskData.failed_deps:
            continue

        fnid = taskData.build_targets[targetid][0]

        # Remove stamps for targets if force mode active
        if self.cooker.configuration.force:
            fn = taskData.fn_index[fnid]
            bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (target[1], fn))
            bb.build.del_stamp(target[1], self.dataCache, fn)

        if fnid in taskData.failed_fnids:
            continue

        if target[1] not in taskData.tasks_lookup[fnid]:
            bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s does not exist for target %s" % (target[1], target[0]))

        listid = taskData.tasks_lookup[fnid][target[1]]

        mark_active(listid, 1)

    # Step C - Prune all inactive tasks
    #
    # Once all active tasks are marked, prune the ones we don't need.

    maps = []
    delcount = 0
    for listid in range(len(self.runq_fnid)):
        if runq_build[listid-delcount] == 1:
            maps.append(listid-delcount)
        else:
            del self.runq_fnid[listid-delcount]
            del self.runq_task[listid-delcount]
            del self.runq_depends[listid-delcount]
            del runq_build[listid-delcount]
            del self.runq_revdeps[listid-delcount]
            delcount = delcount + 1
            maps.append(-1)

    #
    # Step D - Sanity checks and computation
    #

    # Check to make sure we still have tasks to run
    if len(self.runq_fnid) == 0:
        if not taskData.abort:
            bb.msg.fatal(bb.msg.domain.RunQueue, "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
        else:
            bb.msg.fatal(bb.msg.domain.RunQueue, "No active tasks and not in --continue mode?! Please report this bug.")

    bb.msg.note(2, bb.msg.domain.RunQueue, "Pruned %s inactive tasks, %s left" % (delcount, len(self.runq_fnid)))

    # Remap the dependencies to account for the deleted tasks
    # Check we didn't delete a task we depend on
    for listid in range(len(self.runq_fnid)):
        newdeps = []
        origdeps = self.runq_depends[listid]
        for origdep in origdeps:
            if maps[origdep] == -1:
                bb.msg.fatal(bb.msg.domain.RunQueue, "Invalid mapping - Should never happen!")
            newdeps.append(maps[origdep])
        self.runq_depends[listid] = Set(newdeps)

    bb.msg.note(2, bb.msg.domain.RunQueue, "Assign Weightings")

    # Generate a list of reverse dependencies to ease future calculations
    for listid in range(len(self.runq_fnid)):
        for dep in self.runq_depends[listid]:
            self.runq_revdeps[dep].add(listid)

    # Identify tasks at the end of dependency chains
    # Error on circular dependency loops (length two)
    endpoints = []
    for listid in range(len(self.runq_fnid)):
        revdeps = self.runq_revdeps[listid]
        if len(revdeps) == 0:
            endpoints.append(listid)
        for dep in revdeps:
            if dep in self.runq_depends[listid]:
                #self.dump_data(taskData)
                bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep], taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid]))

    bb.msg.note(2, bb.msg.domain.RunQueue, "Compute totals (have %s endpoint(s))" % len(endpoints))

    # Calculate task weights
    # Check of higher length circular dependencies
    self.runq_weight = self.calculate_task_weights(endpoints)

    # Decide what order to execute the tasks in, pick a scheduler
    # FIXME - Allow user selection
    #self.sched = RunQueueScheduler(self)
    self.sched = RunQueueSchedulerSpeed(self)
    #self.sched = RunQueueSchedulerCompletion(self)

    # Sanity Check - Check for multiple tasks building the same provider
    prov_list = {}
    seen_fn = []
    for task in range(len(self.runq_fnid)):
        fn = taskData.fn_index[self.runq_fnid[task]]
        if fn in seen_fn:
            continue
        seen_fn.append(fn)
        for prov in self.dataCache.fn_provides[fn]:
            if prov not in prov_list:
                prov_list[prov] = [fn]
            elif fn not in prov_list[prov]:
                prov_list[prov].append(fn)
    error = False
    for prov in prov_list:
        if len(prov_list[prov]) > 1 and prov not in self.multi_provider_whitelist:
            error = True
            bb.msg.error(bb.msg.domain.RunQueue, "Multiple .bb files are due to be built which each provide %s (%s).\n This usually means one provides something the other doesn't and should." % (prov, " ".join(prov_list[prov])))
    #if error:
    #    bb.msg.fatal(bb.msg.domain.RunQueue, "Corrupted metadata configuration detected, aborting...")

    #self.dump_data(taskData)
2007-04-01 15:04:49 +00:00
def execute_runqueue ( self ) :
2006-11-16 15:02:15 +00:00
"""
Run the tasks in a queue prepared by prepare_runqueue
Upon failure , optionally try to recover the build using any alternate providers
( if the abort on failure configuration option isn ' t set)
"""
failures = 0
while 1 :
2007-04-01 15:04:49 +00:00
failed_fnids = [ ]
try :
self . execute_runqueue_internal ( )
finally :
if self . master_process :
failed_fnids = self . finish_runqueue ( )
2006-11-29 22:52:37 +00:00
if len ( failed_fnids ) == 0 :
2006-11-16 15:02:15 +00:00
return failures
2007-04-01 15:04:49 +00:00
if self . taskData . abort :
2006-11-29 22:52:37 +00:00
raise bb . runqueue . TaskFailure ( failed_fnids )
for fnid in failed_fnids :
2007-04-01 15:04:49 +00:00
#print "Failure: %s %s %s" % (fnid, self.taskData.fn_index[fnid], self.runq_task[fnid])
self . taskData . fail_fnid ( fnid )
2006-11-16 15:02:15 +00:00
failures = failures + 1
2006-11-29 22:52:37 +00:00
self . reset_runqueue ( )
2007-04-01 15:04:49 +00:00
self . prepare_runqueue ( )
def execute_runqueue_initVars(self):
    """
    Initialise the bookkeeping state used while executing the queue.
    """
    self.stats = RunQueueStats()

    self.active_builds = 0
    self.build_pids = {}
    self.failed_fnids = []
    self.master_process = True

    count = len(self.runq_fnid)
    self.runq_running = [0] * count
    self.runq_complete = [0] * count
    # Mark initial buildable tasks: anything with no dependencies can
    # start straight away.
    self.runq_buildable = [int(len(deps) == 0) for deps in self.runq_depends]
2006-11-16 15:02:15 +00:00
2007-04-01 15:04:49 +00:00
def task_complete ( self , task ) :
"""
Mark a task as completed
Look at the reverse dependencies and mark any task with
completed dependencies as buildable
"""
self . runq_complete [ task ] = 1
for revdep in self . runq_revdeps [ task ] :
if self . runq_running [ revdep ] == 1 :
continue
if self . runq_buildable [ revdep ] == 1 :
continue
alldeps = 1
for dep in self . runq_depends [ revdep ] :
if self . runq_complete [ dep ] != 1 :
alldeps = 0
if alldeps == 1 :
self . runq_buildable [ revdep ] = 1
fn = self . taskData . fn_index [ self . runq_fnid [ revdep ] ]
taskname = self . runq_task [ revdep ]
bb . msg . debug ( 1 , bb . msg . domain . RunQueue , " Marking task %s ( %s , %s ) as buildable " % ( revdep , fn , taskname ) )
def execute_runqueue_internal ( self ) :
2006-11-16 15:02:15 +00:00
"""
Run the tasks in a queue prepared by prepare_runqueue
"""
bb . msg . note ( 1 , bb . msg . domain . RunQueue , " Executing runqueue " )
2007-04-01 15:04:49 +00:00
self . execute_runqueue_initVars ( )
2006-11-16 15:02:15 +00:00
if len ( self . runq_fnid ) == 0 :
# nothing to do
2007-02-21 20:15:13 +00:00
return [ ]
2006-11-16 15:02:15 +00:00
2006-11-29 22:52:37 +00:00
def sigint_handler ( signum , frame ) :
raise KeyboardInterrupt
2008-01-06 16:51:51 +00:00
# RP - this code allows tasks to run out of the correct order - disabled, FIXME
2007-08-09 10:51:58 +00:00
# Find any tasks with current stamps and remove them from the queue
2008-01-06 16:51:51 +00:00
#for task1 in range(len(self.runq_fnid)):
# task = self.prio_map[task1]
# fn = self.taskData.fn_index[self.runq_fnid[task]]
# taskname = self.runq_task[task]
# if bb.build.stamp_is_current(taskname, self.dataCache, fn):
# bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task)))
# self.runq_running[task] = 1
# self.task_complete(task)
# self.stats.taskCompleted()
# self.stats.taskSkipped()
2007-08-09 10:51:58 +00:00
2007-04-01 15:04:49 +00:00
while True :
2008-01-06 16:51:51 +00:00
task = self . sched . next ( )
2007-04-01 15:04:49 +00:00
if task is not None :
fn = self . taskData . fn_index [ self . runq_fnid [ task ] ]
taskname = self . runq_task [ task ]
if bb . build . stamp_is_current ( taskname , self . dataCache , fn ) :
bb . msg . debug ( 2 , bb . msg . domain . RunQueue , " Stamp current task %s ( %s ) " % ( task , self . get_user_idstring ( task ) ) )
self . runq_running [ task ] = 1
self . task_complete ( task )
self . stats . taskCompleted ( )
self . stats . taskSkipped ( )
2006-11-16 15:02:15 +00:00
continue
2007-04-01 15:04:49 +00:00
bb . msg . note ( 1 , bb . msg . domain . RunQueue , " Running task %d of %d (ID: %s , %s ) " % ( self . stats . completed + self . active_builds + 1 , len ( self . runq_fnid ) , task , self . get_user_idstring ( task ) ) )
try :
pid = os . fork ( )
except OSError , e :
bb . msg . fatal ( bb . msg . domain . RunQueue , " fork failed: %d ( %s ) " % ( e . errno , e . strerror ) )
if pid == 0 :
# Bypass master process' handling
self . master_process = False
# Stop Ctrl+C being sent to children
# signal.signal(signal.SIGINT, signal.SIG_IGN)
# Make the child the process group leader
os . setpgid ( 0 , 0 )
2007-06-12 09:23:39 +00:00
newsi = os . open ( ' /dev/null ' , os . O_RDWR )
os . dup2 ( newsi , sys . stdin . fileno ( ) )
2007-04-01 15:04:49 +00:00
self . cooker . configuration . cmd = taskname [ 3 : ]
try :
self . cooker . tryBuild ( fn , False )
except bb . build . EventException :
bb . msg . error ( bb . msg . domain . Build , " Build of " + fn + " " + taskname + " failed " )
sys . exit ( 1 )
except :
bb . msg . error ( bb . msg . domain . Build , " Build of " + fn + " " + taskname + " failed " )
raise
sys . exit ( 0 )
self . build_pids [ pid ] = task
self . runq_running [ task ] = 1
self . active_builds = self . active_builds + 1
if self . active_builds < self . number_tasks :
2006-11-16 15:02:15 +00:00
continue
2007-04-01 15:04:49 +00:00
if self . active_builds > 0 :
result = os . waitpid ( - 1 , 0 )
self . active_builds = self . active_builds - 1
task = self . build_pids [ result [ 0 ] ]
if result [ 1 ] != 0 :
del self . build_pids [ result [ 0 ] ]
bb . msg . error ( bb . msg . domain . RunQueue , " Task %s ( %s ) failed " % ( task , self . get_user_idstring ( task ) ) )
self . failed_fnids . append ( self . runq_fnid [ task ] )
self . stats . taskFailed ( )
break
self . task_complete ( task )
self . stats . taskCompleted ( )
del self . build_pids [ result [ 0 ] ]
continue
return
2006-11-16 15:02:15 +00:00
2007-04-01 15:04:49 +00:00
def finish_runqueue(self):
    """
    Wait for any outstanding forked tasks to finish and collect their
    results. Returns the list of fnids whose tasks failed. On Ctrl+C,
    SIGINT is forwarded to each remaining child's process group and the
    KeyboardInterrupt is re-raised.
    """
    try:
        while self.active_builds > 0:
            bb.msg.note(1, bb.msg.domain.RunQueue, "Waiting for %s active tasks to finish" % self.active_builds)
            tasknum = 1
            for k, v in self.build_pids.iteritems():
                bb.msg.note(1, bb.msg.domain.RunQueue, "%s: %s (%s)" % (tasknum, self.get_user_idstring(v), k))
                tasknum = tasknum + 1
            result = os.waitpid(-1, 0)
            task = self.build_pids[result[0]]
            if result[1] != 0:
                bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed" % (task, self.get_user_idstring(task)))
                self.failed_fnids.append(self.runq_fnid[task])
                self.stats.taskFailed()
            del self.build_pids[result[0]]
            self.active_builds = self.active_builds - 1
        bb.msg.note(1, bb.msg.domain.RunQueue, "Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed." % (self.stats.completed, self.stats.skipped, self.stats.failed))
        return self.failed_fnids
    except KeyboardInterrupt:
        bb.msg.note(1, bb.msg.domain.RunQueue, "Sending SIGINT to remaining %s tasks" % self.active_builds)
        for k, v in self.build_pids.iteritems():
            try:
                # Negative pid: signal the child's whole process group.
                os.kill(-k, signal.SIGINT)
            except:
                pass
        raise

    # NOTE(review): everything below is unreachable - the try block always
    # returns and the except handler always re-raises. It looks like these
    # sanity checks were meant to run before the summary above; kept
    # untouched pending confirmation.
    # Sanity Checks
    for task in range(len(self.runq_fnid)):
        if self.runq_buildable[task] == 0:
            bb.msg.error(bb.msg.domain.RunQueue, "Task %s never buildable!" % task)
        if self.runq_running[task] == 0:
            bb.msg.error(bb.msg.domain.RunQueue, "Task %s never ran!" % task)
        if self.runq_complete[task] == 0:
            bb.msg.error(bb.msg.domain.RunQueue, "Task %s never completed!" % task)

    bb.msg.note(1, bb.msg.domain.RunQueue, "Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed." % (self.stats.completed, self.stats.skipped, self.stats.failed))

    return self.failed_fnids
2006-11-16 15:02:15 +00:00
def dump_data(self, taskQueue):
    """
    Dump some debug information on the internal data structures

    taskQueue -- the taskData the queue was built from (used to map
                 fnids back to filenames)
    """
    bb.msg.debug(3, bb.msg.domain.RunQueue, "run_tasks:")
    for task in range(len(self.runq_fnid)):
        bb.msg.debug(3, bb.msg.domain.RunQueue, " (%s)%s - %s: %s   Deps %s RevDeps %s" % (task,
            taskQueue.fn_index[self.runq_fnid[task]],
            self.runq_task[task],
            self.runq_weight[task],
            self.runq_depends[task],
            self.runq_revdeps[task]))

    bb.msg.debug(3, bb.msg.domain.RunQueue, "sorted_tasks:")
    for task1 in range(len(self.runq_fnid)):
        # The priority map lives on the scheduler chosen in
        # prepare_runqueue; RunQueue itself has no prio_map attribute
        # (referencing self.prio_map here raised AttributeError).
        if task1 in self.sched.prio_map:
            task = self.sched.prio_map[task1]
            bb.msg.debug(3, bb.msg.domain.RunQueue, " (%s)%s - %s: %s   Deps %s RevDeps %s" % (task,
                taskQueue.fn_index[self.runq_fnid[task]],
                self.runq_task[task],
                self.runq_weight[task],
                self.runq_depends[task],
                self.runq_revdeps[task]))