bitbake: Upgrade from 1.4 -> 1.7.4ish
git-svn-id: https://svn.o-hand.com/repos/poky/trunk@863 311d38ba-8fff-0310-9ca6-ca027cbcb966
parent 65930a38e4
commit 306b7c7a97
bitbake/ChangeLog
@@ -1,3 +1,21 @@
Changes in BitBake 1.7.3:

Changes in BitBake 1.7.1:
    - Major updates of the dependency handling and execution
      of tasks
    - Change of the SVN Fetcher to keep the checkout around,
      courtesy of Paul Sokolovsky (#1367)

Changes in BitBake 1.6.0:
    - Better msg handling
    - COW dict implementation from Tim Ansell (mithro) leading
      to better performance
    - Speed up of -s

Changes in BitBake 1.4.4:
    - SRCDATE now handled, courtesy of Justin Patrin
    - #1017 fix to work with rm_work

Changes in BitBake 1.4.2:
    - Send logs to oe.pastebin.com instead of pastebin.com
      (fixes #856)

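The COW dict mentioned in the 1.6.0 notes lands as lib/bb/COW.py later in this diff. As a minimal sketch of the underlying idea (illustrative only, not the COW.py API), a "copy" is just an empty subclass: reads fall through to the parent class via ordinary attribute lookup, writes shadow the parent's value in the child, so taking a copy is O(1):

    # Illustrative copy-on-write via class inheritance; names here are
    # hypothetical, the real implementation is shown in lib/bb/COW.py below.
    class Base(object):
        pass

    def cow_copy(cls):
        class Child(cls):   # empty subclass: shares all data with cls
            pass
        return Child

    Base.x = 1
    Snapshot = cow_copy(Base)
    Snapshot.x = 2          # shadows Base.x in the copy only
    assert (Base.x, Snapshot.x) == (1, 2)
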
bitbake/MANIFEST
@@ -5,29 +5,34 @@ setup.py
bin/bitdoc
bin/bbimage
bin/bitbake
lib/bb/COW.py
lib/bb/__init__.py
lib/bb/build.py
lib/bb/cache.py
lib/bb/data.py
lib/bb/data_smart.py
lib/bb/event.py
lib/bb/fetch/bk.py
lib/bb/manifest.py
lib/bb/methodpool.py
lib/bb/msg.py
lib/bb/providers.py
lib/bb/runqueue.py
lib/bb/shell.py
lib/bb/taskdata.py
lib/bb/utils.py
lib/bb/fetch/cvs.py
lib/bb/fetch/git.py
lib/bb/fetch/__init__.py
lib/bb/fetch/local.py
lib/bb/fetch/perforce.py
lib/bb/fetch/ssh.py
lib/bb/fetch/svk.py
lib/bb/fetch/svn.py
lib/bb/fetch/wget.py
lib/bb/fetch/ssh.py
lib/bb/manifest.py
lib/bb/methodpool.py
lib/bb/parse/__init__.py
lib/bb/parse/parse_py/BBHandler.py
lib/bb/parse/parse_py/ConfHandler.py
lib/bb/parse/parse_py/__init__.py
lib/bb/shell.py
lib/bb/utils.py
doc/COPYING.GPL
doc/COPYING.MIT
doc/manual/html.css

bitbake/TODO (deleted, 18 lines)
@@ -1,18 +0,0 @@
On popular request by popular people, a list of tasks to do:

-Kill insecure usage of os.system either by properly escaping
 the strings or a faster replacement not involving /bin/sh
-Introduce a -p option to automatically hotshot/profile the
 run
-Cache dependencies separately and invalidate them when any file
 changed.
-...


DONE:
-On generating the inter-package deps, do not parse each file multiple
 times.
 -We build the lists while parsing the data now
 (WAS: Do not generate the world dependency tree, only when someone
 requests it.)

bitbake/bin/bbimage
@@ -18,15 +18,16 @@
# Place, Suite 330, Boston, MA 02111-1307 USA.

import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))
import bb
from bb import *

__version__ = 1.0
__version__ = 1.1
type = "jffs2"
cfg_bb = data.init()
cfg_oespawn = data.init()

bb.msg.set_debug_level(0)

def usage():
    print "Usage: bbimage [options ...]"

(File diff suppressed because it is too large.)

bitbake/bin/bitdoc
@@ -442,7 +442,7 @@ Create a set of html pages (documentation) for a bitbake.conf...
    options, args = parser.parse_args( sys.argv )

    if options.debug:
        bb.debug_level = options.debug
        bb.msg.set_debug_level(options.debug)

    return options.config, options.output

bitbake/classes/base.bbclass
@@ -41,7 +41,7 @@ bbdebug() {
        exit 1
    }

    test ${@bb.debug_level} -ge $1 && {
    test ${@bb.msg.debug_level} -ge $1 && {
        shift
        echo "DEBUG:" $*
    }

bitbake/conf/bitbake.conf
@@ -26,11 +26,12 @@ DEPLOY_DIR_IMAGE = "${DEPLOY_DIR}/images"
DL_DIR = "${TMPDIR}/downloads"
FETCHCOMMAND = ""
FETCHCOMMAND_cvs = "/usr/bin/env cvs -d${CVSROOT} co ${CVSCOOPTS} ${CVSMODULE}"
FETCHCOMMAND_svn = "/usr/bin/env svn co http://${SVNROOT} ${SVNCOOPTS} ${SVNMODULE}"
FETCHCOMMAND_svn = "/usr/bin/env svn co ${SVNCOOPTS} ${SVNROOT} ${SVNMODULE}"
FETCHCOMMAND_wget = "/usr/bin/env wget -t 5 --passive-ftp -P ${DL_DIR} ${URI}"
FILESDIR = "${@bb.which(bb.data.getVar('FILESPATH', d, 1), '.')}"
FILESPATH = "${FILE_DIRNAME}/${PF}:${FILE_DIRNAME}/${P}:${FILE_DIRNAME}/${PN}:${FILE_DIRNAME}/files:${FILE_DIRNAME}"
FILE_DIRNAME = "${@os.path.dirname(bb.data.getVar('FILE', d))}"
GITDIR = "${DL_DIR}/git"
IMAGE_CMD = "_NO_DEFINED_IMAGE_TYPES_"
IMAGE_ROOTFS = "${TMPDIR}/rootfs"
MKTEMPCMD = "mktemp -q ${TMPBASE}"

@@ -47,9 +48,11 @@ RESUMECOMMAND_wget = "/usr/bin/env wget -c -t 5 --passive-ftp -P ${DL_DIR} ${URI}"
S = "${WORKDIR}/${P}"
SRC_URI = "file://${FILE}"
STAMP = "${TMPDIR}/stamps/${PF}"
SVNDIR = "${DL_DIR}/svn"
T = "${WORKDIR}/temp"
TARGET_ARCH = "${BUILD_ARCH}"
TMPDIR = "${TOPDIR}/tmp"
UPDATECOMMAND = ""
UPDATECOMMAND_cvs = "/usr/bin/env cvs -d${CVSROOT} update ${CVSCOOPTS}"
UPDATECOMMAND_svn = "/usr/bin/env svn update ${SVNCOOPTS}"
WORKDIR = "${TMPDIR}/work/${PF}"

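The FETCHCOMMAND_svn change above reorders the checkout arguments (options before the repository path) and drops the hard-coded http:// prefix. As a rough sketch of how such a command template is resolved, the fetcher type picks the FETCHCOMMAND_<type> variable and ${VAR} references are substituted from the datastore (the expand helper and values below are hypothetical, not BitBake's data module):

    import re

    def expand(cmd, env):
        # naive ${VAR} substitution for illustration
        return re.sub(r'\$\{(\w+)\}', lambda m: env.get(m.group(1), ''), cmd)

    env = {
        'SVNCOOPTS': '-r 1234',                 # hypothetical values
        'SVNROOT': 'svn.example.org/repo',
        'SVNMODULE': 'trunk',
    }
    cmd = '/usr/bin/env svn co ${SVNCOOPTS} ${SVNROOT} ${SVNMODULE}'
    print expand(cmd, env)
    # /usr/bin/env svn co -r 1234 svn.example.org/repo trunk
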
contrib/vim/ftdetect/bitbake.vim (new file)
@@ -0,0 +1,4 @@
au BufNewFile,BufRead *.bb setfiletype bitbake
au BufNewFile,BufRead *.bbclass setfiletype bitbake
au BufNewFile,BufRead *.inc setfiletype bitbake
" au BufNewFile,BufRead *.conf setfiletype bitbake

contrib/vim/syntax/bitbake.vim
@@ -42,11 +42,11 @@ syn region bbString matchgroup=bbQuote start=/'/ skip=/\\$/ excludenl end=/'/ c

syn keyword bbExportFlag export contained nextgroup=bbIdentifier skipwhite
syn match bbVarDeref "${[a-zA-Z0-9\-_\.]\+}" contained
syn match bbVarDef "^\(export\s*\)\?\([a-zA-Z0-9\-_\.]\+\(_[${}a-zA-Z0-9\-_\.]\+\)\?\)\s*\(\(:=\)\|\(+=\)\|\(=+\)\|\(?=\)\|=\)\@=" contains=bbExportFlag,bbIdentifier,bbVarDeref nextgroup=bbVarEq
syn match bbVarDef "^\(export\s*\)\?\([a-zA-Z0-9\-_\.]\+\(_[${}a-zA-Z0-9\-_\.]\+\)\?\)\s*\(:=\|+=\|=+\|\.=\|=\.\|?=\|=\)\@=" contains=bbExportFlag,bbIdentifier,bbVarDeref nextgroup=bbVarEq

syn match bbIdentifier "[a-zA-Z0-9\-_\.]\+" display contained
"syn keyword bbVarEq = display contained nextgroup=bbVarValue
syn match bbVarEq "\(:=\)\|\(+=\)\|\(=+\)\|\(?=\)\|=" contained nextgroup=bbVarValue
syn match bbVarEq "\(:=\|+=\|=+\|\.=\|=\.\|?=\|=\)" contained nextgroup=bbVarValue
syn match bbVarValue ".*$" contained contains=bbString,bbVarDeref

@@ -90,8 +90,8 @@ syn region bbDefRegion start='^def\s\+\w\+\s*([^)]*)\s*:\s*$' end='^\(\s\|$\)\@

" BitBake statements
syn keyword bbStatement include inherit addtask addhandler EXPORT_FUNCTIONS display contained
syn match bbStatementLine "^\(include\|inherit\|addtask\|addhandler\|EXPORT_FUNCTIONS\)\s\+" contains=bbStatement nextgroup=bbStatementRest
syn keyword bbStatement include inherit require addtask addhandler EXPORT_FUNCTIONS display contained
syn match bbStatementLine "^\(include\|inherit\|require\|addtask\|addhandler\|EXPORT_FUNCTIONS\)\s\+" contains=bbStatement nextgroup=bbStatementRest
syn match bbStatementRest ".*$" contained contains=bbString,bbVarDeref

" Highlight

bitbake/doc/manual/usermanual.xml
@@ -17,7 +17,7 @@
    <holder>Phil Blundell</holder>
  </copyright>
  <legalnotice>
    <para>This work is licensed under the Creative Commons Attribution License. To view a copy of this license, visit <ulink url="http://creativecommons.org/licenses/by/2.0/">http://creativecommons.org/licenses/by/2.0/</ulink> or send a letter to Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.</para>
    <para>This work is licensed under the Creative Commons Attribution License. To view a copy of this license, visit <ulink url="http://creativecommons.org/licenses/by/2.5/">http://creativecommons.org/licenses/by/2.5/</ulink> or send a letter to Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.</para>
  </legalnotice>
</bookinfo>
<chapter>

@@ -195,7 +195,7 @@ addtask printdate before do_build</screen></para>
<section>
    <title>Events</title>
    <para><emphasis>NOTE:</emphasis> This is only supported in .bb and .bbclass files.</para>
    <para>BitBake also implements a means of registering event handlers. Events are triggered at certain points during operation, such as the beginning of operation against a given .bb, the start of a given task, task failure, task success, et cetera. The intent was to make it easy to do things like email notifications on build failure.</para>
    <para>BitBake allows installing event handlers. Events are triggered at certain points during operation, such as the beginning of operation against a given .bb, the start of a given task, task failure, task success, et cetera. The intent was to make it easy to do things like email notifications on build failure.</para>
    <para><screen>addhandler myclass_eventhandler
python myclass_eventhandler() {
    from bb.event import NotHandled, getName

@@ -205,6 +205,7 @@ python myclass_eventhandler() {
    print "The file we run for is %s" % data.getVar('FILE', e.data, True)

    return NotHandled
}
</screen></para><para>
This event handler gets called every time an event is triggered. A global variable <varname>e</varname> is defined; <varname>e</varname>.data contains an instance of bb.data. With the getName(<varname>e</varname>)
method one can get the name of the triggered event.</para><para>The above event handler prints the name

@@ -344,15 +345,19 @@ options:
                        cannot be remade, the other dependencies of these
                        targets can be processed all the same.
  -f, --force           force run of specified cmd, regardless of stamp status
  -i, --interactive     drop into the interactive mode.
  -i, --interactive     drop into the interactive mode, also called the BitBake
                        shell.
  -c CMD, --cmd=CMD     Specify task to execute. Note that this only executes
                        the specified task for the providee and the packages
                        it depends on, i.e. 'compile' does not implicitly call
                        stage for the dependencies (IOW: use only if you know
                        what you are doing)
                        what you are doing). Depending on the base.bbclass a
                        listtasks task is defined and will show available
                        tasks
  -r FILE, --read=FILE  read the specified file before bitbake.conf
  -v, --verbose         output more chit-chat to the terminal
  -D, --debug           Increase the debug level
  -D, --debug           Increase the debug level. You can specify this more
                        than once.
  -n, --dry-run         don't execute, just go through the motions
  -p, --parse-only      quit after parsing the BB files (developers only)
  -d, --disable-psyco   disable using the psyco just-in-time compiler (not

@@ -360,6 +365,12 @@ options:
  -s, --show-versions   show current and preferred versions of all packages
  -e, --environment     show the global or per-package environment (this is
                        what used to be bbread)
  -g, --graphviz        emit the dependency trees of the specified packages in
                        the dot syntax
  -I IGNORED_DOT_DEPS, --ignore-deps=IGNORED_DOT_DEPS
                        Stop processing at the given list of dependencies when
                        generating dependency graphs. This can help to make
                        the graph more appealing

</screen>
</para>

@@ -386,6 +397,14 @@ options:
<screen><prompt>$ </prompt>bitbake virtual/whatever</screen>
<screen><prompt>$ </prompt>bitbake -c clean virtual/whatever</screen>
</example>
<example>
    <title>Generating dependency graphs</title>
    <para>BitBake is able to generate dependency graphs using the dot syntax. These graphs can be converted to images using the <application>dot</application> application from <ulink url="http://www.graphviz.org">graphviz</ulink>. Three files will be written into the current working directory: <emphasis>depends.dot</emphasis> containing <varname>DEPENDS</varname> variables, and <emphasis>rdepends.dot</emphasis> and <emphasis>alldepends.dot</emphasis> containing both <varname>DEPENDS</varname> and <varname>RDEPENDS</varname>. To stop depending on common depends, one can use <prompt>-I depend</prompt> to omit these from the graph; this can lead to more readable graphs. E.g. this way <varname>DEPENDS</varname> from inherited classes, e.g. base.bbclass, can be removed from the graph.</para>
    <screen><prompt>$ </prompt>bitbake -g blah</screen>
    <screen><prompt>$ </prompt>bitbake -g -I virtual/whatever -I bloom blah</screen>
</example>
</para>
</section>
<section>

bitbake/lib/bb/COW.py (new file)
@@ -0,0 +1,305 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
This is a copy-on-write dictionary and set which abuses classes to try and be nice and fast.

Please note:
 Be careful when using mutable types (i.e. dicts and lists) - operations involving these are SLOW.
 Assign a file to __warn__ to get warnings about slow operations.
"""

from inspect import getmro

import copy
import types, sets
types.ImmutableTypes = tuple([ \
    types.BooleanType, \
    types.ComplexType, \
    types.FloatType, \
    types.IntType, \
    types.LongType, \
    types.NoneType, \
    types.TupleType, \
    sets.ImmutableSet] + \
    list(types.StringTypes))

MUTABLE = "__mutable__"

class COWMeta(type):
    pass

class COWDictMeta(COWMeta):
    __warn__ = False
    __hasmutable__ = False
    __marker__ = tuple()

    def __str__(cls):
        # FIXME: I have magic numbers!
        return "<COWDict Level: %i Current Keys: %i>" % (cls.__count__, len(cls.__dict__) - 3)
    __repr__ = __str__

    def cow(cls):
        class C(cls):
            __count__ = cls.__count__ + 1
        return C
    copy = cow
    __call__ = cow

    def __setitem__(cls, key, value):
        if not isinstance(value, types.ImmutableTypes):
            if not isinstance(value, COWMeta):
                cls.__hasmutable__ = True
            key += MUTABLE
        setattr(cls, key, value)

    def __getmutable__(cls, key, readonly=False):
        nkey = key + MUTABLE
        try:
            return cls.__dict__[nkey]
        except KeyError:
            pass

        value = getattr(cls, nkey)
        if readonly:
            return value

        if not cls.__warn__ is False and not isinstance(value, COWMeta):
            print >> cls.__warn__, "Warning: Doing a copy because %s is a mutable type." % key
        try:
            value = value.copy()
        except AttributeError, e:
            value = copy.copy(value)
        setattr(cls, nkey, value)
        return value

    __getmarker__ = []
    def __getreadonly__(cls, key, default=__getmarker__):
        """
        Get a value (even if mutable) which you promise not to change.
        """
        return cls.__getitem__(key, default, True)

    def __getitem__(cls, key, default=__getmarker__, readonly=False):
        try:
            try:
                value = getattr(cls, key)
            except AttributeError:
                value = cls.__getmutable__(key, readonly)

            # This is for values which have been deleted
            if value is cls.__marker__:
                raise AttributeError("key %s does not exist." % key)

            return value
        except AttributeError, e:
            if not default is cls.__getmarker__:
                return default

            raise KeyError(str(e))

    def __delitem__(cls, key):
        cls.__setitem__(key, cls.__marker__)

    def __revertitem__(cls, key):
        if not cls.__dict__.has_key(key):
            key += MUTABLE
        delattr(cls, key)

    def has_key(cls, key):
        value = cls.__getreadonly__(key, cls.__marker__)
        if value is cls.__marker__:
            return False
        return True

    def iter(cls, type, readonly=False):
        for key in dir(cls):
            if key.startswith("__"):
                continue

            if key.endswith(MUTABLE):
                key = key[:-len(MUTABLE)]

            if type == "keys":
                yield key

            try:
                if readonly:
                    value = cls.__getreadonly__(key)
                else:
                    value = cls[key]
            except KeyError:
                continue

            if type == "values":
                yield value
            if type == "items":
                yield (key, value)
        raise StopIteration()

    def iterkeys(cls):
        return cls.iter("keys")
    def itervalues(cls, readonly=False):
        if not cls.__warn__ is False and cls.__hasmutable__ and readonly is False:
            print >> cls.__warn__, "Warning: If you aren't going to change any of the values, call with True."
        return cls.iter("values", readonly)
    def iteritems(cls, readonly=False):
        if not cls.__warn__ is False and cls.__hasmutable__ and readonly is False:
            print >> cls.__warn__, "Warning: If you aren't going to change any of the values, call with True."
        return cls.iter("items", readonly)

class COWSetMeta(COWDictMeta):
    def __str__(cls):
        # FIXME: I have magic numbers!
        return "<COWSet Level: %i Current Keys: %i>" % (cls.__count__, len(cls.__dict__) - 3)
    __repr__ = __str__

    def cow(cls):
        class C(cls):
            __count__ = cls.__count__ + 1
        return C

    def add(cls, value):
        COWDictMeta.__setitem__(cls, repr(hash(value)), value)

    def remove(cls, value):
        COWDictMeta.__delitem__(cls, repr(hash(value)))

    def __in__(cls, value):
        return COWDictMeta.has_key(repr(hash(value)))

    def iterkeys(cls):
        raise TypeError("sets don't have keys")

    def iteritems(cls):
        raise TypeError("sets don't have 'items'")

# These are the actual classes you use!
class COWDictBase(object):
    __metaclass__ = COWDictMeta
    __count__ = 0

class COWSetBase(object):
    __metaclass__ = COWSetMeta
    __count__ = 0

if __name__ == "__main__":
    import sys
    COWDictBase.__warn__ = sys.stderr
    a = COWDictBase()
    print "a", a

    a['a'] = 'a'
    a['b'] = 'b'
    a['dict'] = {}

    b = a.copy()
    print "b", b
    b['c'] = 'b'

    print

    print "a", a
    for x in a.iteritems():
        print x
    print "--"
    print "b", b
    for x in b.iteritems():
        print x
    print

    b['dict']['a'] = 'b'
    b['a'] = 'c'

    print "a", a
    for x in a.iteritems():
        print x
    print "--"
    print "b", b
    for x in b.iteritems():
        print x
    print

    try:
        b['dict2']
    except KeyError, e:
        print "Okay!"

    a['set'] = COWSetBase()
    a['set'].add("o1")
    a['set'].add("o1")
    a['set'].add("o2")

    print "a", a
    for x in a['set'].itervalues():
        print x
    print "--"
    print "b", b
    for x in b['set'].itervalues():
        print x
    print

    b['set'].add('o3')

    print "a", a
    for x in a['set'].itervalues():
        print x
    print "--"
    print "b", b
    for x in b['set'].itervalues():
        print x
    print

    a['set2'] = set()
    a['set2'].add("o1")
    a['set2'].add("o1")
    a['set2'].add("o2")

    print "a", a
    for x in a.iteritems():
        print x
    print "--"
    print "b", b
    for x in b.iteritems(readonly=True):
        print x
    print

    del b['b']
    try:
        print b['b']
    except KeyError:
        print "Yay! deleted key raises error"

    if b.has_key('b'):
        print "Boo!"
    else:
        print "Yay - has_key with delete works!"

    print "a", a
    for x in a.iteritems():
        print x
    print "--"
    print "b", b
    for x in b.iteritems(readonly=True):
        print x
    print

    b.__revertitem__('b')

    print "a", a
    for x in a.iteritems():
        print x
    print "--"
    print "b", b
    for x in b.iteritems(readonly=True):
        print x
    print

    b.__revertitem__('dict')
    print "a", a
    for x in a.iteritems():
        print x
    print "--"
    print "b", b
    for x in b.iteritems(readonly=True):
        print x
    print

bitbake/lib/bb/__init__.py
@@ -23,7 +23,7 @@ this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA 02111-1307 USA.
"""

__version__ = "1.4.3"
__version__ = "1.7.4"

__all__ = [

@@ -63,24 +63,24 @@ __all__ = [
    "manifest",
    "methodpool",
    "cache",
    "runqueue",
    "taskdata",
    "providers",
]

whitespace = '\t\n\x0b\x0c\r '
lowercase = 'abcdefghijklmnopqrstuvwxyz'

import sys, os, types, re, string
import sys, os, types, re, string, bb
from bb import msg

#projectdir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
projectdir = os.getcwd()

debug_level = 0

if "BBDEBUG" in os.environ:
    level = int(os.environ["BBDEBUG"])
    if level:
        debug_level = level
    else:
        debug_level = 0
    bb.msg.set_debug_level(level)

class VarExpandError(Exception):
    pass

@@ -99,22 +99,17 @@ class MalformedUrl(Exception):
#######################################################################
#######################################################################

debug_prepend = ''


def debug(lvl, *args):
    if debug_level >= lvl:
        print debug_prepend + 'DEBUG:', ''.join(args)
    bb.msg.std_debug(lvl, ''.join(args))

def note(*args):
    print debug_prepend + 'NOTE:', ''.join(args)
    bb.msg.std_note(''.join(args))

def error(*args):
    print debug_prepend + 'ERROR:', ''.join(args)
    bb.msg.std_error(''.join(args))

def fatal(*args):
    print debug_prepend + 'ERROR:', ''.join(args)
    sys.exit(1)
    bb.msg.std_fatal(''.join(args))


#######################################################################

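The bb.msg module these wrappers now delegate to is not part of the excerpt shown here. A stand-in sketch of the interface the call sites in this diff rely on (an assumption about its shape, not the actual lib/bb/msg.py): per-domain message functions taking a level, a domain and the text, plus std_* variants for the legacy wrappers above:

    class domain:                       # stand-in for bb.msg.domain.*
        Build, Cache, Data, Fetcher = "Build", "Cache", "Data", "Fetcher"

    debug_level = {'default': 0}        # call sites read bb.msg.debug_level['default']

    def set_debug_level(level):
        debug_level['default'] = level

    def debug(lvl, dom, text):
        if debug_level['default'] >= lvl:
            print 'DEBUG: %s: %s' % (dom, text)

    def std_debug(lvl, text):           # legacy entry point used by bb.debug()
        debug(lvl, 'default', text)

    def note(lvl, dom, text):
        print 'NOTE: ' + text

    def error(dom, text):
        print 'ERROR: ' + text
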
bitbake/lib/bb/build.py
@@ -25,18 +25,9 @@ You should have received a copy of the GNU General Public License along with
Based on functions from the base bb module, Copyright 2003 Holger Schurig
"""

from bb import debug, data, fetch, fatal, error, note, event, mkdirhier, utils
from bb import data, fetch, event, mkdirhier, utils
import bb, os

# data holds flags and function name for a given task
_task_data = data.init()

# graph represents task interdependencies
_task_graph = bb.digraph()

# stack represents execution order, excepting dependencies
_task_stack = []

# events
class FuncFailed(Exception):
    """Executed function failed"""

@@ -76,13 +67,6 @@ class InvalidTask(TaskBase):

# functions

def init(data):
    global _task_data, _task_graph, _task_stack
    _task_data = data.init()
    _task_graph = bb.digraph()
    _task_stack = []


def exec_func(func, d, dirs = None):
    """Execute a BB 'function'"""

@@ -163,7 +147,7 @@ def exec_func_shell(func, d):

    f = open(runfile, "w")
    f.write("#!/bin/sh -e\n")
    if bb.debug_level > 0: f.write("set -x\n")
    if bb.msg.debug_level['default'] > 0: f.write("set -x\n")
    data.emit_env(f, d)

    f.write("cd %s\n" % os.getcwd())

@@ -171,18 +155,18 @@ def exec_func_shell(func, d):
    f.close()
    os.chmod(runfile, 0775)
    if not func:
        error("Function not specified")
        bb.msg.error(bb.msg.domain.Build, "Function not specified")
        raise FuncFailed()

    # open logs
    si = file('/dev/null', 'r')
    try:
        if bb.debug_level > 0:
        if bb.msg.debug_level['default'] > 0:
            so = os.popen("tee \"%s\"" % logfile, "w")
        else:
            so = file(logfile, 'w')
    except OSError, e:
        bb.error("opening log file: %s" % e)
        bb.msg.error(bb.msg.domain.Build, "opening log file: %s" % e)
        pass

    se = so

@@ -205,7 +189,10 @@ def exec_func_shell(func, d):
    else:
        maybe_fakeroot = ''
    ret = os.system('%ssh -e %s' % (maybe_fakeroot, runfile))
    os.chdir(prevdir)
    try:
        os.chdir(prevdir)
    except:
        pass

    if not interact:
        # restore the backups

@@ -224,14 +211,14 @@ def exec_func_shell(func, d):
    os.close(ose[0])

    if ret == 0:
        if bb.debug_level > 0:
        if bb.msg.debug_level['default'] > 0:
            os.remove(runfile)
        # os.remove(logfile)
        return
    else:
        error("function %s failed" % func)
        bb.msg.error(bb.msg.domain.Build, "function %s failed" % func)
        if data.getVar("BBINCLUDELOGS", d):
            error("log data follows (%s)" % logfile)
            bb.msg.error(bb.msg.domain.Build, "log data follows (%s)" % logfile)
            f = open(logfile, "r")
            while True:
                l = f.readline()

@@ -241,7 +228,7 @@ def exec_func_shell(func, d):
                print '| %s' % l
            f.close()
        else:
            error("see log in %s" % logfile)
            bb.msg.error(bb.msg.domain.Build, "see log in %s" % logfile)
        raise FuncFailed( logfile )



@@ -281,7 +268,7 @@ def exec_task(task, d):
            return 1

        try:
            debug(1, "Executing task %s" % item)
            bb.msg.debug(1, bb.msg.domain.Build, "Executing task %s" % item)
            old_overrides = data.getVar('OVERRIDES', d, 0)
            localdata = data.createCopy(d)
            data.setVar('OVERRIDES', 'task_%s:%s' % (item, old_overrides), localdata)

@@ -292,21 +279,63 @@ def exec_task(task, d):
            task_cache.append(item)
            data.setVar('_task_cache', task_cache, d)
        except FuncFailed, reason:
            note( "Task failed: %s" % reason )
            bb.msg.note(1, bb.msg.domain.Build, "Task failed: %s" % reason )
            failedevent = TaskFailed(item, d)
            event.fire(failedevent)
            raise EventException("Function failed in task: %s" % reason, failedevent)

    # execute
    task_graph.walkdown(task, execute)
    if data.getVarFlag(task, 'dontrundeps', d):
        execute(None, task)
    else:
        task_graph.walkdown(task, execute)

    # make stamp, or cause event and raise exception
    if not data.getVarFlag(task, 'nostamp', d):
        mkstamp(task, d)

def stamp_is_current_cache(dataCache, file_name, task, checkdeps = 1):
    """
    Check status of a given task's stamp.
    Returns 0 if it is not current and needs updating.
    Same as stamp_is_current but works against the dataCache instead of d.
    """
    task_graph = dataCache.task_queues[file_name]

    if not dataCache.stamp[file_name]:
        return 0

    stampfile = "%s.%s" % (dataCache.stamp[file_name], task)
    if not os.access(stampfile, os.F_OK):
        return 0

    if checkdeps == 0:
        return 1

    import stat
    tasktime = os.stat(stampfile)[stat.ST_MTIME]

    _deps = []
    def checkStamp(graph, task):
        # check for existence
        if 'nostamp' in dataCache.task_deps[file_name] and task in dataCache.task_deps[file_name]['nostamp']:
            return 1

        if not stamp_is_current_cache(dataCache, file_name, task, 0):
            return 0

        depfile = "%s.%s" % (dataCache.stamp[file_name], task)
        deptime = os.stat(depfile)[stat.ST_MTIME]
        if deptime > tasktime:
            return 0
        return 1

    return task_graph.walkdown(task, checkStamp)

def stamp_is_current(task, d, checkdeps = 1):
    """Check status of a given task's stamp. returns 0 if it is not current and needs updating."""
    """
    Check status of a given task's stamp.
    Returns 0 if it is not current and needs updating.
    """
    task_graph = data.getVar('_task_graph', d)
    if not task_graph:
        task_graph = bb.digraph()

@@ -360,7 +389,6 @@ def mkstamp(task, d):
    f = open(stamp, "w")
    f.close()


def add_task(task, deps, d):
    task_graph = data.getVar('_task_graph', d)
    if not task_graph:

@@ -374,6 +402,21 @@ def add_task(task, deps, d):
    # don't assume holding a reference
    data.setVar('_task_graph', task_graph, d)

    task_deps = data.getVar('_task_deps', d)
    if not task_deps:
        task_deps = {}
    def getTask(name):
        deptask = data.getVarFlag(task, name, d)
        if deptask:
            if not name in task_deps:
                task_deps[name] = {}
            task_deps[name][task] = deptask
    getTask('deptask')
    getTask('rdeptask')
    getTask('recrdeptask')
    getTask('nostamp')

    data.setVar('_task_deps', task_deps, d)

def remove_task(task, kill, d):
    """Remove a BB 'task'.

@@ -399,6 +442,3 @@ def task_exists(task, d):
        task_graph = bb.digraph()
        data.setVar('_task_graph', task_graph, d)
    return task_graph.hasnode(task)

def get_task_data():
    return _task_data

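The new _task_deps table above is populated from task flags (deptask, rdeptask, recrdeptask, nostamp) that recipes and classes set on tasks. A condensed sketch of the collection step, with a plain dict standing in for data.getVarFlag (the flag values here are hypothetical):

    task_deps = {}

    def getTask(task, name, flags):
        deptask = flags.get(name)       # stand-in for data.getVarFlag(task, name, d)
        if deptask:
            task_deps.setdefault(name, {})[task] = deptask

    getTask('do_populate_staging', 'deptask', {'deptask': 'do_populate_staging'})
    print task_deps
    # {'deptask': {'do_populate_staging': 'do_populate_staging'}}
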
bitbake/lib/bb/cache.py
@@ -33,15 +33,15 @@ Place, Suite 330, Boston, MA 02111-1307 USA.
import os, re
import bb.data
import bb.utils
from sets import Set

try:
    import cPickle as pickle
except ImportError:
    import pickle
    print "NOTE: Importing cPickle failed. Falling back to a very slow implementation."
    bb.msg.note(1, bb.msg.domain.Cache, "Importing cPickle failed. Falling back to a very slow implementation.")

# __cache_version__ = "123"
__cache_version__ = "124" # changes the __depends structure
__cache_version__ = "125"

class Cache:
    """

@@ -58,14 +58,12 @@ class Cache:

        if self.cachedir in [None, '']:
            self.has_cache = False
            if cooker.cb is not None:
                print "NOTE: Not using a cache. Set CACHE = <directory> to enable."
            bb.msg.note(1, bb.msg.domain.Cache, "Not using a cache. Set CACHE = <directory> to enable.")
        else:
            self.has_cache = True
            self.cachefile = os.path.join(self.cachedir, "bb_cache.dat")

            if cooker.cb is not None:
                print "NOTE: Using cache in '%s'" % self.cachedir
            bb.msg.debug(1, bb.msg.domain.Cache, "Using cache in '%s'" % self.cachedir)
            try:
                os.stat( self.cachedir )
            except OSError:

@@ -80,7 +78,7 @@ class Cache:
                if version_data['BITBAKE_VER'] != bb.__version__:
                    raise ValueError, 'Bitbake Version Mismatch'
            except (ValueError, KeyError):
                bb.note("Invalid cache found, rebuilding...")
                bb.msg.note(1, bb.msg.domain.Cache, "Invalid cache found, rebuilding...")
                self.depends_cache = {}

        if self.depends_cache:

@@ -108,7 +106,7 @@ class Cache:
        if fn != self.data_fn:
            # We're trying to access data in the cache which doesn't exist
            # yet setData hasn't been called to setup the right access. Very bad.
            bb.error("Parsing error data_fn %s and fn %s don't match" % (self.data_fn, fn))
            bb.msg.error(bb.msg.domain.Cache, "Parsing error data_fn %s and fn %s don't match" % (self.data_fn, fn))

        result = bb.data.getVar(var, self.data, exp)
        self.depends_cache[fn][var] = result

@@ -127,15 +125,15 @@ class Cache:
        self.getVar("__depends", fn, True)
        self.depends_cache[fn]["CACHETIMESTAMP"] = bb.parse.cached_mtime(fn)

    def loadDataFull(self, fn, cooker):
    def loadDataFull(self, fn, cfgData):
        """
        Return a complete set of data for fn.
        To do this, we need to parse the file.
        """
        bb_data, skipped = self.load_bbfile(fn, cooker)
        bb_data, skipped = self.load_bbfile(fn, cfgData)
        return bb_data

    def loadData(self, fn, cooker):
    def loadData(self, fn, cfgData):
        """
        Load a subset of data for fn.
        If the cached data is valid we do nothing,

@@ -148,7 +146,7 @@ class Cache:
                return True, True
            return True, False

        bb_data, skipped = self.load_bbfile(fn, cooker)
        bb_data, skipped = self.load_bbfile(fn, cfgData)
        self.setData(fn, bb_data)
        return False, skipped

@@ -175,32 +173,36 @@ class Cache:

        # Check file still exists
        if self.mtime(fn) == 0:
            bb.debug(2, "Cache: %s no longer exists" % fn)
            bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s no longer exists" % fn)
            self.remove(fn)
            return False

        # File isn't in depends_cache
        if not fn in self.depends_cache:
            bb.debug(2, "Cache: %s is not cached" % fn)
            bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s is not cached" % fn)
            self.remove(fn)
            return False

        # Check the file's timestamp
        if bb.parse.cached_mtime(fn) > self.getVar("CACHETIMESTAMP", fn, True):
            bb.debug(2, "Cache: %s changed" % fn)
            bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s changed" % fn)
            self.remove(fn)
            return False

        # Check dependencies are still valid
        depends = self.getVar("__depends", fn, True)
        for f, old_mtime in depends:
            # Check if file still exists
            if self.mtime(f) == 0:
                return False

            new_mtime = bb.parse.cached_mtime(f)
            if (new_mtime > old_mtime):
                bb.debug(2, "Cache: %s's dependency %s changed" % (fn, f))
                bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s's dependency %s changed" % (fn, f))
                self.remove(fn)
                return False

        bb.debug(2, "Depends Cache: %s is clean" % fn)
        bb.msg.debug(2, bb.msg.domain.Cache, "Depends Cache: %s is clean" % fn)
        if not fn in self.clean:
            self.clean[fn] = ""

@@ -220,7 +222,7 @@ class Cache:
        Remove a fn from the cache
        Called from the parser in error cases
        """
        bb.debug(1, "Removing %s from cache" % fn)
        bb.msg.debug(1, bb.msg.domain.Cache, "Removing %s from cache" % fn)
        if fn in self.depends_cache:
            del self.depends_cache[fn]
        if fn in self.clean:

@@ -229,7 +231,7 @@ class Cache:
    def sync(self):
        """
        Save the cache
        Called from the parser when complete (or exitting)
        Called from the parser when complete (or exiting)
        """

        if not self.has_cache:

@@ -243,12 +245,103 @@ class Cache:
        p.dump([self.depends_cache, version_data])

    def mtime(self, cachefile):
        try:
            return os.stat(cachefile)[8]
        except OSError:
            return 0
        return bb.parse.cached_mtime_noerror(cachefile)

    def load_bbfile( self, bbfile , cooker):
    def handle_data(self, file_name, cacheData):
        """
        Save data we need into the cache
        """

        pn = self.getVar('PN', file_name, True)
        pv = self.getVar('PV', file_name, True)
        pr = self.getVar('PR', file_name, True)
        dp = int(self.getVar('DEFAULT_PREFERENCE', file_name, True) or "0")
        provides = Set([pn] + (self.getVar("PROVIDES", file_name, True) or "").split())
        depends = bb.utils.explode_deps(self.getVar("DEPENDS", file_name, True) or "")
        packages = (self.getVar('PACKAGES', file_name, True) or "").split()
        packages_dynamic = (self.getVar('PACKAGES_DYNAMIC', file_name, True) or "").split()
        rprovides = (self.getVar("RPROVIDES", file_name, True) or "").split()

        cacheData.task_queues[file_name] = self.getVar("_task_graph", file_name, True)
        cacheData.task_deps[file_name] = self.getVar("_task_deps", file_name, True)

        # build PackageName to FileName lookup table
        if pn not in cacheData.pkg_pn:
            cacheData.pkg_pn[pn] = []
        cacheData.pkg_pn[pn].append(file_name)

        cacheData.stamp[file_name] = self.getVar('STAMP', file_name, True)

        # build FileName to PackageName lookup table
        cacheData.pkg_fn[file_name] = pn
        cacheData.pkg_pvpr[file_name] = (pv, pr)
        cacheData.pkg_dp[file_name] = dp

        # Build forward and reverse provider hashes
        # Forward: virtual -> [filenames]
        # Reverse: PN -> [virtuals]
        if pn not in cacheData.pn_provides:
            cacheData.pn_provides[pn] = Set()
        cacheData.pn_provides[pn] |= provides

        for provide in provides:
            if provide not in cacheData.providers:
                cacheData.providers[provide] = []
            cacheData.providers[provide].append(file_name)

        cacheData.deps[file_name] = Set()
        for dep in depends:
            cacheData.all_depends.add(dep)
            cacheData.deps[file_name].add(dep)

        # Build reverse hash for PACKAGES, so runtime dependencies
        # can be resolved (RDEPENDS, RRECOMMENDS etc.)
        for package in packages:
            if not package in cacheData.packages:
                cacheData.packages[package] = []
            cacheData.packages[package].append(file_name)
            rprovides += (self.getVar("RPROVIDES_%s" % package, file_name, 1) or "").split()

        for package in packages_dynamic:
            if not package in cacheData.packages_dynamic:
                cacheData.packages_dynamic[package] = []
            cacheData.packages_dynamic[package].append(file_name)

        for rprovide in rprovides:
            if not rprovide in cacheData.rproviders:
                cacheData.rproviders[rprovide] = []
            cacheData.rproviders[rprovide].append(file_name)

        # Build hash of runtime depends and recommends

        def add_dep(deplist, deps):
            for dep in deps:
                if not dep in deplist:
                    deplist[dep] = ""

        if not file_name in cacheData.rundeps:
            cacheData.rundeps[file_name] = {}
        if not file_name in cacheData.runrecs:
            cacheData.runrecs[file_name] = {}

        for package in packages + [pn]:
            if not package in cacheData.rundeps[file_name]:
                cacheData.rundeps[file_name][package] = {}
            if not package in cacheData.runrecs[file_name]:
                cacheData.runrecs[file_name][package] = {}

            add_dep(cacheData.rundeps[file_name][package], bb.utils.explode_deps(self.getVar('RDEPENDS', file_name, True) or ""))
            add_dep(cacheData.runrecs[file_name][package], bb.utils.explode_deps(self.getVar('RRECOMMENDS', file_name, True) or ""))
            add_dep(cacheData.rundeps[file_name][package], bb.utils.explode_deps(self.getVar("RDEPENDS_%s" % package, file_name, True) or ""))
            add_dep(cacheData.runrecs[file_name][package], bb.utils.explode_deps(self.getVar("RRECOMMENDS_%s" % package, file_name, True) or ""))

        # Collect files we may need for possible world-dep
        # calculations
        if not self.getVar('BROKEN', file_name, True) and not self.getVar('EXCLUDE_FROM_WORLD', file_name, True):
            cacheData.possible_world.append(file_name)


    def load_bbfile( self, bbfile , config):
        """
        Load and parse one .bb build file
        Return the data and whether parsing resulted in the file being skipped

@@ -257,25 +350,15 @@ class Cache:
        import bb
        from bb import utils, data, parse, debug, event, fatal

        topdir = data.getVar('TOPDIR', cooker.configuration.data)
        if not topdir:
            topdir = os.path.abspath(os.getcwd())
            # set topdir to here
            data.setVar('TOPDIR', topdir, cooker.configuration)
        bbfile = os.path.abspath(bbfile)
        bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
        # expand tmpdir to include this topdir
        data.setVar('TMPDIR', data.getVar('TMPDIR', cooker.configuration.data, 1) or "", cooker.configuration.data)
        # set topdir to location of .bb file
        topdir = bbfile_loc
        #data.setVar('TOPDIR', topdir, cfg)
        # go there
        data.setVar('TMPDIR', data.getVar('TMPDIR', config, 1) or "", config)
        bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
        oldpath = os.path.abspath(os.getcwd())
        if self.mtime(topdir):
            os.chdir(topdir)
        bb_data = data.init_db(cooker.configuration.data)
        if self.mtime(bbfile_loc):
            os.chdir(bbfile_loc)
        bb_data = data.init_db(config)
        try:
            parse.handle(bbfile, bb_data) # read .bb data
            bb_data = parse.handle(bbfile, bb_data) # read .bb data
            os.chdir(oldpath)
            return bb_data, False
        except bb.parse.SkipPackage:

@@ -304,3 +387,45 @@ def init(cooker):
    """
    return Cache(cooker)



#============================================================================#
# CacheData
#============================================================================#
class CacheData:
    """
    The data structures we compile from the cached data
    """

    def __init__(self):
        """
        Direct cache variables
        (from Cache.handle_data)
        """
        self.providers = {}
        self.rproviders = {}
        self.packages = {}
        self.packages_dynamic = {}
        self.possible_world = []
        self.pkg_pn = {}
        self.pkg_fn = {}
        self.pkg_pvpr = {}
        self.pkg_dp = {}
        self.pn_provides = {}
        self.all_depends = Set()
        self.deps = {}
        self.rundeps = {}
        self.runrecs = {}
        self.task_queues = {}
        self.task_deps = {}
        self.stamp = {}
        self.preferred = {}

        """
        Indirect Cache variables
        (set elsewhere)
        """
        self.ignored_dependencies = []
        self.world_target = Set()
        self.bbfile_priority = {}
        self.bbfile_config_priorities = []

bitbake/lib/bb/data.py
@@ -45,7 +45,8 @@ else:
    path = os.path.dirname(os.path.dirname(sys.argv[0]))
    sys.path.insert(0, path)

from bb import note, debug, data_smart
from bb import data_smart
import bb

_dict_type = data_smart.DataSmart

@@ -362,10 +363,12 @@ def emit_var(var, o=sys.__stdout__, d = init(), all=False):
    val.rstrip()
    if not val:
        return 0

    varExpanded = expand(var, d)

    if getVarFlag(var, "func", d):
        # NOTE: should probably check for unbalanced {} within the var
        o.write("%s() {\n%s\n}\n" % (var, val))
        o.write("%s() {\n%s\n}\n" % (varExpanded, val))
    else:
        if getVarFlag(var, "export", d):
            o.write('export ')

@@ -375,7 +378,7 @@ def emit_var(var, o=sys.__stdout__, d = init(), all=False):
        # if we're going to output this within doublequotes,
        # to a shell, we need to escape the quotes in the var
        alter = re.sub('"', '\\"', val.strip())
        o.write('%s="%s"\n' % (var, alter))
        o.write('%s="%s"\n' % (varExpanded, alter))
    return 1

@@ -430,8 +433,38 @@ def update_data(d):
    >>> update_data(d)
    >>> print getVar('TEST', d)
    local

    CopyMonster:
    >>> e = d.createCopy()
    >>> setVar('TEST_foo', 'foo', e)
    >>> update_data(e)
    >>> print getVar('TEST', e)
    local

    >>> setVar('OVERRIDES', 'arm:ramses:local:foo', e)
    >>> update_data(e)
    >>> print getVar('TEST', e)
    foo

    >>> f = d.createCopy()
    >>> setVar('TEST_moo', 'something', f)
    >>> setVar('OVERRIDES', 'moo:arm:ramses:local:foo', e)
    >>> update_data(e)
    >>> print getVar('TEST', e)
    foo


    >>> h = init()
    >>> setVar('SRC_URI', 'file://append.foo;patch=1 ', h)
    >>> g = h.createCopy()
    >>> setVar('SRC_URI_append_arm', 'file://other.foo;patch=1', g)
    >>> setVar('OVERRIDES', 'arm:moo', g)
    >>> update_data(g)
    >>> print getVar('SRC_URI', g)
    file://append.foo;patch=1 file://other.foo;patch=1

    """
    debug(2, "update_data()")
    bb.msg.debug(2, bb.msg.domain.Data, "update_data()")

    # now ask the cookie monster for help
    #print "Cookie Monster"

@@ -460,7 +493,7 @@ def update_data(d):
        l = len(o) + 1

        # see if one should even try
        if not o in d._seen_overrides:
        if not d._seen_overrides.has_key(o):
            continue

        vars = d._seen_overrides[o]

@@ -469,10 +502,10 @@ def update_data(d):
            try:
                d[name] = d[var]
            except:
                note ("Untracked delVar")
                bb.msg.note(1, bb.msg.domain.Data, "Untracked delVar")

    # now on to the appends and prepends
    if '_append' in d._special_values:
    if d._special_values.has_key('_append'):
        appends = d._special_values['_append'] or []
        for append in appends:
            for (a, o) in getVarFlag(append, '_append', d) or []:

@@ -487,7 +520,7 @@ def update_data(d):
            setVar(append, sval, d)


    if '_prepend' in d._special_values:
    if d._special_values.has_key('_prepend'):
        prepends = d._special_values['_prepend'] or []

        for prepend in prepends:

bitbake/lib/bb/data_smart.py
@@ -29,14 +29,12 @@ Based on functions from the base bb module, Copyright 2003 Holger Schurig
"""

import copy, os, re, sys, time, types
from bb import note, debug, error, fatal, utils, methodpool
import bb
from bb import utils, methodpool
from COW import COWDictBase
from sets import Set
from new import classobj

try:
    import cPickle as pickle
except ImportError:
    import pickle
    print "NOTE: Importing cPickle failed. Falling back to a very slow implementation."

__setvar_keyword__ = ["_append", "_prepend"]
__setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend)(_(?P<add>.*))?')

@@ -45,12 +43,14 @@ __expand_python_regexp__ = re.compile(r"\${@.+?}")


class DataSmart:
    def __init__(self):
    def __init__(self, special = COWDictBase.copy(), seen = COWDictBase.copy() ):
        self.dict = {}

        # cookie monster tribute
        self._special_values = {}
        self._seen_overrides = {}
        self._special_values = special
        self._seen_overrides = seen

        self.expand_cache = {}

    def expand(self, s, varname):
        def var_sub(match):

@@ -75,6 +75,9 @@ class DataSmart:
        if type(s) is not types.StringType: # sanity check
            return s

        if varname and varname in self.expand_cache:
            return self.expand_cache[varname]

        while s.find('$') != -1:
            olds = s
            try:

@@ -82,15 +85,20 @@ class DataSmart:
                s = __expand_python_regexp__.sub(python_sub, s)
                if s == olds: break
                if type(s) is not types.StringType: # sanity check
                    error('expansion of %s returned non-string %s' % (olds, s))
                    bb.msg.error(bb.msg.domain.Data, 'expansion of %s returned non-string %s' % (olds, s))
            except KeyboardInterrupt:
                raise
            except:
                note("%s:%s while evaluating:\n%s" % (sys.exc_info()[0], sys.exc_info()[1], s))
                bb.msg.note(1, bb.msg.domain.Data, "%s:%s while evaluating:\n%s" % (sys.exc_info()[0], sys.exc_info()[1], s))
                raise

        if varname:
            self.expand_cache[varname] = s

        return s

    def initVar(self, var):
        self.expand_cache = {}
        if not var in self.dict:
            self.dict[var] = {}

@@ -119,6 +127,7 @@ class DataSmart:
        self.initVar(var)

    def setVar(self, var, value):
        self.expand_cache = {}
        match = __setvar_regexp__.match(var)
        if match and match.group("keyword") in __setvar_keyword__:
            base = match.group('base')

@@ -128,6 +137,7 @@ class DataSmart:
            l.append([value, override])
            self.setVarFlag(base, keyword, l)

            # todo make sure keyword is not __doc__ or __module__
            # pay the cookie monster
            try:
                self._special_values[keyword].add( base )

@@ -135,10 +145,6 @@ class DataSmart:
                self._special_values[keyword] = Set()
                self._special_values[keyword].add( base )

            # SRC_URI_append_simpad is both a flag and an override
            #if not override in self._seen_overrides:
            #    self._seen_overrides[override] = Set()
            #self._seen_overrides[override].add( base )
            return

        if not var in self.dict:

@@ -150,7 +156,7 @@ class DataSmart:
        # more cookies for the cookie monster
        if '_' in var:
            override = var[var.rfind('_')+1:]
            if not override in self._seen_overrides:
            if not self._seen_overrides.has_key(override):
                self._seen_overrides[override] = Set()
            self._seen_overrides[override].add( var )

@@ -165,6 +171,7 @@ class DataSmart:
        return value

    def delVar(self, var):
        self.expand_cache = {}
        self.dict[var] = {}

    def setVarFlag(self, var, flag, flagvalue):

@@ -234,10 +241,8 @@ class DataSmart:
        Create a copy of self by setting _data to self
        """
        # we really want this to be a DataSmart...
        data = DataSmart()
        data = DataSmart(seen=self._seen_overrides.copy(), special=self._special_values.copy())
        data.dict["_data"] = self.dict
        data._seen_overrides = copy.deepcopy(self._seen_overrides)
        data._special_values = copy.deepcopy(self._special_values)

        return data

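The expand_cache added above memoizes expansion results per variable name and is flushed on every initVar/setVar/delVar, since any write can invalidate previously expanded values. The same pattern in a stripped-down form (a hypothetical helper, not the DataSmart API):

    class Expander:
        def __init__(self, values):
            self.values = values
            self.expand_cache = {}              # varname -> expanded string

        def expand(self, s, varname=None):
            if varname and varname in self.expand_cache:
                return self.expand_cache[varname]
            for key, val in self.values.items():    # naive substitution
                s = s.replace('${%s}' % key, val)
            if varname:
                self.expand_cache[varname] = s
            return s

        def setVar(self, var, value):
            self.expand_cache = {}              # any write flushes the cache
            self.values[var] = value

    e = Expander({'PN': 'foo'})
    print e.expand('${PN}-image', 'IMAGE')      # computed, then cached
    e.setVar('PN', 'bar')
    print e.expand('${PN}-image', 'IMAGE')      # recomputed after the flush
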
@ -38,13 +38,16 @@ class NoMethodError(Exception):
|
|||
class MissingParameterError(Exception):
|
||||
"""Exception raised when a fetch method is missing a critical parameter in the url"""
|
||||
|
||||
class ParameterError(Exception):
|
||||
"""Exception raised when a url cannot be proccessed due to invalid parameters."""
|
||||
|
||||
class MD5SumError(Exception):
|
||||
"""Exception raised when a MD5SUM of a file does not match the expected one"""
|
||||
|
||||
def uri_replace(uri, uri_find, uri_replace, d):
|
||||
# bb.note("uri_replace: operating on %s" % uri)
|
||||
# bb.msg.note(1, bb.msg.domain.Fetcher, "uri_replace: operating on %s" % uri)
|
||||
if not uri or not uri_find or not uri_replace:
|
||||
bb.debug(1, "uri_replace: passed an undefined value, not replacing")
|
||||
bb.msg.debug(1, bb.msg.domain.Fetcher, "uri_replace: passed an undefined value, not replacing")
|
||||
uri_decoded = list(bb.decodeurl(uri))
|
||||
uri_find_decoded = list(bb.decodeurl(uri_find))
|
||||
uri_replace_decoded = list(bb.decodeurl(uri_replace))
|
||||
|
@ -62,9 +65,9 @@ def uri_replace(uri, uri_find, uri_replace, d):
|
|||
localfn = bb.fetch.localpath(uri, d)
|
||||
if localfn:
|
||||
result_decoded[loc] = os.path.dirname(result_decoded[loc]) + "/" + os.path.basename(bb.fetch.localpath(uri, d))
|
||||
# bb.note("uri_replace: matching %s against %s and replacing with %s" % (i, uri_decoded[loc], uri_replace_decoded[loc]))
|
||||
# bb.msg.note(1, bb.msg.domain.Fetcher, "uri_replace: matching %s against %s and replacing with %s" % (i, uri_decoded[loc], uri_replace_decoded[loc]))
|
||||
else:
|
||||
# bb.note("uri_replace: no match")
|
||||
# bb.msg.note(1, bb.msg.domain.Fetcher, "uri_replace: no match")
|
||||
return uri
|
||||
# else:
|
||||
# for j in i.keys():
|
||||
|
@ -72,62 +75,94 @@ def uri_replace(uri, uri_find, uri_replace, d):
|
|||
return bb.encodeurl(result_decoded)
|
||||
|
||||
methods = []
|
||||
urldata = {}
|
||||
|
||||
def init(urls = [], d = None):
|
||||
if d == None:
|
||||
bb.debug(2,"BUG init called with None as data object!!!")
|
||||
bb.msg.debug(2, bb.msg.domain.Fetcher, "BUG init called with None as data object!!!")
|
||||
return
|
||||
|
||||
for m in methods:
|
||||
m.urls = []
|
||||
|
||||
for u in urls:
|
||||
ud = initdata(u, d)
|
||||
if ud.method:
|
||||
ud.method.urls.append(u)
|
||||
|
||||
def initdata(url, d):
|
||||
if url not in urldata:
|
||||
ud = FetchData()
|
||||
(ud.type, ud.host, ud.path, ud.user, ud.pswd, ud.parm) = bb.decodeurl(data.expand(url, d))
|
||||
ud.date = Fetch.getSRCDate(d)
|
||||
for m in methods:
|
||||
m.data = d
|
||||
if m.supports(u, d):
|
||||
m.urls.append(u)
|
||||
if m.supports(url, ud, d):
|
||||
ud.localpath = m.localpath(url, ud, d)
|
||||
ud.md5 = ud.localpath + '.md5'
|
||||
# if user sets localpath for file, use it instead.
|
||||
if "localpath" in ud.parm:
|
||||
ud.localpath = ud.parm["localpath"]
|
||||
            ud.method = m
            break

    urldata[url] = ud
    return urldata[url]

def go(d):
    """Fetch all urls"""
    for m in methods:
        if m.urls:
            m.go(d)
        for u in m.urls:
            ud = urldata[u]
            if ud.localfile and not m.forcefetch(u, ud, d) and os.path.exists(urldata[u].md5):
                # File already present along with md5 stamp file
                # Touch md5 file to show activity
                os.utime(ud.md5, None)
                continue
            # RP - is olddir needed?
            # olddir = os.path.abspath(os.getcwd())
            m.go(u, ud, d)
            # os.chdir(olddir)
            if ud.localfile and not m.forcefetch(u, ud, d):
                Fetch.write_md5sum(u, ud, d)

def localpaths(d):
    """Return a list of the local filenames, assuming successful fetch"""
    local = []
    for m in methods:
        for u in m.urls:
            local.append(m.localpath(u, d))
            local.append(urldata[u].localpath)
    return local

def localpath(url, d):
    for m in methods:
        if m.supports(url, d):
            return m.localpath(url, d)
    ud = initdata(url, d)
    if ud.method:
        return ud.localpath
    return url

class FetchData(object):
    """Class for fetcher variable store"""
    def __init__(self):
        self.localfile = ""

class Fetch(object):
    """Base class for 'fetch'ing data"""

    def __init__(self, urls = []):
        self.urls = []
        for url in urls:
            if self.supports(bb.decodeurl(url), d) is 1:
                self.urls.append(url)

    def supports(url, d):
        """Check to see if this fetch class supports a given url.
        Expects supplied url in list form, as outputted by bb.decodeurl().
    def supports(self, url, urldata, d):
        """
        Check to see if this fetch class supports a given url.
        """
        return 0
    supports = staticmethod(supports)

    def localpath(url, d):
        """Return the local filename of a given url assuming a successful fetch.
    def localpath(self, url, urldata, d):
        """
        Return the local filename of a given url assuming a successful fetch.
        Can also setup variables in urldata for use in go (saving code duplication
        and duplicate code execution)
        """
        return url
    localpath = staticmethod(localpath)

    def setUrls(self, urls):
        self.__urls = urls

@@ -137,16 +172,17 @@ class Fetch(object):

    urls = property(getUrls, setUrls, None, "Urls property")

    def setData(self, data):
        self.__data = data
    def forcefetch(self, url, urldata, d):
        """
        Force a fetch, even if localpath exists?
        """
        return False

    def getData(self):
        return self.__data

    data = property(getData, setData, None, "Data property")

    def go(self, urls = []):
        """Fetch urls"""
    def go(self, url, urldata, d):
        """
        Fetch urls
        Assumes localpath was called first
        """
        raise NoMethodError("Missing implementation for url")

    def getSRCDate(d):

@@ -155,7 +191,12 @@ class Fetch(object):

        d the bb.data module
        """
        return data.getVar("SRCDATE", d, 1) or data.getVar("CVSDATE", d, 1) or data.getVar("DATE", d, 1)
        pn = data.getVar("PN", d, 1)

        if pn:
            return data.getVar("SRCDATE_%s" % pn, d, 1) or data.getVar("CVSDATE_%s" % pn, d, 1) or data.getVar("DATE", d, 1)

        return data.getVar("SRCDATE", d, 1) or data.getVar("CVSDATE", d, 1) or data.getVar("DATE", d, 1)
    getSRCDate = staticmethod(getSRCDate)

    def try_mirror(d, tarfn):

@@ -168,6 +209,11 @@ class Fetch(object):
        d Is a bb.data instance
        tarfn is the name of the tarball
        """
        tarpath = os.path.join(data.getVar("DL_DIR", d, 1), tarfn)
        if os.access(tarpath, os.R_OK):
            bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists, skipping checkout." % tarfn)
            return True

        pn = data.getVar('PN', d, True)
        src_tarball_stash = None
        if pn:

@@ -176,36 +222,45 @@ class Fetch(object):
        for stash in src_tarball_stash:
            fetchcmd = data.getVar("FETCHCOMMAND_mirror", d, True) or data.getVar("FETCHCOMMAND_wget", d, True)
            uri = stash + tarfn
            bb.note("fetch " + uri)
            bb.msg.note(1, bb.msg.domain.Fetcher, "fetch " + uri)
            fetchcmd = fetchcmd.replace("${URI}", uri)
            ret = os.system(fetchcmd)
            if ret == 0:
                bb.note("Fetched %s from tarball stash, skipping checkout" % tarfn)
                bb.msg.note(1, bb.msg.domain.Fetcher, "Fetched %s from tarball stash, skipping checkout" % tarfn)
                return True
        return False
    try_mirror = staticmethod(try_mirror)

    def check_for_tarball(d, tarfn, dldir, date):
    def verify_md5sum(ud, got_sum):
        """
        Check for a local copy then check the tarball stash.
        Both checks are skipped if date == 'now'.

        d Is a bb.data instance
        tarfn is the name of the tarball
        date is the SRCDATE
        Verify the md5sum we wanted with the one we got
        """
        if "now" != date:
            dl = os.path.join(dldir, tarfn)
            if os.access(dl, os.R_OK):
                bb.debug(1, "%s already exists, skipping checkout." % tarfn)
                return True
        wanted_sum = None
        if 'md5sum' in ud.parm:
            wanted_sum = ud.parm['md5sum']
        if not wanted_sum:
            return True

        # try to use the tarball stash
        if Fetch.try_mirror(d, tarfn):
            return True
        return False
    check_for_tarball = staticmethod(check_for_tarball)
        return wanted_sum == got_sum
    verify_md5sum = staticmethod(verify_md5sum)

    def write_md5sum(url, ud, d):
        if bb.which(data.getVar('PATH', d), 'md5sum'):
            try:
                md5pipe = os.popen('md5sum ' + ud.localpath)
                md5data = (md5pipe.readline().split() or [ "" ])[0]
                md5pipe.close()
            except OSError:
                md5data = ""

        # verify the md5sum
        if not Fetch.verify_md5sum(ud, md5data):
            raise MD5SumError(url)

        md5out = file(ud.md5, 'w')
        md5out.write(md5data)
        md5out.close()
    write_md5sum = staticmethod(write_md5sum)

import cvs
import git

@@ -214,6 +269,7 @@ import svn
import wget
import svk
import ssh
import perforce

methods.append(cvs.Cvs())
methods.append(git.Git())

@@ -222,3 +278,4 @@ methods.append(svn.Svn())
methods.append(wget.Wget())
methods.append(svk.Svk())
methods.append(ssh.SSH())
methods.append(perforce.Perforce())
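For orientation, a minimal driver sketch of the reworked per-url API (not part of the commit; the datastore values and the URL are hypothetical, and it assumes the module-level init() shown earlier in this file has populated urldata):

import bb
from bb import data, fetch

d = data.init()
data.setVar('DL_DIR', '/tmp/downloads', d)   # assumed download directory

fetch.init(["http://downloads.example.org/foo-1.0.tar.gz"], d)  # decode the url, pick a method
fetch.go(d)                  # download anything not already present or stashed
print fetch.localpaths(d)    # -> ['/tmp/downloads/foo-1.0.tar.gz']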
@@ -33,164 +33,119 @@ from bb.fetch import FetchError
from bb.fetch import MissingParameterError

class Cvs(Fetch):
    """Class to fetch a module or modules from cvs repositories"""
    def supports(url, d):
        """Check to see if a given url can be fetched with cvs.
        Expects supplied url in list form, as outputted by bb.decodeurl().
    """
    Class to fetch a module or modules from cvs repositories
    """
    def supports(self, url, ud, d):
        """
        (type, host, path, user, pswd, parm) = bb.decodeurl(data.expand(url, d))
        return type in ['cvs', 'pserver']
    supports = staticmethod(supports)
        Check to see if a given url can be fetched with cvs.
        """
        return ud.type in ['cvs', 'pserver']

    def localpath(url, d):
        (type, host, path, user, pswd, parm) = bb.decodeurl(data.expand(url, d))
        if "localpath" in parm:
            # if user overrides local path, use it.
            return parm["localpath"]

        if not "module" in parm:
    def localpath(self, url, ud, d):
        if not "module" in ud.parm:
            raise MissingParameterError("cvs method needs a 'module' parameter")
        else:
            module = parm["module"]
        if 'tag' in parm:
            tag = parm['tag']
        else:
            tag = ""
        if 'date' in parm:
            date = parm['date']
        else:
            if not tag:
                date = Fetch.getSRCDate(d)
            else:
                date = ""
        ud.module = ud.parm["module"]

        return os.path.join(data.getVar("DL_DIR", d, 1), data.expand('%s_%s_%s_%s.tar.gz' % (module.replace('/', '.'), host, tag, date), d))
    localpath = staticmethod(localpath)
        ud.tag = ""
        if 'tag' in ud.parm:
            ud.tag = ud.parm['tag']

    def go(self, d, urls = []):
        """Fetch urls"""
        if not urls:
            urls = self.urls
        # Override the default date in certain cases
        if 'date' in ud.parm:
            ud.date = ud.parm['date']
        elif ud.tag:
            ud.date = ""

        ud.localfile = data.expand('%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.tag, ud.date), d)

        return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)

    def forcefetch(self, url, ud, d):
        if (ud.date == "now"):
            return True
        return False

    def go(self, loc, ud, d):

        # try to use the tarball stash
        if not self.forcefetch(loc, ud, d) and Fetch.try_mirror(d, ud.localfile):
            bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping cvs checkout." % ud.localpath)
            return

        method = "pserver"
        if "method" in ud.parm:
            method = ud.parm["method"]

        localdir = ud.module
        if "localdir" in ud.parm:
            localdir = ud.parm["localdir"]

        cvs_rsh = None
        if method == "ext":
            if "rsh" in ud.parm:
                cvs_rsh = ud.parm["rsh"]

        if method == "dir":
            cvsroot = ud.path
        else:
            cvsroot = ":" + method + ":" + ud.user
            if ud.pswd:
                cvsroot += ":" + ud.pswd
            cvsroot += "@" + ud.host + ":" + ud.path

        options = []
        if ud.date:
            options.append("-D %s" % ud.date)
        if ud.tag:
            options.append("-r %s" % ud.tag)

        localdata = data.createCopy(d)
        data.setVar('OVERRIDES', "cvs:%s" % data.getVar('OVERRIDES', localdata), localdata)
        data.update_data(localdata)

        for loc in urls:
            (type, host, path, user, pswd, parm) = bb.decodeurl(data.expand(loc, localdata))
            if not "module" in parm:
                raise MissingParameterError("cvs method needs a 'module' parameter")
            else:
                module = parm["module"]
        data.setVar('CVSROOT', cvsroot, localdata)
        data.setVar('CVSCOOPTS', " ".join(options), localdata)
        data.setVar('CVSMODULE', ud.module, localdata)
        cvscmd = data.getVar('FETCHCOMMAND', localdata, 1)
        cvsupdatecmd = data.getVar('UPDATECOMMAND', localdata, 1)

            dlfile = self.localpath(loc, localdata)
            dldir = data.getVar('DL_DIR', localdata, 1)
            # if local path contains the cvs
            # module, consider the dir above it to be the
            # download directory
            # pos = dlfile.find(module)
            # if pos:
            #     dldir = dlfile[:pos]
            # else:
            #     dldir = os.path.dirname(dlfile)

            # setup cvs options
            options = []
            if 'tag' in parm:
                tag = parm['tag']
            else:
                tag = ""

            if 'date' in parm:
                date = parm['date']
            else:
                if not tag:
                    date = Fetch.getSRCDate(d)
                else:
                    date = ""

            if "method" in parm:
                method = parm["method"]
            else:
                method = "pserver"

            if "localdir" in parm:
                localdir = parm["localdir"]
            else:
                localdir = module

            cvs_rsh = None
            if method == "ext":
                if "rsh" in parm:
                    cvs_rsh = parm["rsh"]

            tarfn = data.expand('%s_%s_%s_%s.tar.gz' % (module.replace('/', '.'), host, tag, date), localdata)
            data.setVar('TARFILES', dlfile, localdata)
            data.setVar('TARFN', tarfn, localdata)

            if Fetch.check_for_tarball(d, tarfn, dldir, date):
                continue

            if date:
                options.append("-D %s" % date)
            if tag:
                options.append("-r %s" % tag)

            olddir = os.path.abspath(os.getcwd())
            os.chdir(data.expand(dldir, localdata))

            # setup cvsroot
            if method == "dir":
                cvsroot = path
            else:
                cvsroot = ":" + method + ":" + user
                if pswd:
                    cvsroot += ":" + pswd
                cvsroot += "@" + host + ":" + path

            data.setVar('CVSROOT', cvsroot, localdata)
            data.setVar('CVSCOOPTS', " ".join(options), localdata)
            data.setVar('CVSMODULE', module, localdata)
            cvscmd = data.getVar('FETCHCOMMAND', localdata, 1)
            cvsupdatecmd = data.getVar('UPDATECOMMAND', localdata, 1)

            if cvs_rsh:
                cvscmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvscmd)
                cvsupdatecmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvsupdatecmd)

            # create module directory
            bb.debug(2, "Fetch: checking for module directory")
            pkg = data.expand('${PN}', d)
            pkgdir = os.path.join(data.expand('${CVSDIR}', localdata), pkg)
            moddir = os.path.join(pkgdir, localdir)
            if os.access(os.path.join(moddir, 'CVS'), os.R_OK):
                bb.note("Update " + loc)
                # update sources there
                os.chdir(moddir)
                myret = os.system(cvsupdatecmd)
            else:
                bb.note("Fetch " + loc)
                # check out sources there
                bb.mkdirhier(pkgdir)
                os.chdir(pkgdir)
                bb.debug(1, "Running %s" % cvscmd)
                myret = os.system(cvscmd)

            if myret != 0 or not os.access(moddir, os.R_OK):
                try:
                    os.rmdir(moddir)
                except OSError:
                    pass
                raise FetchError(module)
        if cvs_rsh:
            cvscmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvscmd)
            cvsupdatecmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvsupdatecmd)

        # create module directory
        bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory")
        pkg = data.expand('${PN}', d)
        pkgdir = os.path.join(data.expand('${CVSDIR}', localdata), pkg)
        moddir = os.path.join(pkgdir, localdir)
        if os.access(os.path.join(moddir, 'CVS'), os.R_OK):
            bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc)
            # update sources there
            os.chdir(moddir)
                os.chdir('..')
                # tar them up to a defined filename
                myret = os.system("tar -czf %s %s" % (os.path.join(dldir, tarfn), os.path.basename(moddir)))
                if myret != 0:
                    try:
                        os.unlink(tarfn)
                    except OSError:
                        pass
            os.chdir(olddir)
        del localdata
            myret = os.system(cvsupdatecmd)
        else:
            bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
            # check out sources there
            bb.mkdirhier(pkgdir)
            os.chdir(pkgdir)
            bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % cvscmd)
            myret = os.system(cvscmd)

        if myret != 0 or not os.access(moddir, os.R_OK):
            try:
                os.rmdir(moddir)
            except OSError:
                pass
            raise FetchError(ud.module)

        os.chdir(moddir)
        os.chdir('..')
        # tar them up to a defined filename
        myret = os.system("tar -czf %s %s" % (ud.localpath, os.path.basename(moddir)))
        if myret != 0:
            try:
                os.unlink(ud.localpath)
            except OSError:
                pass
            raise FetchError(ud.module)
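As a worked example of the cache-file naming scheme the new localpath() uses (hypothetical host and module; a sketch, not part of the commit):

ud = bb.fetch.initdata("cvs://anonymous@cvs.example.org/cvsroot;module=foo;tag=RELEASE_1", d)
# module_host_tag_date; a tag suppresses the default SRCDATE, so date is empty:
print ud.localfile    # foo_cvs.example.org_RELEASE_1_.tar.gz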
@@ -37,7 +37,7 @@ def prunedir(topdir):

def rungitcmd(cmd, d):

    bb.debug(1, "Running %s" % cmd)
    bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % cmd)

    # Need to export PATH as git is likely to be in metadata paths
    # rather than host provided

@@ -48,108 +48,80 @@ def rungitcmd(cmd,d):
    if myret != 0:
        raise FetchError("Git: %s failed" % pathcmd)

def gettag(parm):
    if 'tag' in parm:
        tag = parm['tag']
    else:
        tag = ""
    if not tag:
        tag = "master"

    return tag

def getprotocol(parm):
    if 'protocol' in parm:
        proto = parm['protocol']
    else:
        proto = ""
    if not proto:
        proto = "rsync"

    return proto

def localfile(url, d):
    """Return the filename to cache the checkout in"""
    (type, host, path, user, pswd, parm) = bb.decodeurl(data.expand(url, d))

    # if user sets localpath for file, use it instead.
    if "localpath" in parm:
        return parm["localpath"]

    tag = gettag(parm)

    return data.expand('git_%s%s_%s.tar.gz' % (host, path.replace('/', '.'), tag), d)

class Git(Fetch):
    """Class to fetch a module or modules from git repositories"""
    def supports(url, d):
        """Check to see if a given url can be fetched with cvs.
        Expects supplied url in list form, as outputted by bb.decodeurl().
    def supports(self, url, ud, d):
        """
        (type, host, path, user, pswd, parm) = bb.decodeurl(data.expand(url, d))
        return type in ['git']
    supports = staticmethod(supports)
        Check to see if a given url can be fetched with git.
        """
        return ud.type in ['git']

    def localpath(url, d):
    def localpath(self, url, ud, d):

        return os.path.join(data.getVar("DL_DIR", d, 1), localfile(url, d))
        ud.proto = "rsync"
        if 'protocol' in ud.parm:
            ud.proto = ud.parm['protocol']

    localpath = staticmethod(localpath)
        ud.tag = "master"
        if 'tag' in ud.parm:
            ud.tag = ud.parm['tag']

    def go(self, d, urls = []):
        """Fetch urls"""
        if not urls:
            urls = self.urls
        ud.localfile = data.expand('git_%s%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.tag), d)

        for loc in urls:
            (type, host, path, user, pswd, parm) = bb.decodeurl(data.expand(loc, d))
        return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)

            tag = gettag(parm)
            proto = getprotocol(parm)
    def forcefetch(self, url, ud, d):
        # tag=="master" must always update
        if (ud.tag == "master"):
            return True
        return False

            gitsrcname = '%s%s' % (host, path.replace('/', '.'))
    def go(self, loc, ud, d):
        """Fetch url"""

            repofilename = 'git_%s.tar.gz' % (gitsrcname)
            repofile = os.path.join(data.getVar("DL_DIR", d, 1), repofilename)
            repodir = os.path.join(data.expand('${GITDIR}', d), gitsrcname)
        if not self.forcefetch(loc, ud, d) and Fetch.try_mirror(d, ud.localfile):
            bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists (or was stashed). Skipping git checkout." % ud.localpath)
            return

            coname = '%s' % (tag)
            codir = os.path.join(repodir, coname)
        gitsrcname = '%s%s' % (ud.host, ud.path.replace('/', '.'))

            cofile = self.localpath(loc, d)
        repofilename = 'git_%s.tar.gz' % (gitsrcname)
        repofile = os.path.join(data.getVar("DL_DIR", d, 1), repofilename)
        repodir = os.path.join(data.expand('${GITDIR}', d), gitsrcname)

            # tag=="master" must always update
            if (tag != "master") and Fetch.try_mirror(d, localfile(loc, d)):
                bb.debug(1, "%s already exists (or was stashed). Skipping git checkout." % cofile)
                continue
        coname = '%s' % (ud.tag)
        codir = os.path.join(repodir, coname)

        if not os.path.exists(repodir):
            if Fetch.try_mirror(d, repofilename):
                bb.mkdirhier(repodir)
                os.chdir(repodir)
                rungitcmd("tar -xzf %s" % (repofile), d)
            else:
                rungitcmd("git clone -n %s://%s%s %s" % (proto, host, path, repodir), d)
        if not os.path.exists(repodir):
            if Fetch.try_mirror(d, repofilename):
                bb.mkdirhier(repodir)
                os.chdir(repodir)
                rungitcmd("tar -xzf %s" % (repofile), d)
            else:
                rungitcmd("git clone -n %s://%s%s %s" % (ud.proto, ud.host, ud.path, repodir), d)

        os.chdir(repodir)
        rungitcmd("git pull %s://%s%s" % (proto, host, path), d)
        rungitcmd("git pull --tags %s://%s%s" % (proto, host, path), d)
        rungitcmd("git prune-packed", d)
        # old method of downloading tags
        #rungitcmd("rsync -a --verbose --stats --progress rsync://%s%s/ %s" % (host, path, os.path.join(repodir, ".git", "")), d)
        os.chdir(repodir)
        rungitcmd("git pull %s://%s%s" % (ud.proto, ud.host, ud.path), d)
        rungitcmd("git pull --tags %s://%s%s" % (ud.proto, ud.host, ud.path), d)
        rungitcmd("git prune-packed", d)
        rungitcmd("git pack-redundant --all | xargs -r rm", d)
        # Remove all but the .git directory
        rungitcmd("rm * -Rf", d)
        # old method of downloading tags
        #rungitcmd("rsync -a --verbose --stats --progress rsync://%s%s/ %s" % (ud.host, ud.path, os.path.join(repodir, ".git", "")), d)

        os.chdir(repodir)
        bb.note("Creating tarball of git repository")
        rungitcmd("tar -czf %s %s" % (repofile, os.path.join(".", ".git", "*") ), d)
        os.chdir(repodir)
        bb.msg.note(1, bb.msg.domain.Fetcher, "Creating tarball of git repository")
        rungitcmd("tar -czf %s %s" % (repofile, os.path.join(".", ".git", "*") ), d)

        if os.path.exists(codir):
            prunedir(codir)
        if os.path.exists(codir):
            prunedir(codir)

        bb.mkdirhier(codir)
        os.chdir(repodir)
        rungitcmd("git read-tree %s" % (tag), d)
        rungitcmd("git checkout-index -q -f --prefix=%s -a" % (os.path.join(codir, "git", "")), d)
        bb.mkdirhier(codir)
        os.chdir(repodir)
        rungitcmd("git read-tree %s" % (ud.tag), d)
        rungitcmd("git checkout-index -q -f --prefix=%s -a" % (os.path.join(codir, "git", "")), d)

        os.chdir(codir)
        bb.note("Creating tarball of git checkout")
        rungitcmd("tar -czf %s %s" % (cofile, os.path.join(".", "*") ), d)
        os.chdir(codir)
        bb.msg.note(1, bb.msg.domain.Fetcher, "Creating tarball of git checkout")
        rungitcmd("tar -czf %s %s" % (ud.localpath, os.path.join(".", "*") ), d)
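A short sketch of the parameters the Git fetcher now reads from the url (hypothetical repository; note that the default tag "master" makes forcefetch() always refetch):

ud = bb.fetch.initdata("git://git.example.org/repo.git;tag=v1.0;protocol=http", d)
print ud.proto        # "http"  (the default would be "rsync")
print ud.localfile    # git_git.example.org.repo.git_v1.0.tar.gz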
@@ -31,15 +31,13 @@ from bb import data
from bb.fetch import Fetch

class Local(Fetch):
    def supports(url, d):
        """Check to see if a given url can be fetched in the local filesystem.
        Expects supplied url in list form, as outputted by bb.decodeurl().
    def supports(self, url, urldata, d):
        """
        (type, host, path, user, pswd, parm) = bb.decodeurl(data.expand(url, d))
        return type in ['file','patch']
    supports = staticmethod(supports)
        Check to see if a given url can be fetched in the local filesystem.
        """
        return urldata.type in ['file','patch']

    def localpath(url, d):
    def localpath(self, url, urldata, d):
        """Return the local filename of a given url assuming a successful fetch.
        """
        path = url.split("://")[1]

@@ -52,10 +50,10 @@ class Local(Fetch):
        filesdir = data.getVar('FILESDIR', d, 1)
        if filesdir:
            newpath = os.path.join(filesdir, path)
        # We don't set localfile as for this fetcher the file is already local!
        return newpath
    localpath = staticmethod(localpath)

    def go(self, urls = []):
    def go(self, url, urldata, d):
        """Fetch urls (no-op for Local method)"""
        # no need to fetch local files, we'll deal with them in place.
        return 1
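A sketch of how a relative file:// url resolves against FILESDIR (paths are hypothetical, and the bare FetchData() stands in for whatever initdata() would normally supply):

from bb.fetch import FetchData
from bb.fetch.local import Local

data.setVar('FILESDIR', '/work/meta/files', d)   # assumed metadata files dir
print Local().localpath("file://defconfig", FetchData(), d)
# -> /work/meta/files/defconfig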
@@ -0,0 +1,213 @@
#!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'Fetch' implementations

Classes for obtaining upstream sources for the
BitBake build tools.

Copyright (C) 2003, 2004 Chris Larson

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; either version 2 of the License, or (at your option) any later
version.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA 02111-1307 USA.

Based on functions from the base bb module, Copyright 2003 Holger Schurig
"""

import os, re
import bb
from bb import data
from bb.fetch import Fetch
from bb.fetch import FetchError
from bb.fetch import MissingParameterError

class Perforce(Fetch):
    def supports(self, url, ud, d):
        return ud.type in ['p4']

    def doparse(url, d):
        parm = []
        path = url.split("://")[1]
        delim = path.find("@")
        if delim != -1:
            (user, pswd, host, port) = path.split('@')[0].split(":")
            path = path.split('@')[1]
        else:
            (host, port) = data.getVar('P4PORT', d).split(':')
            user = ""
            pswd = ""

        if path.find(";") != -1:
            keys = []
            values = []
            plist = path.split(';')
            for item in plist:
                if item.count('='):
                    (key, value) = item.split('=')
                    keys.append(key)
                    values.append(value)

            parm = dict(zip(keys, values))
        path = "//" + path.split(';')[0]
        host += ":%s" % (port)
        parm["cset"] = Perforce.getcset(d, path, host, user, pswd, parm)

        return host, path, user, pswd, parm
    doparse = staticmethod(doparse)

    def getcset(d, depot, host, user, pswd, parm):
        if "cset" in parm:
            return parm["cset"]
        if user:
            data.setVar('P4USER', user, d)
        if pswd:
            data.setVar('P4PASSWD', pswd, d)
        if host:
            data.setVar('P4PORT', host, d)

        p4date = data.getVar("P4DATE", d, 1)
        if "revision" in parm:
            depot += "#%s" % (parm["revision"])
        elif "label" in parm:
            depot += "@%s" % (parm["label"])
        elif p4date:
            depot += "@%s" % (p4date)

        p4cmd = data.getVar('FETCHCOMMAND_p4', d, 1)
        bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s changes -m 1 %s" % (p4cmd, depot))
        p4file = os.popen("%s changes -m 1 %s" % (p4cmd, depot))
        cset = p4file.readline().strip()
        bb.msg.debug(1, bb.msg.domain.Fetcher, "READ %s" % (cset))
        if not cset:
            return -1

        return cset.split(' ')[1]
    getcset = staticmethod(getcset)

    def localpath(self, url, ud, d):

        (host, path, user, pswd, parm) = Perforce.doparse(url, d)

        # If a label is specified, we use that as our filename

        if "label" in parm:
            ud.localfile = "%s.tar.gz" % (parm["label"])
            return os.path.join(data.getVar("DL_DIR", d, 1), ud.localfile)

        base = path
        which = path.find('/...')
        if which != -1:
            base = path[:which]

        if base[0] == "/":
            base = base[1:]

        cset = Perforce.getcset(d, path, host, user, pswd, parm)

        ud.localfile = data.expand('%s+%s+%s.tar.gz' % (host, base.replace('/', '.'), cset), d)

        return os.path.join(data.getVar("DL_DIR", d, 1), ud.localfile)

    def go(self, loc, ud, d):
        """
        Fetch urls
        """

        # try to use the tarball stash
        if not self.forcefetch(loc, ud, d) and Fetch.try_mirror(d, ud.localfile):
            bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping perforce checkout." % ud.localpath)
            return

        (host, depot, user, pswd, parm) = Perforce.doparse(loc, d)

        if depot.find('/...') != -1:
            path = depot[:depot.find('/...')]
        else:
            path = depot

        if "module" in parm:
            module = parm["module"]
        else:
            module = os.path.basename(path)

        localdata = data.createCopy(d)
        data.setVar('OVERRIDES', "p4:%s" % data.getVar('OVERRIDES', localdata), localdata)
        data.update_data(localdata)

        # Get the p4 command
        if user:
            data.setVar('P4USER', user, localdata)

        if pswd:
            data.setVar('P4PASSWD', pswd, localdata)

        if host:
            data.setVar('P4PORT', host, localdata)

        p4cmd = data.getVar('FETCHCOMMAND', localdata, 1)

        # create temp directory
        bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: creating temporary directory")
        bb.mkdirhier(data.expand('${WORKDIR}', localdata))
        data.setVar('TMPBASE', data.expand('${WORKDIR}/oep4.XXXXXX', localdata), localdata)
        tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, 1) or "false")
        tmpfile = tmppipe.readline().strip()
        if not tmpfile:
            bb.error("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.")
            raise FetchError(module)

        if "label" in parm:
            depot = "%s@%s" % (depot, parm["label"])
        else:
            cset = Perforce.getcset(d, depot, host, user, pswd, parm)
            depot = "%s@%s" % (depot, cset)

        os.chdir(tmpfile)
        bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
        bb.msg.note(1, bb.msg.domain.Fetcher, "%s files %s" % (p4cmd, depot))
        p4file = os.popen("%s files %s" % (p4cmd, depot))

        if not p4file:
            bb.error("Fetch: unable to get the P4 files from %s" % (depot))
            raise FetchError(module)

        count = 0

        for file in p4file:
            list = file.split()

            if list[2] == "delete":
                continue

            dest = list[0][len(path)+1:]
            where = dest.find("#")

            os.system("%s print -o %s/%s %s" % (p4cmd, module, dest[:where], list[0]))
            count = count + 1

        if count == 0:
            bb.error("Fetch: No files gathered from the P4 fetch")
            raise FetchError(module)

        myret = os.system("tar -czf %s %s" % (ud.localpath, module))
        if myret != 0:
            try:
                os.unlink(ud.localpath)
            except OSError:
                pass
            raise FetchError(module)
        # cleanup
        os.system('rm -rf %s' % tmpfile)
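The url shape the new Perforce fetcher expects, as a hedged sketch (the host, credentials and depot path are made up):

host, path, user, pswd, parm = Perforce.doparse(
    "p4://user:passwd:perforce.example.org:1666@depot/project;label=REL1", d)
# host comes back with the port appended ("perforce.example.org:1666");
# parm carries the label plus a "cset" change number obtained by running
# "p4 changes -m 1" against the depot; with a label present, localpath()
# caches the checkout as ${DL_DIR}/REL1.tar.gz.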
@@ -64,59 +64,55 @@ __pattern__ = re.compile(r'''
class SSH(Fetch):
    '''Class to fetch a module or modules via Secure Shell'''

    def supports(self, url, d):
    def supports(self, url, urldata, d):
        return __pattern__.match(url) != None

    def localpath(self, url, d):
    def localpath(self, url, urldata, d):
        m = __pattern__.match(url)
        path = m.group('path')
        host = m.group('host')
        lpath = os.path.join(data.getVar('DL_DIR', d, 1), host, os.path.basename(path))
        lpath = os.path.join(data.getVar('DL_DIR', d, True), host, os.path.basename(path))
        return lpath

    def go(self, d, urls = []):
        if not urls:
            urls = self.urls
    def go(self, url, urldata, d):
        dldir = data.getVar('DL_DIR', d, 1)

        for url in urls:
            dldir = data.getVar('DL_DIR', d, 1)
            m = __pattern__.match(url)
            path = m.group('path')
            host = m.group('host')
            port = m.group('port')
            user = m.group('user')
            password = m.group('pass')

        m = __pattern__.match(url)
        path = m.group('path')
        host = m.group('host')
        port = m.group('port')
        user = m.group('user')
        password = m.group('pass')
        ldir = os.path.join(dldir, host)
        lpath = os.path.join(ldir, os.path.basename(path))

            ldir = os.path.join(dldir, host)
            lpath = os.path.join(ldir, os.path.basename(path))
        if not os.path.exists(ldir):
            os.makedirs(ldir)

            if not os.path.exists(ldir):
                os.makedirs(ldir)
        if port:
            port = '-P %s' % port
        else:
            port = ''

            if port:
                port = '-P %s' % port
            else:
                port = ''

            if user:
                fr = user
                if password:
                    fr += ':%s' % password
                fr += '@%s' % host
            else:
                fr = host
            fr += ':%s' % path
        if user:
            fr = user
            if password:
                fr += ':%s' % password
            fr += '@%s' % host
        else:
            fr = host
        fr += ':%s' % path

            import commands
            cmd = 'scp -B -r %s %s %s/' % (
                port,
                commands.mkarg(fr),
                commands.mkarg(ldir)
            )
        import commands
        cmd = 'scp -B -r %s %s %s/' % (
            port,
            commands.mkarg(fr),
            commands.mkarg(ldir)
        )

            (exitstatus, output) = commands.getstatusoutput(cmd)
            if exitstatus != 0:
                print output
                raise FetchError('Unable to fetch %s' % url)
        (exitstatus, output) = commands.getstatusoutput(cmd)
        if exitstatus != 0:
            print output
            raise FetchError('Unable to fetch %s' % url)
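For an url like ssh://user@host.example.org/path/to/file.tar.gz (hypothetical), the method ends up running roughly the following, with the port flag left empty when no port is given:

# fr  = "user@host.example.org:/path/to/file.tar.gz"
# cmd = scp -B -r <fr> ${DL_DIR}/host.example.org/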
@@ -42,112 +42,76 @@ from bb.fetch import MissingParameterError

class Svk(Fetch):
    """Class to fetch a module or modules from svk repositories"""
    def supports(url, d):
        """Check to see if a given url can be fetched with svk.
        Expects supplied url in list form, as outputted by bb.decodeurl().
    def supports(self, url, ud, d):
        """
        (type, host, path, user, pswd, parm) = bb.decodeurl(data.expand(url, d))
        return type in ['svk']
    supports = staticmethod(supports)
        Check to see if a given url can be fetched with svk.
        """
        return ud.type in ['svk']

    def localpath(url, d):
        (type, host, path, user, pswd, parm) = bb.decodeurl(data.expand(url, d))
        if "localpath" in parm:
            # if user overrides local path, use it.
            return parm["localpath"]

        if not "module" in parm:
    def localpath(self, url, ud, d):
        if not "module" in ud.parm:
            raise MissingParameterError("svk method needs a 'module' parameter")
        else:
            module = parm["module"]
        if 'rev' in parm:
            revision = parm['rev']
        else:
            revision = ""
        ud.module = ud.parm["module"]

        date = Fetch.getSRCDate(d)
        ud.revision = ""
        if 'rev' in ud.parm:
            ud.revision = ud.parm['rev']

        return os.path.join(data.getVar("DL_DIR", d, 1), data.expand('%s_%s_%s_%s_%s.tar.gz' % (module.replace('/', '.'), host, path.replace('/', '.'), revision, date), d))
    localpath = staticmethod(localpath)
        ud.localfile = data.expand('%s_%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision, ud.date), d)

    def go(self, d, urls = []):
        return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)

    def forcefetch(self, url, ud, d):
        if (ud.date == "now"):
            return True
        return False

    def go(self, loc, ud, d):
        """Fetch urls"""
        if not urls:
            urls = self.urls

        if not self.forcefetch(loc, ud, d) and Fetch.try_mirror(d, ud.localfile):
            return

        svkroot = ud.host + ud.path

        svkcmd = "svk co -r {%s} %s/%s" % (ud.date, svkroot, ud.module)

        if ud.revision:
            svkcmd = "svk co -r %s %s/%s" % (ud.revision, svkroot, ud.module)

        # create temp directory
        localdata = data.createCopy(d)
        data.setVar('OVERRIDES', "svk:%s" % data.getVar('OVERRIDES', localdata), localdata)
        data.update_data(localdata)
        bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: creating temporary directory")
        bb.mkdirhier(data.expand('${WORKDIR}', localdata))
        data.setVar('TMPBASE', data.expand('${WORKDIR}/oesvk.XXXXXX', localdata), localdata)
        tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, 1) or "false")
        tmpfile = tmppipe.readline().strip()
        if not tmpfile:
            bb.msg.error(bb.msg.domain.Fetcher, "Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.")
            raise FetchError(ud.module)

        for loc in urls:
            (type, host, path, user, pswd, parm) = bb.decodeurl(data.expand(loc, localdata))
            if not "module" in parm:
                raise MissingParameterError("svk method needs a 'module' parameter")
            else:
                module = parm["module"]
        # check out sources there
        os.chdir(tmpfile)
        bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
        bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % svkcmd)
        myret = os.system(svkcmd)
        if myret != 0:
            try:
                os.rmdir(tmpfile)
            except OSError:
                pass
            raise FetchError(ud.module)

            dlfile = self.localpath(loc, localdata)
            dldir = data.getVar('DL_DIR', localdata, 1)

            # setup svk options
            options = []
            if 'rev' in parm:
                revision = parm['rev']
            else:
                revision = ""

            date = Fetch.getSRCDate(d)
            tarfn = data.expand('%s_%s_%s_%s_%s.tar.gz' % (module.replace('/', '.'), host, path.replace('/', '.'), revision, date), localdata)
            data.setVar('TARFILES', dlfile, localdata)
            data.setVar('TARFN', tarfn, localdata)

            if Fetch.check_for_tarball(d, tarfn, dldir, date):
                continue

            olddir = os.path.abspath(os.getcwd())
            os.chdir(data.expand(dldir, localdata))

            svkroot = host + path

            data.setVar('SVKROOT', svkroot, localdata)
            data.setVar('SVKCOOPTS', " ".join(options), localdata)
            data.setVar('SVKMODULE', module, localdata)
            svkcmd = "svk co -r {%s} %s/%s" % (date, svkroot, module)

            if revision:
                svkcmd = "svk co -r %s %s/%s" % (revision, svkroot, module)

            # create temp directory
            bb.debug(2, "Fetch: creating temporary directory")
            bb.mkdirhier(data.expand('${WORKDIR}', localdata))
            data.setVar('TMPBASE', data.expand('${WORKDIR}/oesvk.XXXXXX', localdata), localdata)
            tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, 1) or "false")
            tmpfile = tmppipe.readline().strip()
            if not tmpfile:
                bb.error("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.")
                raise FetchError(module)

            # check out sources there
            os.chdir(tmpfile)
            bb.note("Fetch " + loc)
            bb.debug(1, "Running %s" % svkcmd)
            myret = os.system(svkcmd)
            if myret != 0:
                try:
                    os.rmdir(tmpfile)
                except OSError:
                    pass
                raise FetchError(module)

            os.chdir(os.path.join(tmpfile, os.path.dirname(module)))
            # tar them up to a defined filename
            myret = os.system("tar -czf %s %s" % (os.path.join(dldir, tarfn), os.path.basename(module)))
            if myret != 0:
                try:
                    os.unlink(tarfn)
                except OSError:
                    pass
            # cleanup
            os.system('rm -rf %s' % tmpfile)
            os.chdir(olddir)
        del localdata
        os.chdir(os.path.join(tmpfile, os.path.dirname(ud.module)))
        # tar them up to a defined filename
        myret = os.system("tar -czf %s %s" % (ud.localpath, os.path.basename(ud.module)))
        if myret != 0:
            try:
                os.unlink(ud.localpath)
            except OSError:
                pass
            raise FetchError(ud.module)
        # cleanup
        os.system('rm -rf %s' % tmpfile)
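The svk cache-file naming, sketched with made-up values (module_host_path_rev_date; ud.date is assumed to be preset from SRCDATE by initdata()):

ud = bb.fetch.initdata("svk://svk.example.org/depot;module=trunk;rev=42", d)
print ud.localfile    # trunk_svk.example.org_.depot_42_<SRCDATE>.tar.gz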
@@ -26,6 +26,7 @@ Based on functions from the base bb module, Copyright 2003 Holger Schurig
"""

import os, re
import sys
import bb
from bb import data
from bb.fetch import Fetch

@@ -34,136 +35,98 @@ from bb.fetch import MissingParameterError

class Svn(Fetch):
    """Class to fetch a module or modules from svn repositories"""
    def supports(url, d):
        """Check to see if a given url can be fetched with svn.
        Expects supplied url in list form, as outputted by bb.decodeurl().
    def supports(self, url, ud, d):
        """
        (type, host, path, user, pswd, parm) = bb.decodeurl(data.expand(url, d))
        return type in ['svn']
    supports = staticmethod(supports)
        Check to see if a given url can be fetched with svn.
        """
        return ud.type in ['svn']

    def localpath(url, d):
        (type, host, path, user, pswd, parm) = bb.decodeurl(data.expand(url, d))
        if "localpath" in parm:
            # if user overrides local path, use it.
            return parm["localpath"]

        if not "module" in parm:
    def localpath(self, url, ud, d):
        if not "module" in ud.parm:
            raise MissingParameterError("svn method needs a 'module' parameter")
        else:
            module = parm["module"]
        if 'rev' in parm:
            revision = parm['rev']
        else:
            revision = ""
        ud.module = ud.parm["module"]

        date = Fetch.getSRCDate(d)
        ud.revision = ""
        if 'rev' in ud.parm:
            ud.revision = ud.parm['rev']

        return os.path.join(data.getVar("DL_DIR", d, 1), data.expand('%s_%s_%s_%s_%s.tar.gz' % (module.replace('/', '.'), host, path.replace('/', '.'), revision, date), d))
    localpath = staticmethod(localpath)
        ud.localfile = data.expand('%s_%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision, ud.date), d)

    def go(self, d, urls = []):
        """Fetch urls"""
        if not urls:
            urls = self.urls
        return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)

    def forcefetch(self, url, ud, d):
        if (ud.date == "now"):
            return True
        return False

    def go(self, loc, ud, d):
        """Fetch url"""

        # try to use the tarball stash
        if not self.forcefetch(loc, ud, d) and Fetch.try_mirror(d, ud.localfile):
            bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping svn checkout." % ud.localpath)
            return

        proto = "svn"
        if "proto" in ud.parm:
            proto = ud.parm["proto"]

        svn_rsh = None
        if proto == "svn+ssh" and "rsh" in ud.parm:
            svn_rsh = ud.parm["rsh"]

        svnroot = ud.host + ud.path

        # either use the revision, or SRCDATE in braces, or nothing for SRCDATE = "now"
        options = []
        if ud.revision:
            options.append("-r %s" % ud.revision)
        elif ud.date != "now":
            options.append("-r {%s}" % ud.date)

        localdata = data.createCopy(d)
        data.setVar('OVERRIDES', "svn:%s" % data.getVar('OVERRIDES', localdata), localdata)
        data.update_data(localdata)

        for loc in urls:
            (type, host, path, user, pswd, parm) = bb.decodeurl(data.expand(loc, localdata))
            if not "module" in parm:
                raise MissingParameterError("svn method needs a 'module' parameter")
            else:
                module = parm["module"]
        data.setVar('SVNROOT', "%s://%s/%s" % (proto, svnroot, ud.module), localdata)
        data.setVar('SVNCOOPTS', " ".join(options), localdata)
        data.setVar('SVNMODULE', ud.module, localdata)
        svncmd = data.getVar('FETCHCOMMAND', localdata, 1)
        svnupcmd = data.getVar('UPDATECOMMAND', localdata, 1)

            dlfile = self.localpath(loc, localdata)
            dldir = data.getVar('DL_DIR', localdata, 1)
            # if local path contains the svn
            # module, consider the dir above it to be the
            # download directory
            # pos = dlfile.find(module)
            # if pos:
            #     dldir = dlfile[:pos]
            # else:
            #     dldir = os.path.dirname(dlfile)
        if svn_rsh:
            svncmd = "svn_RSH=\"%s\" %s" % (svn_rsh, svncmd)
            svnupcmd = "svn_RSH=\"%s\" %s" % (svn_rsh, svnupcmd)

            # setup svn options
            options = []
            if 'rev' in parm:
                revision = parm['rev']
            else:
                revision = ""
        pkg = data.expand('${PN}', d)
        pkgdir = os.path.join(data.expand('${SVNDIR}', localdata), pkg)
        moddir = os.path.join(pkgdir, ud.module)
        bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory '" + moddir + "'")

            date = Fetch.getSRCDate(d)

            if "proto" in parm:
                proto = parm["proto"]
            else:
                proto = "svn"

            svn_rsh = None
            if proto == "svn+ssh" and "rsh" in parm:
                svn_rsh = parm["rsh"]

            tarfn = data.expand('%s_%s_%s_%s_%s.tar.gz' % (module.replace('/', '.'), host, path.replace('/', '.'), revision, date), localdata)
            data.setVar('TARFILES', dlfile, localdata)
            data.setVar('TARFN', tarfn, localdata)

            if Fetch.check_for_tarball(d, tarfn, dldir, date):
                continue

            olddir = os.path.abspath(os.getcwd())
            os.chdir(data.expand(dldir, localdata))

            svnroot = host + path

            data.setVar('SVNROOT', svnroot, localdata)
            data.setVar('SVNCOOPTS', " ".join(options), localdata)
            data.setVar('SVNMODULE', module, localdata)
            svncmd = data.getVar('FETCHCOMMAND', localdata, 1)
            svncmd = "svn co -r {%s} %s://%s/%s" % (date, proto, svnroot, module)

            if revision:
                svncmd = "svn co -r %s %s://%s/%s" % (revision, proto, svnroot, module)
            elif date == "now":
                svncmd = "svn co %s://%s/%s" % (proto, svnroot, module)

            if svn_rsh:
                svncmd = "svn_RSH=\"%s\" %s" % (svn_rsh, svncmd)

            # create temp directory
            bb.debug(2, "Fetch: creating temporary directory")
            bb.mkdirhier(data.expand('${WORKDIR}', localdata))
            data.setVar('TMPBASE', data.expand('${WORKDIR}/oesvn.XXXXXX', localdata), localdata)
            tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, 1) or "false")
            tmpfile = tmppipe.readline().strip()
            if not tmpfile:
                bb.error("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.")
                raise FetchError(module)

            # check out sources there
            os.chdir(tmpfile)
            bb.note("Fetch " + loc)
            bb.debug(1, "Running %s" % svncmd)
        if os.access(os.path.join(moddir, '.svn'), os.R_OK):
            bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc)
            # update sources there
            os.chdir(moddir)
            bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % svnupcmd)
            myret = os.system(svnupcmd)
        else:
            bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
            # check out sources there
            bb.mkdirhier(pkgdir)
            os.chdir(pkgdir)
            bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % svncmd)
            myret = os.system(svncmd)
            if myret != 0:
                try:
                    os.rmdir(tmpfile)
                except OSError:
                    pass
                raise FetchError(module)

            os.chdir(os.path.join(tmpfile, os.path.dirname(module)))
            # tar them up to a defined filename
            myret = os.system("tar -czf %s %s" % (os.path.join(dldir, tarfn), os.path.basename(module)))
            if myret != 0:
                try:
                    os.unlink(tarfn)
                except OSError:
                    pass
            # cleanup
            os.system('rm -rf %s' % tmpfile)
            os.chdir(olddir)
        del localdata
        if myret != 0:
            raise FetchError(ud.module)

        os.chdir(pkgdir)
        # tar them up to a defined filename
        myret = os.system("tar -czf %s %s" % (ud.localpath, os.path.basename(ud.module)))
        if myret != 0:
            try:
                os.unlink(ud.localpath)
            except OSError:
                pass
            raise FetchError(ud.module)
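A sketch of the variables the reworked svn fetcher derives for a hypothetical url; the actual checkout or update is delegated to FETCHCOMMAND/UPDATECOMMAND from the configuration:

# SRC_URI = "svn://svn.example.org/repos;module=trunk;rev=1234;proto=http"
#   SVNROOT   -> http://svn.example.org/repos/trunk
#   SVNCOOPTS -> -r 1234      ("-r {SRCDATE}" when only a date is in effect)
#   moddir    -> ${SVNDIR}/${PN}/trunk  (updated in place when .svn exists)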
@@ -30,138 +30,70 @@ import bb
from bb import data
from bb.fetch import Fetch
from bb.fetch import FetchError
from bb.fetch import MD5SumError
from bb.fetch import uri_replace

class Wget(Fetch):
    """Class to fetch urls via 'wget'"""
    def supports(url, d):
        """Check to see if a given url can be fetched using wget.
        Expects supplied url in list form, as outputted by bb.decodeurl().
    def supports(self, url, ud, d):
        """
        (type, host, path, user, pswd, parm) = bb.decodeurl(data.expand(url, d))
        return type in ['http','https','ftp']
    supports = staticmethod(supports)
        Check to see if a given url can be fetched via wget.
        """
        return ud.type in ['http','https','ftp']

    def localpath(url, d):
        # strip off parameters
        (type, host, path, user, pswd, parm) = bb.decodeurl(data.expand(url, d))
        if "localpath" in parm:
            # if user overrides local path, use it.
            return parm["localpath"]
        url = bb.encodeurl([type, host, path, user, pswd, {}])
    def localpath(self, url, ud, d):

        return os.path.join(data.getVar("DL_DIR", d), os.path.basename(url))
    localpath = staticmethod(localpath)
        url = bb.encodeurl([ud.type, ud.host, ud.path, ud.user, ud.pswd, {}])
        ud.basename = os.path.basename(ud.path)
        ud.localfile = data.expand(os.path.basename(url), d)

    def go(self, d, urls = []):
        return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)

    def go(self, uri, ud, d):
        """Fetch urls"""

        def md5_sum(parm, d):
            """
            Return the MD5SUM associated with the to be downloaded
            file.
            It can return None if no md5sum is associated
            """
            try:
                return parm['md5sum']
            except:
                return None

        def verify_md5sum(wanted_sum, got_sum):
            """
            Verify the md5sum we wanted with the one we got
            """
            if not wanted_sum:
                return True

            return wanted_sum == got_sum

        def fetch_uri(uri, basename, dl, md5, parm, d):
            # the MD5 sum we want to verify
            wanted_md5sum = md5_sum(parm, d)
            if os.path.exists(dl):
                # file exists, but we didn't complete it.. trying again..
        def fetch_uri(uri, ud, d):
            if os.path.exists(ud.localpath):
                # file exists, but we didn't complete it.. trying again..
                fetchcmd = data.getVar("RESUMECOMMAND", d, 1)
            else:
                fetchcmd = data.getVar("FETCHCOMMAND", d, 1)

            bb.note("fetch " + uri)
            bb.msg.note(1, bb.msg.domain.Fetcher, "fetch " + uri)
            fetchcmd = fetchcmd.replace("${URI}", uri)
            fetchcmd = fetchcmd.replace("${FILE}", basename)
            bb.debug(2, "executing " + fetchcmd)
            fetchcmd = fetchcmd.replace("${FILE}", ud.basename)
            bb.msg.debug(2, bb.msg.domain.Fetcher, "executing " + fetchcmd)
            ret = os.system(fetchcmd)
            if ret != 0:
                return False

            # check if sourceforge did send us to the mirror page
            dl_dir = data.getVar("DL_DIR", d, True)
            if not os.path.exists(dl):
                os.system("rm %s*" % dl) # FIXME shell quote it
                bb.debug(2, "sourceforge.net sent us to the mirror on %s" % basename)
            if not os.path.exists(ud.localpath):
                os.system("rm %s*" % ud.localpath) # FIXME shell quote it
                bb.msg.debug(2, bb.msg.domain.Fetcher, "sourceforge.net sent us to the mirror on %s" % ud.basename)
                return False

            # supposedly complete.. write out md5sum
            if bb.which(data.getVar('PATH', d), 'md5sum'):
                try:
                    md5pipe = os.popen('md5sum ' + dl)
                    md5data = (md5pipe.readline().split() or [ "" ])[0]
                    md5pipe.close()
                except OSError:
                    md5data = ""

                # verify the md5sum
                if not verify_md5sum(wanted_md5sum, md5data):
                    raise MD5SumError(uri)

                md5out = file(md5, 'w')
                md5out.write(md5data)
                md5out.close()
            return True

        if not urls:
            urls = self.urls

        localdata = data.createCopy(d)
        data.setVar('OVERRIDES', "wget:" + data.getVar('OVERRIDES', localdata), localdata)
        data.update_data(localdata)

        for uri in urls:
            completed = 0
            (type, host, path, user, pswd, parm) = bb.decodeurl(data.expand(uri, localdata))
            basename = os.path.basename(path)
            dl = self.localpath(uri, d)
            dl = data.expand(dl, localdata)
            md5 = dl + '.md5'
        premirrors = [ i.split() for i in (data.getVar('PREMIRRORS', localdata, 1) or "").split('\n') if i ]
        for (find, replace) in premirrors:
            newuri = uri_replace(uri, find, replace, d)
            if newuri != uri:
                if fetch_uri(newuri, ud, localdata):
                    return

            if os.path.exists(md5):
                # complete, nothing to see here..
                continue
        if fetch_uri(uri, ud, localdata):
            return

            premirrors = [ i.split() for i in (data.getVar('PREMIRRORS', localdata, 1) or "").split('\n') if i ]
            for (find, replace) in premirrors:
                newuri = uri_replace(uri, find, replace, d)
                if newuri != uri:
                    if fetch_uri(newuri, basename, dl, md5, parm, localdata):
                        completed = 1
                        break
        # try mirrors
        mirrors = [ i.split() for i in (data.getVar('MIRRORS', localdata, 1) or "").split('\n') if i ]
        for (find, replace) in mirrors:
            newuri = uri_replace(uri, find, replace, d)
            if newuri != uri:
                if fetch_uri(newuri, ud, localdata):
                    return

            if completed:
                continue

            if fetch_uri(uri, basename, dl, md5, parm, localdata):
                continue

            # try mirrors
            mirrors = [ i.split() for i in (data.getVar('MIRRORS', localdata, 1) or "").split('\n') if i ]
            for (find, replace) in mirrors:
                newuri = uri_replace(uri, find, replace, d)
                if newuri != uri:
                    if fetch_uri(newuri, basename, dl, md5, parm, localdata):
                        completed = 1
                        break

            if not completed:
                raise FetchError(uri)

        del localdata
        raise FetchError(uri)
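Mirror handling in go(), sketched: PREMIRRORS and MIRRORS are newline-separated "match replacement" pairs run through uri_replace() (values below are hypothetical):

# PREMIRRORS = "ftp://.*/.* http://mirror.example.org/sources/"
# order tried: each rewritten premirror uri, then the original uri, then
# each rewritten mirror uri; FetchError(uri) is raised only if all fail.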
@@ -61,9 +61,6 @@ def insert_method(modulename, code, fn):
    comp = better_compile(code, "<bb>", fn )
    better_exec(comp, __builtins__, code, fn)

    # hack hack hack XXX
    return

    # now some instrumentation
    code = comp.co_names
    for name in code:
@@ -0,0 +1,108 @@
#!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'msg' implementation

Message handling infrastructure for bitbake

Copyright (C) 2006 Richard Purdie

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; either version 2 of the License, or (at your option) any later
version.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA 02111-1307 USA.
"""

import sys, os, re, bb
from bb import utils

debug_level = {}

verbose = False

domain = bb.utils.Enum(
    'Build',
    'Cache',
    'Collection',
    'Data',
    'Depends',
    'Fetcher',
    'Parsing',
    'Provider',
    'RunQueue',
    'TaskData',
    'Util')

#
# Message control functions
#

def set_debug_level(level):
    bb.msg.debug_level = {}
    for domain in bb.msg.domain:
        bb.msg.debug_level[domain] = level
    bb.msg.debug_level['default'] = level

def set_verbose(level):
    bb.msg.verbose = level

def set_debug_domains(domains):
    for domain in domains:
        found = False
        for ddomain in bb.msg.domain:
            if domain == str(ddomain):
                bb.msg.debug_level[ddomain] = bb.msg.debug_level[ddomain] + 1
                found = True
        if not found:
            std_warn("Logging domain %s is not valid, ignoring" % domain)

#
# Message handling functions
#

def debug(level, domain, msg, fn = None):
    if debug_level[domain] >= level:
        print 'DEBUG: ' + msg

def note(level, domain, msg, fn = None):
    if level == 1 or verbose or debug_level[domain] >= 1:
        std_note(msg)

def warn(domain, msg, fn = None):
    std_warn(msg)

def error(domain, msg, fn = None):
    std_error(msg)

def fatal(domain, msg, fn = None):
    std_fatal(msg)

#
# Compatibility functions for the original message interface
#
def std_debug(lvl, msg):
    if debug_level['default'] >= lvl:
        print 'DEBUG: ' + msg

def std_note(msg):
    print 'NOTE: ' + msg

def std_warn(msg):
    print 'WARNING: ' + msg

def std_error(msg):
    print 'ERROR: ' + msg

def std_fatal(msg):
    print 'ERROR: ' + msg
    sys.exit(1)
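A usage sketch of the new per-domain message API (the levels and domain choices are illustrative):

import bb.msg

bb.msg.set_debug_level(0)                # silence all domains
bb.msg.set_debug_domains(["Fetcher"])    # then raise Fetcher by one level
bb.msg.debug(1, bb.msg.domain.Fetcher, "printed")
bb.msg.debug(1, bb.msg.domain.Data, "suppressed at level 0")
bb.msg.note(1, bb.msg.domain.Fetcher, "level-1 notes always print")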
@@ -37,11 +37,16 @@ class SkipPackage(Exception):
__mtime_cache = {}
def cached_mtime(f):
    if not __mtime_cache.has_key(f):
        update_mtime(f)
        __mtime_cache[f] = os.stat(f)[8]
    return __mtime_cache[f]

def update_mtime(f):
    __mtime_cache[f] = os.stat(f)[8]
def cached_mtime_noerror(f):
    if not __mtime_cache.has_key(f):
        try:
            __mtime_cache[f] = os.stat(f)[8]
        except OSError:
            return 0
    return __mtime_cache[f]

def mark_dependency(d, f):
    if f.startswith('./'):
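The cache trades staleness for speed: each file is stat()ed once per session unless it is refreshed. A small sketch (the recipe path is hypothetical):

import bb.parse

m1 = bb.parse.cached_mtime('/path/to/recipe.bb')          # stats the file
m2 = bb.parse.cached_mtime('/path/to/recipe.bb')          # served from the cache
missing = bb.parse.cached_mtime_noerror('/no/such/file')  # 0 instead of OSError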
@@ -5,33 +5,33 @@
   Reads a .bb file and obtains its metadata (using a C++ parser)

   Copyright (C) 2006 Tim Robert Ansell
   Copyright (C) 2006 Holger Hans Peter Freyther

   This program is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free Software
   Foundation; either version 2 of the License, or (at your option) any later
   version.

   Permission is hereby granted, free of charge, to any person obtaining a copy
   of this software and associated documentation files (the "Software"), to deal
   in the Software without restriction, including without limitation the rights
   to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
   copies of the Software, and to permit persons to whom the Software is
   furnished to do so, subject to the following conditions:

   The above copyright notice and this permission notice shall be included in all
   copies or substantial portions of the Software.

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
   SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
   DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
   OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
   THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""

import os, sys

# The Module we will use here
import bb
@@ -61,51 +61,126 @@ def supports(fn, data):
    return fn[-3:] == ".bb" or fn[-8:] == ".bbclass" or fn[-4:] == ".inc" or fn[-5:] == ".conf"

def init(fn, data):
    if not data.getVar('TOPDIR'):
        bb.error('TOPDIR is not set')
    if not data.getVar('BBPATH'):
        bb.error('BBPATH is not set')
    if not bb.data.getVar('TOPDIR', data):
        bb.data.setVar('TOPDIR', os.getcwd(), data)
    if not bb.data.getVar('BBPATH', data):
        bb.data.setVar('BBPATH', os.path.join(sys.prefix, 'share', 'bitbake'), data)

def handle_inherit(d):
    """
    Handle inheriting of classes. This will load all default classes.
    It could be faster and it could detect infinite loops, but this is todo.
    Also, this delayed loading of bb.parse could impose a penalty.
    """
    from bb.parse import handle

    files = (data.getVar('INHERIT', d, True) or "").split()
    if not "base" in files:
        files[0:0] = ["base"]

    __inherit_cache = data.getVar('__inherit_cache', d) or []
    for f in files:
        file = data.expand(f, d)
        if file[0] != "/" and file[-8:] != ".bbclass":
            file = os.path.join('classes', '%s.bbclass' % file)

        if not file in __inherit_cache:
            debug(2, "BB %s:%d: inheriting %s" % (fn, lineno, file))
            __inherit_cache.append( file )

            try:
                handle(file, d, True)
            except IOError:
                print "Failed to inherit %s" % file
    data.setVar('__inherit_cache', __inherit_cache, d)


def handle(fn, d, include):
    print ""
    print "fn: %s" % fn
    print "data: %s" % d
    print dir(d)
    print d.getVar.__doc__
    print "include: %s" % include
    from bb import data, parse

    (root, ext) = os.path.splitext(os.path.basename(fn))
    base_name = "%s%s" % (root,ext)

    # initialize with some data
    init(fn,d)

    # check if we include or are the beginning
    oldfile = None
    if include:
        oldfile = d.getVar('FILE')
    else:
        #d.inheritFromOS()
        oldfile = None
        oldfile = d.getVar('FILE', False)
        is_conf = False
    elif ext == ".conf":
        is_conf = True
        data.inheritFromOS(d)

    # find the file
    if not os.path.isabs(fn):
        bb.error("No Absolute FILE name")
        abs_fn = bb.which(d.getVar('BBPATH'), fn)
        abs_fn = bb.which(d.getVar('BBPATH', True), fn)
    else:
        abs_fn = fn

    # check if the file exists
    if not os.path.exists(abs_fn):
        raise IOError("file '%(fn)' not found" % locals() )
        raise IOError("file '%(fn)s' not found" % locals() )

    # now we know the file is around, mark it as a dep
    if include:
        parse.mark_dependency(d, abs_fn)

    # manipulate the bbpath
    if ext != ".bbclass" and ext != ".conf":
        old_bb_path = data.getVar('BBPATH', d)
        data.setVar('BBPATH', os.path.dirname(abs_fn) + (":%s" % old_bb_path), d)

    # handle INHERITS and base inherit
    if ext != ".bbclass" and ext != ".conf":
        data.setVar('FILE', fn, d)
        handle_inherit(d)

    # now parse this file - by deferring it to C++
    parsefile(fn, d)
    parsefile(abs_fn, d, is_conf)

    # Finish it up
    if include == 0:
        data.expandKeys(d)
        data.update_data(d)
        #### !!! XXX Finish it up by executing the anonfunc

    # restore the original FILE
    if oldfile:
        d.setVar('FILE', oldfile)

    # restore bbpath
    if ext != ".bbclass" and ext != ".conf":
        data.setVar('BBPATH', old_bb_path, d )

    return d


# Needed for BitBake files...
__pkgsplit_cache__={}
def vars_from_file(mypkg, d):
    if not mypkg:
        return (None, None, None)
    if mypkg in __pkgsplit_cache__:
        return __pkgsplit_cache__[mypkg]

    myfile = os.path.splitext(os.path.basename(mypkg))
    parts = myfile[0].split('_')
    __pkgsplit_cache__[mypkg] = parts
    exp = 3 - len(parts)
    tmplist = []
    while exp != 0:
        exp -= 1
        tmplist.append(None)
    parts.extend(tmplist)
    return parts


# Inform bitbake that we are a parser
# We need to define all three
from bb.parse import handlers
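A worked example of vars_from_file above, since the padding logic is easy to misread (file names invented): the base name is split on '_' and padded with None up to three elements, giving package name, version and revision.

    vars_from_file("net-tools_1.60_1.bb", d)   # -> ['net-tools', '1.60', '1']
    vars_from_file("busybox.bb", d)            # -> ['busybox', None, None]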
@@ -1,6 +1,6 @@
test: bitbakec.so
	python test.py

build: bitbakec.so
	echo "Done"

bitbakescanner.cc: bitbakescanner.l
	flex -t bitbakescanner.l > bitbakescanner.cc

@@ -28,9 +28,9 @@ bitbakec.so: bitbakec.o bitbakeparser.o bitbakescanner.o
	g++ -shared -fPIC bitbakeparser.o bitbakescanner.o bitbakec.o -o bitbakec.so

clean:
	rm *.out
	rm *.cc
	rm bitbakec.c
	rm bitbakec-processed.c
	rm *.o
	rm *.so
	rm -f *.out
	rm -f *.cc
	rm -f bitbakec.c
	rm -f bitbakec-processed.c
	rm -f *.o
	rm -f *.so
@@ -6,96 +6,107 @@ cdef extern from "stdio.h":
    FILE *fopen(char*, char*)
    int fclose(FILE *fp)

cdef extern from "string.h":
    int strlen(char*)

cdef extern from "lexerc.h":
    ctypedef struct lex_t:
        void* parser
        void* scanner
        char* name
        FILE* file
        int config
        void* data

        int lineError
        int errorParse

cdef extern void parse(FILE*, object)
cdef extern int parse(FILE*, char*, object, int)

def parsefile(object file, object data):
    print "parsefile: 1", file, data
def parsefile(object file, object data, object config):
    #print "parsefile: 1", file, data

    # Open the file
    cdef FILE* f

    f = fopen(file, "r")
    print "parsefile: 2 opening file"
    #print "parsefile: 2 opening file"
    if (f == NULL):
        raise IOError("No such file %s." % file)

    print "parsefile: 3 parse"
    parse(f, data)
    #print "parsefile: 3 parse"
    parse(f, file, data, config)

    # Close the file
    print "parsefile: 4 closing"
    fclose(f)


cdef public void e_assign(lex_t* container, char* key, char* what):
    print "e_assign", key, what
    #print "e_assign", key, what
    if what == NULL:
        print "FUTURE Warning empty string: use \"\""
        what = ""

    d = <object>container.data
    d.setVar(key, what)

cdef public void e_export(lex_t* c, char* what):
    print "e_export", what
    #print "e_export", what
    #exp:
    #   bb.data.setVarFlag(key, "export", 1, data)
    d = <object>container.data
    d.setVarFlag(key, "export", 1)
    d = <object>c.data
    d.setVarFlag(what, "export", 1)

cdef public void e_immediate(lex_t* c, char* key, char* what):
    print "e_immediate", key, what
    #print "e_immediate", key, what
    #colon:
    #   val = bb.data.expand(groupd["value"], data)
    d = <object>c.data
    d.setVar(key, d.expand(what))
    d.setVar(key, d.expand(what,d))

cdef public void e_cond(lex_t* c, char* key, char* what):
    print "e_cond", key, what
    #print "e_cond", key, what
    #ques:
    #   val = bb.data.getVar(key, data)
    #   if val == None:
    #       val = groupd["value"]
    if what == NULL:
        print "FUTURE warning: Use \"\" for", key
        what = ""

    d = <object>c.data
    d.setVar(key, (d.getVar(key) or what))
    d.setVar(key, (d.getVar(key,False) or what))

cdef public void e_prepend(lex_t* c, char* key, char* what):
    print "e_prepend", key, what
    #print "e_prepend", key, what
    #prepend:
    #   val = "%s %s" % (groupd["value"], (bb.data.getVar(key, data) or ""))
    d = <object>c.data
    d.setVar(key, what + " " + (d.getVar(key) or ""))
    d.setVar(key, what + " " + (d.getVar(key,0) or ""))

cdef public void e_append(lex_t* c, char* key, char* what):
    print "e_append", key, what
    #print "e_append", key, what
    #append:
    #   val = "%s %s" % ((bb.data.getVar(key, data) or ""), groupd["value"])
    d = <object>c.data
    d.setVar(key, (d.getVar(key) or "") + " " + what)
    d.setVar(key, (d.getVar(key,0) or "") + " " + what)

cdef public void e_precat(lex_t* c, char* key, char* what):
    print "e_precat", key, what
    #print "e_precat", key, what
    #predot:
    #   val = "%s%s" % (groupd["value"], (bb.data.getVar(key, data) or ""))
    d = <object>c.data
    d.setVar(key, what + (d.getVar(key) or ""))
    d.setVar(key, what + (d.getVar(key,0) or ""))

cdef public void e_postcat(lex_t* c, char* key, char* what):
    print "e_postcat", key, what
    #print "e_postcat", key, what
    #postdot:
    #   val = "%s%s" % ((bb.data.getVar(key, data) or ""), groupd["value"])
    d = <object>c.data
    d.setVar(key, (d.getVar(key) or "") + what)
    d.setVar(key, (d.getVar(key,0) or "") + what)

cdef public void e_addtask(lex_t* c, char* name, char* before, char* after):
    print "e_addtask", name, before, after
cdef public int e_addtask(lex_t* c, char* name, char* before, char* after) except -1:
    #print "e_addtask", name
    # func = m.group("func")
    # before = m.group("before")
    # after = m.group("after")
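To make the operator semantics above easier to scan, here is a minimal pure-Python mirror of the datastore callbacks; a plain dict stands in for the real data object, so this is a sketch, not the implementation:

    def e_append(d, key, what):  d[key] = (d.get(key) or "") + " " + what   # A += "x"
    def e_prepend(d, key, what): d[key] = what + " " + (d.get(key) or "")   # A =+ "x"
    def e_postcat(d, key, what): d[key] = (d.get(key) or "") + what         # A .= "x"
    def e_precat(d, key, what):  d[key] = what + (d.get(key) or "")         # A =. "x"
    def e_cond(d, key, what):    d[key] = d.get(key) or what                # A ?= "x"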
@@ -112,69 +123,131 @@ cdef public void e_addtask(lex_t* c, char* name, char* before, char* after):
    # # set up things that depend on this func
    # data.setVarFlag(var, "postdeps", before.split(), d)
    # return

    do = "do_%s" % name

    if c.config == 1:
        from bb.parse import ParseError
        raise ParseError("No tasks allowed in config files")
        return -1

    d = <object>c.data
    do = "do_%s" % name
    d.setVarFlag(do, "task", 1)

    if strlen(before) > 0:
    if before != NULL and strlen(before) > 0:
        #print "Before", before
        d.setVarFlag(do, "postdeps", ("%s" % before).split())
    if after != NULL and strlen(after) > 0:
        #print "After", after
        d.setVarFlag(do, "deps", ("%s" % after).split())
    if strlen(after) > 0:
        d.setVarFlag(do, "deps", ("%s" % before).split())

    return 0

cdef public void e_addhandler(lex_t* c, char* h):
    print "e_addhandler", h
cdef public int e_addhandler(lex_t* c, char* h) except -1:
    #print "e_addhandler", h
    # data.setVarFlag(h, "handler", 1, d)
    if c.config == 1:
        from bb.parse import ParseError
        raise ParseError("No handlers allowed in config files")
        return -1

    d = <object>c.data
    d.setVarFlag(h, "handler", 1)
    return 0

cdef public void e_export_func(lex_t* c, char* function):
    print "e_export_func", function
    pass
cdef public int e_export_func(lex_t* c, char* function) except -1:
    #print "e_export_func", function
    if c.config == 1:
        from bb.parse import ParseError
        raise ParseError("No functions allowed in config files")
        return -1

cdef public void e_inherit(lex_t* c, char* file):
    print "e_inherit", file
    pass
    return 0

cdef public int e_inherit(lex_t* c, char* file) except -1:
    #print "e_inherit", file

    if c.config == 1:
        from bb.parse import ParseError
        raise ParseError("No inherits allowed in config files")
        return -1

    return 0

cdef public void e_include(lex_t* c, char* file):
    print "e_include", file
    from bb.parse import handle
    d = <object>c.data
    d.expand(file)

    try:
        parsefile(file, d)
        handle(d.expand(file,d), d, True)
    except IOError:
        print "Could not include required file %s" % file
        print "Could not include file", file


cdef public void e_require(lex_t* c, char* file):
    print "e_require", file
cdef public int e_require(lex_t* c, char* file) except -1:
    #print "e_require", file
    from bb.parse import handle
    d = <object>c.data
    d.expand(file)

    try:
        parsefile(file, d)
        handle(d.expand(file,d), d, True)
    except IOError:
        raise CParseError("Could not include required file %s" % file)
        print "ParseError", file
        from bb.parse import ParseError
        raise ParseError("Could not include required file %s" % file)
        return -1

cdef public void e_proc(lex_t* c, char* key, char* what):
    print "e_proc", key, what
    pass
    return 0

cdef public void e_proc_python(lex_t* c, char* key, char* what):
    print "e_proc_python", key, what
    pass
cdef public int e_proc(lex_t* c, char* key, char* what) except -1:
    #print "e_proc", key, what
    if c.config == 1:
        from bb.parse import ParseError
        raise ParseError("No functions allowed in config files")
        return -1

cdef public void e_proc_fakeroot(lex_t* c, char* key, char* what):
    print "e_fakeroot", key, what
    pass
    return 0

cdef public int e_proc_python(lex_t* c, char* key, char* what) except -1:
    #print "e_proc_python"
    if c.config == 1:
        from bb.parse import ParseError
        raise ParseError("No python allowed in config files")
        return -1

cdef public void e_def(lex_t* c, char* a, char* b, char* d):
    print "e_def", key, what
    pass

cdef public void e_parse_error(lex_t* c):
    print "e_parse_error", "line:", lineError, "parse:", errorParse
    raise CParseError("There was a parse error, sorry, unable to give more information at the current time.")

    if key != NULL:
        pass
        #print "Key", key
    if what != NULL:
        pass
        #print "What", what

    return 0

cdef public int e_proc_fakeroot(lex_t* c, char* key, char* what) except -1:
    #print "e_fakeroot", key, what

    if c.config == 1:
        from bb.parse import ParseError
        raise ParseError("No fakeroot allowed in config files")
        return -1

    return 0

cdef public int e_def(lex_t* c, char* a, char* b, char* d) except -1:
    #print "e_def", a, b, d

    if c.config == 1:
        from bb.parse import ParseError
        raise ParseError("No defs allowed in config files")
        return -1

    return 0

cdef public int e_parse_error(lex_t* c) except -1:
    print "e_parse_error", c.name, "line:", lineError, "parse:", errorParse

    from bb.parse import ParseError
    raise ParseError("There was a parse error, sorry, unable to give more information at the current time. File: %s Line: %d" % (c.name,lineError) )
    return -1
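For reference, this is what e_addtask above records for a recipe line like 'addtask compile before do_build after do_configure' (a sketch derived from the code; "deps" lists tasks that must run first, "postdeps" tasks that depend on this one):

    d.setVarFlag("do_compile", "task", 1)
    d.setVarFlag("do_compile", "postdeps", ["do_build"])
    d.setVarFlag("do_compile", "deps", ["do_configure"])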
@@ -128,51 +128,49 @@ typedef union {
*/
static const YYACTIONTYPE yy_action[] = {
 /*   0 */    82,   3,   7,   8,  38,  22,  39,  24,  26,  32,
 /*  10 */    34,  28,  30, 128,   1,  40,  53,  70,  55,   5,
 /*  20 */    60,  65,  67,   2,  21,  36,  69,  77,   9,   7,
 /*  30 */    11,   6,  13,  15,  17,  19,  12,  52,  50,   4,
 /*  40 */    74,  42,  46,  59,  57,  10,  64,  62,  38,  14,
 /*  50 */    73,  16,  38,  38,  76,  81,  18,  20,  23,  25,
 /*  60 */    27,  29,  31,  33,  35,  37,  56,  51,  90,  54,
 /*  70 */    58,  71,  41,  43,  63,  45,  44,  47,  72,  48,
 /*  80 */    75,  78,  80,  61,  90,  49,  66,  90,  90,  68,
 /*  90 */    90,  90,  90,  90,  90,  79,
 /*  10 */    34,  28,  30,   2,  21,  40,  53,  70,  55,  44,
 /*  20 */    60,  65,  67, 128,   1,  36,  69,  77,  42,  46,
 /*  30 */    11,  66,  13,  15,  17,  19,  64,  62,   9,   7,
 /*  40 */    74,  38,  45,  81,  59,  57,  38,  38,  73,  76,
 /*  50 */     5,  68,  52,  50,  14,  31,  47,  71,  48,  10,
 /*  60 */    72,  33,  23,  49,   6,  41,  51,  78,  75,  16,
 /*  70 */     4,  54,  35,  25,  18,  80,  79,  56,  27,  37,
 /*  80 */    58,  12,  61,  29,  43,  63,  20,
};
static const YYCODETYPE yy_lookahead[] = {
 /*   0 */     0,   1,   2,   3,  23,   4,  25,   6,   7,   8,
 /*  10 */     9,  10,  11,  31,  32,  15,  16,   1,  18,  42,
 /*  20 */    20,  21,  22,  33,  34,  24,  26,  27,   1,   2,
 /*  30 */     4,  28,   6,   7,   8,   9,   5,  35,  36,  29,
 /*  40 */    24,  13,  14,  37,  38,  34,  39,  40,  23,   5,
 /*  50 */    25,   5,  23,  23,  25,  25,   5,   5,   5,   5,
 /*  60 */     5,   5,   5,   5,   5,  41,  17,  35,  43,   1,
 /*  70 */    37,  24,  12,  12,  39,  12,  14,  12,  41,  13,
 /*  80 */    41,   1,  41,  19,  43,  12,  19,  43,  43,  19,
 /*  90 */    43,  43,  43,  43,  43,  24,
 /*  10 */     9,  10,  11,  33,  34,  15,  16,   1,  18,  14,
 /*  20 */    20,  21,  22,  31,  32,  24,  26,  27,  13,  14,
 /*  30 */     4,  19,   6,   7,   8,   9,  39,  40,   1,   2,
 /*  40 */    24,  23,  12,  25,  37,  38,  23,  23,  25,  25,
 /*  50 */    42,  19,  35,  36,   5,   5,  12,  24,  13,  34,
 /*  60 */    41,   5,   5,  12,  28,  12,  35,   1,  41,   5,
 /*  70 */    29,   1,   5,   5,   5,  41,  24,  17,   5,  41,
 /*  80 */    37,   5,  19,   5,  12,  39,   5,
};
#define YY_SHIFT_USE_DFLT (-20)
static const signed char yy_shift_ofst[] = {
 /*   0 */   -20,   0, -20,  10, -20,   3, -20, -20,  27, -20,
 /*  10 */    26,  31, -20,  44, -20,  46, -20,  51, -20,  52,
 /*  20 */   -20,   1,  53, -20,  54, -20,  55, -20,  56, -20,
 /*  30 */    57, -20,  58, -20,  59, -20, -20, -19, -20, -20,
 /*  40 */    60,  28,  61,  62,  63, -20,  65,  66,  73, -20,
 /*  50 */    60, -20, -20,  68, -20,  49, -20,  49, -20, -20,
 /*  60 */    64, -20,  64, -20, -20,  67, -20,  70, -20,  16,
 /*  70 */    47, -20,  25, -20, -20,  29, -20,  80,  71, -20,
 /*  80 */    30, -20,
 /*   0 */   -20,   0, -20,  41, -20,  36, -20, -20,  37, -20,
 /*  10 */    26,  76, -20,  49, -20,  64, -20,  69, -20,  81,
 /*  20 */   -20,   1,  57, -20,  68, -20,  73, -20,  78, -20,
 /*  30 */    50, -20,  56, -20,  67, -20, -20, -19, -20, -20,
 /*  40 */    53,  15,  72,   5,  30, -20,  44,  45,  51, -20,
 /*  50 */    53, -20, -20,  70, -20,  60, -20,  60, -20, -20,
 /*  60 */    63, -20,  63, -20, -20,  12, -20,  32, -20,  16,
 /*  70 */    33, -20,  23, -20, -20,  24, -20,  66,  52, -20,
 /*  80 */    18, -20,
};
#define YY_REDUCE_USE_DFLT (-24)
#define YY_REDUCE_USE_DFLT (-21)
static const signed char yy_reduce_ofst[] = {
 /*   0 */   -18, -10, -24, -24, -23, -24, -24, -24,  11, -24,
 /*  10 */   -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
 /*  20 */   -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
 /*  30 */   -24, -24, -24, -24, -24, -24,  24, -24, -24, -24,
 /*  40 */     2, -24, -24, -24, -24, -24, -24, -24, -24, -24,
 /*  50 */    32, -24, -24, -24, -24,   6, -24,  33, -24, -24,
 /*  60 */     7, -24,  35, -24, -24, -24, -24, -24, -24, -24,
 /*  70 */   -24,  37, -24, -24,  39, -24, -24, -24, -24,  41,
 /*  80 */   -24, -24,
 /*   0 */    -8, -20, -21, -21,   8, -21, -21, -21,  25, -21,
 /*  10 */   -21, -21, -21, -21, -21, -21, -21, -21, -21, -21,
 /*  20 */   -21, -21, -21, -21, -21, -21, -21, -21, -21, -21,
 /*  30 */   -21, -21, -21, -21, -21, -21,  38, -21, -21, -21,
 /*  40 */    17, -21, -21, -21, -21, -21, -21, -21, -21, -21,
 /*  50 */    31, -21, -21, -21, -21,   7, -21,  43, -21, -21,
 /*  60 */    -3, -21,  46, -21, -21, -21, -21, -21, -21, -21,
 /*  70 */   -21,  19, -21, -21,  27, -21, -21, -21, -21,  34,
 /*  80 */   -21, -21,
};
static const YYACTIONTYPE yy_default[] = {
 /*   0 */    84, 127,  83,  85, 125, 126, 124,  86, 127,  85,
@@ -420,7 +418,7 @@ static void yy_destructor(YYCODETYPE yymajor, YYMINORTYPE *yypminor){
    case 29:
#line 50 "bitbakeparser.y"
{ (yypminor->yy0).release_this (); }
#line 425 "bitbakeparser.c"
#line 423 "bitbakeparser.c"
      break;
    default:  break;   /* If no destructor action specified: do nothing */
  }

@@ -694,7 +692,7 @@ static void yy_reduce(
{ yygotominor.yy0.assignString( (char*)yymsp[0].minor.yy0.string() );
  yymsp[0].minor.yy0.assignString( 0 );
  yymsp[0].minor.yy0.release_this(); }
#line 699 "bitbakeparser.c"
#line 697 "bitbakeparser.c"
        break;
      case 4:
#line 64 "bitbakeparser.y"

@@ -702,7 +700,7 @@ static void yy_reduce(
  yygotominor.yy0.assignString( (char*)yymsp[0].minor.yy0.string() );
  yymsp[0].minor.yy0.assignString( 0 );
  yymsp[0].minor.yy0.release_this(); }
#line 707 "bitbakeparser.c"
#line 705 "bitbakeparser.c"
        break;
      case 5:
#line 70 "bitbakeparser.y"

@@ -711,7 +709,7 @@ static void yy_reduce(
  yymsp[-2].minor.yy0.release_this(); yymsp[0].minor.yy0.release_this(); yy_destructor(3,&yymsp[-3].minor);
  yy_destructor(4,&yymsp[-1].minor);
}
#line 716 "bitbakeparser.c"
#line 714 "bitbakeparser.c"
        break;
      case 6:
#line 74 "bitbakeparser.y"

@@ -720,7 +718,7 @@ static void yy_reduce(
  yymsp[-2].minor.yy0.release_this(); yymsp[0].minor.yy0.release_this(); yy_destructor(3,&yymsp[-3].minor);
  yy_destructor(6,&yymsp[-1].minor);
}
#line 725 "bitbakeparser.c"
#line 723 "bitbakeparser.c"
        break;
      case 7:
#line 78 "bitbakeparser.y"

@@ -729,7 +727,7 @@ static void yy_reduce(
  yymsp[-2].minor.yy0.release_this(); yymsp[0].minor.yy0.release_this(); yy_destructor(3,&yymsp[-3].minor);
  yy_destructor(7,&yymsp[-1].minor);
}
#line 734 "bitbakeparser.c"
#line 732 "bitbakeparser.c"
        break;
      case 8:
#line 82 "bitbakeparser.y"

@@ -738,7 +736,7 @@ static void yy_reduce(
  yymsp[-2].minor.yy0.release_this(); yymsp[0].minor.yy0.release_this(); yy_destructor(3,&yymsp[-3].minor);
  yy_destructor(8,&yymsp[-1].minor);
}
#line 743 "bitbakeparser.c"
#line 741 "bitbakeparser.c"
        break;
      case 9:
#line 86 "bitbakeparser.y"

@@ -746,56 +744,56 @@ static void yy_reduce(
  yymsp[-2].minor.yy0.release_this(); yymsp[0].minor.yy0.release_this(); yy_destructor(3,&yymsp[-3].minor);
  yy_destructor(9,&yymsp[-1].minor);
}
#line 751 "bitbakeparser.c"
#line 749 "bitbakeparser.c"
        break;
      case 10:
#line 90 "bitbakeparser.y"
{ e_assign( lex, yymsp[-2].minor.yy0.string(), yymsp[0].minor.yy0.string() );
  yymsp[-2].minor.yy0.release_this(); yymsp[0].minor.yy0.release_this(); yy_destructor(4,&yymsp[-1].minor);
}
#line 758 "bitbakeparser.c"
#line 756 "bitbakeparser.c"
        break;
      case 11:
#line 93 "bitbakeparser.y"
{ e_precat( lex, yymsp[-2].minor.yy0.string(), yymsp[0].minor.yy0.string() );
  yymsp[-2].minor.yy0.release_this(); yymsp[0].minor.yy0.release_this(); yy_destructor(6,&yymsp[-1].minor);
}
#line 765 "bitbakeparser.c"
#line 763 "bitbakeparser.c"
        break;
      case 12:
#line 96 "bitbakeparser.y"
{ e_postcat( lex, yymsp[-2].minor.yy0.string(), yymsp[0].minor.yy0.string() );
  yymsp[-2].minor.yy0.release_this(); yymsp[0].minor.yy0.release_this(); yy_destructor(7,&yymsp[-1].minor);
}
#line 772 "bitbakeparser.c"
#line 770 "bitbakeparser.c"
        break;
      case 13:
#line 99 "bitbakeparser.y"
{ e_prepend( lex, yymsp[-2].minor.yy0.string(), yymsp[0].minor.yy0.string() );
  yymsp[-2].minor.yy0.release_this(); yymsp[0].minor.yy0.release_this(); yy_destructor(10,&yymsp[-1].minor);
}
#line 779 "bitbakeparser.c"
#line 777 "bitbakeparser.c"
        break;
      case 14:
#line 102 "bitbakeparser.y"
{ e_append( lex, yymsp[-2].minor.yy0.string() , yymsp[0].minor.yy0.string() );
  yymsp[-2].minor.yy0.release_this(); yymsp[0].minor.yy0.release_this(); yy_destructor(11,&yymsp[-1].minor);
}
#line 786 "bitbakeparser.c"
#line 784 "bitbakeparser.c"
        break;
      case 15:
#line 105 "bitbakeparser.y"
{ e_immediate( lex, yymsp[-2].minor.yy0.string(), yymsp[0].minor.yy0.string() );
  yymsp[-2].minor.yy0.release_this(); yymsp[0].minor.yy0.release_this(); yy_destructor(8,&yymsp[-1].minor);
}
#line 793 "bitbakeparser.c"
#line 791 "bitbakeparser.c"
        break;
      case 16:
#line 108 "bitbakeparser.y"
{ e_cond( lex, yymsp[-2].minor.yy0.string(), yymsp[0].minor.yy0.string() );
  yymsp[-2].minor.yy0.release_this(); yymsp[0].minor.yy0.release_this(); yy_destructor(9,&yymsp[-1].minor);
}
#line 800 "bitbakeparser.c"
#line 798 "bitbakeparser.c"
        break;
      case 17:
#line 112 "bitbakeparser.y"

@@ -803,7 +801,7 @@ static void yy_reduce(
  yymsp[-4].minor.yy0.release_this(); yymsp[-2].minor.yy0.release_this(); yymsp[0].minor.yy0.release_this(); yy_destructor(13,&yymsp[-3].minor);
  yy_destructor(14,&yymsp[-1].minor);
}
#line 808 "bitbakeparser.c"
#line 806 "bitbakeparser.c"
        break;
      case 18:
#line 115 "bitbakeparser.y"

@@ -811,55 +809,55 @@ static void yy_reduce(
  yymsp[-4].minor.yy0.release_this(); yymsp[-2].minor.yy0.release_this(); yymsp[0].minor.yy0.release_this(); yy_destructor(14,&yymsp[-3].minor);
  yy_destructor(13,&yymsp[-1].minor);
}
#line 816 "bitbakeparser.c"
#line 814 "bitbakeparser.c"
        break;
      case 19:
#line 118 "bitbakeparser.y"
{ e_addtask( lex, yymsp[0].minor.yy0.string(), NULL, NULL);
  yymsp[0].minor.yy0.release_this();}
#line 822 "bitbakeparser.c"
#line 820 "bitbakeparser.c"
        break;
      case 20:
#line 121 "bitbakeparser.y"
{ e_addtask( lex, yymsp[-2].minor.yy0.string(), yymsp[0].minor.yy0.string(), NULL);
  yymsp[-2].minor.yy0.release_this(); yymsp[0].minor.yy0.release_this(); yy_destructor(13,&yymsp[-1].minor);
}
#line 829 "bitbakeparser.c"
#line 827 "bitbakeparser.c"
        break;
      case 21:
#line 124 "bitbakeparser.y"
{ e_addtask( lex, yymsp[-2].minor.yy0.string(), NULL, yymsp[0].minor.yy0.string());
  yymsp[-2].minor.yy0.release_this(); yymsp[0].minor.yy0.release_this(); yy_destructor(14,&yymsp[-1].minor);
}
#line 836 "bitbakeparser.c"
#line 834 "bitbakeparser.c"
        break;
      case 25:
#line 131 "bitbakeparser.y"
{ e_addhandler( lex, yymsp[0].minor.yy0.string()); yymsp[0].minor.yy0.release_this (); yy_destructor(16,&yymsp[-1].minor);
}
#line 842 "bitbakeparser.c"
#line 840 "bitbakeparser.c"
        break;
      case 26:
#line 133 "bitbakeparser.y"
{ e_export_func( lex, yymsp[0].minor.yy0.string()); yymsp[0].minor.yy0.release_this(); }
#line 847 "bitbakeparser.c"
#line 845 "bitbakeparser.c"
        break;
      case 30:
#line 138 "bitbakeparser.y"
{ e_inherit( lex, yymsp[0].minor.yy0.string() ); yymsp[0].minor.yy0.release_this (); }
#line 852 "bitbakeparser.c"
#line 850 "bitbakeparser.c"
        break;
      case 34:
#line 144 "bitbakeparser.y"
{ e_include( lex, yymsp[0].minor.yy0.string() ); yymsp[0].minor.yy0.release_this(); yy_destructor(21,&yymsp[-1].minor);
}
#line 858 "bitbakeparser.c"
#line 856 "bitbakeparser.c"
        break;
      case 35:
#line 147 "bitbakeparser.y"
{ e_require( lex, yymsp[0].minor.yy0.string() ); yymsp[0].minor.yy0.release_this(); yy_destructor(22,&yymsp[-1].minor);
}
#line 864 "bitbakeparser.c"
#line 862 "bitbakeparser.c"
        break;
      case 36:
#line 150 "bitbakeparser.y"

@@ -868,12 +866,12 @@ static void yy_reduce(
  yymsp[-1].minor.yy0.release_this ();
  yymsp[0].minor.yy0.release_this ();
}
#line 873 "bitbakeparser.c"
#line 871 "bitbakeparser.c"
        break;
      case 37:
#line 155 "bitbakeparser.y"
{ yygotominor.yy0.assignString(0); }
#line 878 "bitbakeparser.c"
#line 876 "bitbakeparser.c"
        break;
      case 38:
#line 157 "bitbakeparser.y"

@@ -881,7 +879,7 @@ static void yy_reduce(
  yymsp[-3].minor.yy0.release_this(); yymsp[-1].minor.yy0.release_this(); yy_destructor(24,&yymsp[-2].minor);
  yy_destructor(25,&yymsp[0].minor);
}
#line 886 "bitbakeparser.c"
#line 884 "bitbakeparser.c"
        break;
      case 39:
#line 160 "bitbakeparser.y"

@@ -890,7 +888,7 @@ static void yy_reduce(
  yy_destructor(24,&yymsp[-2].minor);
  yy_destructor(25,&yymsp[0].minor);
}
#line 895 "bitbakeparser.c"
#line 893 "bitbakeparser.c"
        break;
      case 40:
#line 163 "bitbakeparser.y"

@@ -899,7 +897,7 @@ static void yy_reduce(
  yy_destructor(24,&yymsp[-2].minor);
  yy_destructor(25,&yymsp[0].minor);
}
#line 904 "bitbakeparser.c"
#line 902 "bitbakeparser.c"
        break;
      case 41:
#line 167 "bitbakeparser.y"

@@ -908,7 +906,7 @@ static void yy_reduce(
  yy_destructor(24,&yymsp[-2].minor);
  yy_destructor(25,&yymsp[0].minor);
}
#line 913 "bitbakeparser.c"
#line 911 "bitbakeparser.c"
        break;
      case 42:
#line 171 "bitbakeparser.y"

@@ -916,18 +914,18 @@ static void yy_reduce(
  yygotominor.yy0.assignString( token_t::concatString(yymsp[-1].minor.yy0.string(), yymsp[0].minor.yy0.string()) );
  yymsp[-1].minor.yy0.release_this (); yymsp[0].minor.yy0.release_this ();
}
#line 921 "bitbakeparser.c"
#line 919 "bitbakeparser.c"
        break;
      case 43:
#line 175 "bitbakeparser.y"
{ yygotominor.yy0.assignString( 0 ); }
#line 926 "bitbakeparser.c"
#line 924 "bitbakeparser.c"
        break;
      case 44:
#line 177 "bitbakeparser.y"
{ e_def( lex, yymsp[-2].minor.yy0.string(), yymsp[-1].minor.yy0.string(), yymsp[0].minor.yy0.string());
  yymsp[-2].minor.yy0.release_this(); yymsp[-1].minor.yy0.release_this(); yymsp[0].minor.yy0.release_this(); }
#line 932 "bitbakeparser.c"
#line 930 "bitbakeparser.c"
        break;
  };
  yygoto = yyRuleInfo[yyruleno].lhs;

@@ -986,7 +984,7 @@ static void yy_syntax_error(
#define TOKEN (yyminor.yy0)
#line 52 "bitbakeparser.y"
  e_parse_error( lex );
#line 992 "bitbakeparser.c"
#line 990 "bitbakeparser.c"
  bbparseARG_STORE; /* Suppress warning about unused %extra_argument variable */
}

@@ -1042,7 +1040,7 @@ void bbparse(
  /* (re)initialize the parser, if necessary */
  yypParser = (yyParser*)yyp;
  if( yypParser->yyidx<0 ){
    /* if( yymajor==0 ) return; // not sure why this was here... */
    if( yymajor==0 ) return;
    yypParser->yyidx = 0;
    yypParser->yyerrcnt = -1;
    yypParser->yystack[0].stateno = 0;
@@ -8,7 +8,7 @@
#define FLEX_SCANNER
#define YY_FLEX_MAJOR_VERSION 2
#define YY_FLEX_MINOR_VERSION 5
#define YY_FLEX_SUBMINOR_VERSION 31
#define YY_FLEX_SUBMINOR_VERSION 33
#if YY_FLEX_SUBMINOR_VERSION > 0
#define FLEX_BETA
#endif

@@ -30,7 +30,15 @@

/* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */

#if defined __STDC_VERSION__ && __STDC_VERSION__ >= 199901L
#if __STDC_VERSION__ >= 199901L

/* C99 says to define __STDC_LIMIT_MACROS before including stdint.h,
 * if you want the limit (max/min) macros for int types.
 */
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS 1
#endif

#include <inttypes.h>
typedef int8_t flex_int8_t;
typedef uint8_t flex_uint8_t;

@@ -153,6 +161,10 @@ int yylex_init (yyscan_t* scanner);
#define YY_BUF_SIZE 16384
#endif

/* The state buf must be large enough to hold one state per character in the main buffer.
 */
#define YY_STATE_BUF_SIZE   ((YY_BUF_SIZE + 2) * sizeof(yy_state_type))

#ifndef YY_TYPEDEF_YY_BUFFER_STATE
#define YY_TYPEDEF_YY_BUFFER_STATE
typedef struct yy_buffer_state *YY_BUFFER_STATE;
@@ -493,7 +505,7 @@ static yyconst flex_int32_t yy_ec[256] =
static yyconst flex_int32_t yy_meta[59] =
    {   0,
        1,    1,    2,    3,    1,    1,    4,    1,    1,    1,
        5,    6,    5,    5,    7,    8,    1,    7,    1,    9,
        5,    6,    5,    5,    5,    7,    1,    8,    1,    9,
        9,    9,    9,    9,    9,    9,    9,    9,    9,    9,
        9,    9,   10,    1,   11,    9,    9,    9,    9,    9,
        9,    9,    9,    9,    9,    9,    9,    9,    9,    9,

@@ -565,18 +577,18 @@ static yyconst flex_int16_t yy_base[847] =
     2077, 2072, 2066, 2069, 2056, 2067, 1398, 1343, 1408, 1404,
      643, 1409, 2071, 2066, 2060, 2063, 2050, 2061, 2065, 2060,
     2054, 2057, 2044, 2055, 1420, 1445, 1413, 1447, 1453, 1454,
     2059, 2054, 2047, 2050, 2035, 2043, 1455, 1459, 1460, 1461,
     2059, 2053, 2047, 2049, 2032, 2043, 1455, 1459, 1460, 1461,
     1462, 1463, 1471, 1436, 1430, 1192, 1433, 1479, 1482, 1492,

     1506, 1519, 1520, 1528, 2047, 2040, 2031,    0, 2034, 2019,
     2027, 1486, 1496, 1505, 1506, 1510, 1516, 1524, 2044, 2018,
        0,    0,    0,    0, 1281, 1517, 2043, 2042, 2039, 2035,
     2023, 1994, 2309, 2309, 2309, 2309, 2005, 1981,    0,    0,
     1506, 1519, 1520, 1528, 2046, 2037, 2031,    0, 2033, 2016,
     2027, 1486, 1496, 1505, 1506, 1510, 1516, 1524, 2043, 2015,
        0,    0,    0,    0, 1281, 1517, 2043, 2041, 2036, 2034,
     2024, 1995, 2309, 2309, 2309, 2309, 2005, 1981,    0,    0,
        0,    0, 1538, 1528, 1530, 1534, 1537, 1540, 1981, 1957,
        0,    0,    0,    0, 1557, 1558, 1559, 1560, 1561, 1563,
     1568, 1547, 1988, 1959, 1954, 1948, 1580, 1581, 1582, 1590,
     1592, 1594, 1923, 1863,    0,    0,    0,    0, 1598, 1599,
     1600, 1874, 1858, 1350, 1584, 1803, 1792, 1801, 1790, 1603,
     1568, 1547, 1988, 1959, 1955, 1948, 1580, 1581, 1582, 1590,
     1592, 1594, 1924, 1863,    0,    0,    0,    0, 1598, 1599,
     1600, 1875, 1859, 1350, 1584, 1803, 1792, 1801, 1790, 1603,
     1601, 1799, 1788, 1604, 1602, 1610, 1609, 1643, 1644, 1797,

     1786, 1611, 1630, 1800, 1773, 1010, 1606, 1798, 1771, 1795,

@@ -593,8 +605,8 @@ static yyconst flex_int16_t yy_base[847] =
     1768,    0,  742, 2309,    0, 1764,    0, 1778,  678, 1801,
        0, 2309, 1835, 1847, 1859, 1871, 1883,  550, 1892, 1898,
     1907, 1919, 1931, 1939, 1945, 1950, 1956, 1965, 1977, 1989,
     2001, 2013, 2025, 2033, 2039, 2042,  306,  304,  301, 2049,
      213, 2057,  136, 2065, 2073, 2081
     2001, 2013, 2025, 2033, 2039, 2043,  306,  304,  301, 2050,
      213, 2058,  136, 2066, 2074, 2082
    } ;

static yyconst flex_int16_t yy_def[847] =

@@ -903,14 +915,14 @@ static yyconst flex_int16_t yy_nxt[2368] =
      112,  112,  112,  112,  112,  112,  112,  112,  112,  112,
      112,  128,  128,  128,  128,  128,  128,  128,  128,  128,
      128,  128,  128,  155,  155,  155,  155,  155,  155,  155,
      155,  155,  155,  155,  155,  167,  167,  167,  705,  167,
      155,  155,  155,  155,  155,  167,  167,  167,  167,  705,

      167,  167,  177,  177,  704,  177,  177,  183,  701,  183,
      167,  167,  177,  177,  177,  704,  177,  183,  701,  183,
      183,  183,  183,  183,  183,  183,  183,  183,  183,  187,
      187,  187,  187,  187,  187,  187,  187,  187,  187,  187,
      187,  201,  201,  201,  201,  201,  201,  201,  201,  201,
      201,  201,  201,  209,  209,  700,  209,  209,  217,  217,
      238,  217,  217,  217,  223,  223,  238,  223,  223,  231,
      201,  201,  201,  209,  209,  209,  700,  209,  217,  217,
      238,  217,  217,  217,  223,  223,  223,  238,  223,  231,
      231,  238,  231,  231,  231,  237,  237,  237,  237,  237,
      237,  237,  237,  237,  237,  237,  237,  239,  239,  239,
      239,  239,  239,  239,  239,  239,  239,  239,  239,  256,

@@ -919,13 +931,13 @@ static yyconst flex_int16_t yy_nxt[2368] =
      256,  261,  693,  692,  261,  261,  261,  261,  261,  261,
      261,  261,  261,  264,  264,  264,  264,  264,  264,  264,
      264,  264,  264,  264,  264,  267,  689,  688,  267,  267,
      267,  267,  267,  267,  267,  267,  267,  284,  284,  687,
      284,  284,  292,  292,  292,  686,  292,  292,  292,  296,
      296,  184,  296,  418,  418,  184,  418,  418,  184,  184,
      418,  433,  433,  683,  433,  433,  682,  678,  433,  465,
      465,  677,  465,  465,  676,  675,  465,  500,  500,  674,
      500,  500,  673,  654,  500,  514,  514,  653,  514,  514,
      652,  651,  514,  650,  649,  642,  641,  640,  639,  638,
      267,  267,  267,  267,  267,  267,  267,  284,  284,  284,
      687,  284,  292,  292,  292,  292,  686,  292,  292,  296,
      184,  296,  184,  296,  418,  418,  418,  184,  418,  184,
      683,  418,  433,  433,  433,  682,  433,  678,  677,  433,
      465,  465,  465,  676,  465,  675,  674,  465,  500,  500,
      500,  673,  500,  654,  653,  500,  514,  514,  514,  652,
      514,  651,  650,  514,  649,  642,  641,  640,  639,  638,

      637,  636,  635,  634,  633,  632,  631,  624,  623,  622,
      621,  620,  619,  611,  610,  609,  608,  607,  606,  605,

@@ -1167,14 +1179,14 @@ static yyconst flex_int16_t yy_chk[2368] =
      815,  815,  815,  815,  815,  815,  815,  815,  815,  815,
      815,  816,  816,  816,  816,  816,  816,  816,  816,  816,
      816,  816,  816,  817,  817,  817,  817,  817,  817,  817,
      817,  817,  817,  817,  817,  819,  819,  819,  683,  819,
      817,  817,  817,  817,  817,  819,  819,  819,  819,  683,

      819,  819,  820,  820,  682,  820,  820,  821,  674,  821,
      819,  819,  820,  820,  820,  682,  820,  821,  674,  821,
      821,  821,  821,  821,  821,  821,  821,  821,  821,  822,
      822,  822,  822,  822,  822,  822,  822,  822,  822,  822,
      822,  823,  823,  823,  823,  823,  823,  823,  823,  823,
      823,  823,  823,  824,  824,  673,  824,  824,  825,  825,
      666,  825,  825,  825,  826,  826,  665,  826,  826,  827,
      823,  823,  823,  824,  824,  824,  673,  824,  825,  825,
      666,  825,  825,  825,  826,  826,  826,  665,  826,  827,
      827,  664,  827,  827,  827,  828,  828,  828,  828,  828,
      828,  828,  828,  828,  828,  828,  828,  829,  829,  829,
      829,  829,  829,  829,  829,  829,  829,  829,  829,  830,

@@ -1183,13 +1195,13 @@ static yyconst flex_int16_t yy_chk[2368] =
      830,  831,  650,  649,  831,  831,  831,  831,  831,  831,
      831,  831,  831,  832,  832,  832,  832,  832,  832,  832,
      832,  832,  832,  832,  832,  833,  638,  637,  833,  833,
      833,  833,  833,  833,  833,  833,  833,  834,  834,  632,
      834,  834,  835,  835,  835,  631,  835,  835,  835,  836,
      836,  630,  836,  840,  840,  629,  840,  840,  628,  627,
      840,  842,  842,  620,  842,  842,  619,  611,  842,  844,
      844,  610,  844,  844,  609,  607,  844,  845,  845,  606,
      845,  845,  605,  586,  845,  846,  846,  585,  846,  846,
      584,  583,  846,  582,  581,  574,  573,  572,  571,  570,
      833,  833,  833,  833,  833,  833,  833,  834,  834,  834,
      632,  834,  835,  835,  835,  835,  631,  835,  835,  836,
      630,  836,  629,  836,  840,  840,  840,  628,  840,  627,
      620,  840,  842,  842,  842,  619,  842,  611,  610,  842,
      844,  844,  844,  609,  844,  607,  606,  844,  845,  845,
      845,  605,  845,  586,  585,  845,  846,  846,  846,  584,
      846,  583,  582,  846,  581,  574,  573,  572,  571,  570,

      569,  568,  567,  566,  565,  564,  563,  556,  555,  554,
      553,  552,  551,  541,  540,  539,  538,  536,  535,  534,
@@ -1323,7 +1335,7 @@ int errorParse;
enum {
    errorNone = 0,
    errorUnexpectedInput,
    errorUnsupportedFeature,
};

}

@@ -1351,7 +1363,7 @@ static const char* fixup_escapes (const char* sz);


#line 1355 "<stdout>"
#line 1367 "<stdout>"

#define INITIAL 0
#define S_DEF 1

@@ -1587,11 +1599,11 @@ YY_DECL
#line 164 "bitbakescanner.l"

#line 1591 "<stdout>"
#line 1603 "<stdout>"

    if ( yyg->yy_init )
    if ( !yyg->yy_init )
        {
        yyg->yy_init = 0;
        yyg->yy_init = 1;

#ifdef YY_USER_INIT
        YY_USER_INIT;

@@ -1972,7 +1984,7 @@ YY_RULE_SETUP
#line 254 "bitbakescanner.l"
ECHO;
    YY_BREAK
#line 1976 "<stdout>"
#line 1988 "<stdout>"

case YY_END_OF_BUFFER:
    {

@@ -2274,7 +2286,7 @@ static int yy_get_next_buffer (yyscan_t yyscanner)
static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state , yyscan_t yyscanner)
{
    register int yy_is_jam;
    struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
    struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; /* This var may be unused depending upon options. */
    register char *yy_cp = yyg->yy_c_buf_p;

    register YY_CHAR yy_c = 1;

@@ -2730,10 +2742,10 @@ YY_BUFFER_STATE yy_scan_buffer (char * base, yy_size_t size , yyscan_t yyscann
 * @note If you want to scan bytes that may contain NUL values, then use
 *       yy_scan_bytes() instead.
 */
YY_BUFFER_STATE yy_scan_string (yyconst char * yy_str , yyscan_t yyscanner)
YY_BUFFER_STATE yy_scan_string (yyconst char * yystr , yyscan_t yyscanner)
{

    return yy_scan_bytes(yy_str,strlen(yy_str) ,yyscanner);
    return yy_scan_bytes(yystr,strlen(yystr) ,yyscanner);
}

/** Setup the input buffer state to scan the given bytes. The next call to yylex() will

@@ -2743,7 +2755,7 @@ YY_BUFFER_STATE yy_scan_string (yyconst char * yy_str , yyscan_t yyscanner)
 * @param yyscanner The scanner object.
 * @return the newly allocated buffer state object.
 */
YY_BUFFER_STATE yy_scan_bytes (yyconst char * bytes, int len , yyscan_t yyscanner)
YY_BUFFER_STATE yy_scan_bytes (yyconst char * yybytes, int _yybytes_len , yyscan_t yyscanner)
{
    YY_BUFFER_STATE b;
    char *buf;

@@ -2751,15 +2763,15 @@ YY_BUFFER_STATE yy_scan_bytes (yyconst char * bytes, int len , yyscan_t yyscan
    int i;

    /* Get memory for full buffer, including space for trailing EOB's. */
    n = len + 2;
    n = _yybytes_len + 2;
    buf = (char *) yyalloc(n ,yyscanner );
    if ( ! buf )
        YY_FATAL_ERROR( "out of dynamic memory in yy_scan_bytes()" );

    for ( i = 0; i < len; ++i )
        buf[i] = bytes[i];
    for ( i = 0; i < _yybytes_len; ++i )
        buf[i] = yybytes[i];

    buf[len] = buf[len+1] = YY_END_OF_BUFFER_CHAR;
    buf[_yybytes_len] = buf[_yybytes_len+1] = YY_END_OF_BUFFER_CHAR;

    b = yy_scan_buffer(buf,n ,yyscanner);
    if ( ! b )
@@ -2987,37 +2999,6 @@ void yyset_debug (int bdebug , yyscan_t yyscanner)

/* Accessor methods for yylval and yylloc */

static int yy_init_globals (yyscan_t yyscanner)
{
    struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
    /* Initialization is the same as for the non-reentrant scanner.
       This function is called once per scanner lifetime. */

    yyg->yy_buffer_stack = 0;
    yyg->yy_buffer_stack_top = 0;
    yyg->yy_buffer_stack_max = 0;
    yyg->yy_c_buf_p = (char *) 0;
    yyg->yy_init = 1;
    yyg->yy_start = 0;
    yyg->yy_start_stack_ptr = 0;
    yyg->yy_start_stack_depth = 0;
    yyg->yy_start_stack = (int *) 0;

    /* Defined in main.c */
#ifdef YY_STDINIT
    yyin = stdin;
    yyout = stdout;
#else
    yyin = (FILE *) 0;
    yyout = (FILE *) 0;
#endif

    /* For future reference: Set errno on error, since we are called by
     * yylex_init()
     */
    return 0;
}

/* User-visible API */

/* yylex_init is special because it creates the scanner itself, so it is

@@ -3040,11 +3021,45 @@ int yylex_init(yyscan_t* ptr_yy_globals)
        return 1;
    }

    memset(*ptr_yy_globals,0,sizeof(struct yyguts_t));
    /* By setting to 0xAA, we expose bugs in yy_init_globals. Leave at 0x00 for releases. */
    memset(*ptr_yy_globals,0x00,sizeof(struct yyguts_t));

    return yy_init_globals ( *ptr_yy_globals );
}

static int yy_init_globals (yyscan_t yyscanner)
{
    struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
    /* Initialization is the same as for the non-reentrant scanner.
     * This function is called from yylex_destroy(), so don't allocate here.
     */

    yyg->yy_buffer_stack = 0;
    yyg->yy_buffer_stack_top = 0;
    yyg->yy_buffer_stack_max = 0;
    yyg->yy_c_buf_p = (char *) 0;
    yyg->yy_init = 0;
    yyg->yy_start = 0;

    yyg->yy_start_stack_ptr = 0;
    yyg->yy_start_stack_depth = 0;
    yyg->yy_start_stack = NULL;

    /* Defined in main.c */
#ifdef YY_STDINIT
    yyin = stdin;
    yyout = stdout;
#else
    yyin = (FILE *) 0;
    yyout = (FILE *) 0;
#endif

    /* For future reference: Set errno on error, since we are called by
     * yylex_init()
     */
    return 0;
}

/* yylex_destroy is for both reentrant and non-reentrant scanners. */
int yylex_destroy (yyscan_t yyscanner)
{

@@ -3065,8 +3080,13 @@ int yylex_destroy (yyscan_t yyscanner)
    yyfree(yyg->yy_start_stack ,yyscanner );
    yyg->yy_start_stack = NULL;

    /* Reset the globals. This is important in a non-reentrant scanner so the next time
     * yylex() is called, initialization will occur. */
    yy_init_globals( yyscanner);

    /* Destroy the main struct (reentrant only). */
    yyfree ( yyscanner , yyscanner );
    yyscanner = NULL;
    return 0;
}
@@ -3078,7 +3098,6 @@ int yylex_destroy (yyscan_t yyscanner)
static void yy_flex_strncpy (char* s1, yyconst char * s2, int n , yyscan_t yyscanner)
{
    register int i;
    struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
    for ( i = 0; i < n; ++i )
        s1[i] = s2[i];
}

@@ -3088,7 +3107,6 @@ static void yy_flex_strncpy (char* s1, yyconst char * s2, int n , yyscan_t yysca
static int yy_flex_strlen (yyconst char * s , yyscan_t yyscanner)
{
    register int n;
    struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
    for ( n = 0; s[n]; ++n )
        ;

@@ -3120,18 +3138,6 @@ void yyfree (void * ptr , yyscan_t yyscanner)

#define YYTABLES_NAME "yytables"

#undef YY_NEW_FILE
#undef YY_FLUSH_BUFFER
#undef yy_set_bol
#undef yy_new_buffer
#undef yy_set_interactive
#undef yytext_ptr
#undef YY_DO_BEFORE_ACTION

#ifdef YY_DECL_IS_OURS
#undef YY_DECL_IS_OURS
#undef YY_DECL
#endif
#line 254 "bitbakescanner.l"
@@ -3148,47 +3154,49 @@ void lex_t::accept (int token, const char* sz)

void lex_t::input (char *buf, int *result, int max_size)
{
    printf("lex_t::input %p %d\n", buf, max_size);
    /* printf("lex_t::input %p %d\n", buf, max_size); */
    *result = fread(buf, 1, max_size, file);
    printf("lex_t::input result %d\n", *result);
    /* printf("lex_t::input result %d\n", *result); */
}

int lex_t::line () const
{
    printf("lex_t::line\n");
    /* printf("lex_t::line\n"); */
    return yyget_lineno (scanner);
}


extern "C" {

void parse (FILE* file, PyObject* data)
void parse (FILE* file, char* name, PyObject* data, int config)
{
    printf("parse bbparseAlloc\n");
    /* printf("parse bbparseAlloc\n"); */
    void* parser = bbparseAlloc (malloc);
    yyscan_t scanner;
    lex_t lex;

    printf("parse yylex_init\n");
    /* printf("parse yylex_init\n"); */
    yylex_init (&scanner);

    lex.parser = parser;
    lex.scanner = scanner;
    lex.file = file;
    lex.name = name;
    lex.data = data;
    lex.config = config;
    lex.parse = bbparse;
    printf("parse yyset_extra\n");
    /* printf("parse yyset_extra\n"); */
    yyset_extra (&lex, scanner);

    printf("parse yylex\n");
    /* printf("parse yylex\n"); */
    int result = yylex (scanner);

    printf("parse result %d\n", result);
    /* printf("parse result %d\n", result); */

    lex.accept (0);
    printf("parse lex.accept\n");
    /* printf("parse lex.accept\n"); */
    bbparseTrace (NULL, NULL);
    printf("parse bbparseTrace\n");
    /* printf("parse bbparseTrace\n"); */

    if (result != T_EOF)
        printf ("premature end of file\n");
@@ -91,7 +91,7 @@ int errorParse;
enum {
    errorNone = 0,
    errorUnexpectedInput,
    errorUnsupportedFeature,
};

}

@@ -142,7 +142,7 @@ SSTRING \'([^\n\r]|"\\\n")*\'
VALUE ([^'" \t\n])|([^'" \t\n]([^\n]|(\\\n))*[^'" \t\n])

C_SS [a-zA-Z_]
C_SB [a-zA-Z0-9_+-.]
C_SB [a-zA-Z0-9_+-./]
REF $\{{C_SS}{C_SB}*\}
SYMBOL {C_SS}{C_SB}*
VARIABLE $?{C_SS}({C_SB}*|{REF})*(\[[a-zA-Z0-9_]*\])?
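The widened C_SB class lets symbols contain slashes. A rough Python equivalent of the SYMBOL pattern before and after the change (the flex character class is approximated, and the file name is invented):

    import re
    old_sym = re.compile(r"[a-zA-Z_][a-zA-Z0-9_+\-.]*")
    new_sym = re.compile(r"[a-zA-Z_][a-zA-Z0-9_+\-./]*")
    print old_sym.match("conf/machine/foo.conf").group()  # -> 'conf'
    print new_sym.match("conf/machine/foo.conf").group()  # -> 'conf/machine/foo.conf'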
@@ -265,47 +265,49 @@ void lex_t::accept (int token, const char* sz)

void lex_t::input (char *buf, int *result, int max_size)
{
    printf("lex_t::input %p %d\n", buf, max_size);
    /* printf("lex_t::input %p %d\n", buf, max_size); */
    *result = fread(buf, 1, max_size, file);
    printf("lex_t::input result %d\n", *result);
    /* printf("lex_t::input result %d\n", *result); */
}

int lex_t::line () const
{
    printf("lex_t::line\n");
    /* printf("lex_t::line\n"); */
    return yyget_lineno (scanner);
}


extern "C" {

void parse (FILE* file, PyObject* data)
void parse (FILE* file, char* name, PyObject* data, int config)
{
    printf("parse bbparseAlloc\n");
    /* printf("parse bbparseAlloc\n"); */
    void* parser = bbparseAlloc (malloc);
    yyscan_t scanner;
    lex_t lex;

    printf("parse yylex_init\n");
    /* printf("parse yylex_init\n"); */
    yylex_init (&scanner);

    lex.parser = parser;
    lex.scanner = scanner;
    lex.file = file;
    lex.name = name;
    lex.data = data;
    lex.config = config;
    lex.parse = bbparse;
    printf("parse yyset_extra\n");
    /* printf("parse yyset_extra\n"); */
    yyset_extra (&lex, scanner);

    printf("parse yylex\n");
    /* printf("parse yylex\n"); */
    int result = yylex (scanner);

    printf("parse result %d\n", result);
    /* printf("parse result %d\n", result); */

    lex.accept (0);
    printf("parse lex.accept\n");
    /* printf("parse lex.accept\n"); */
    bbparseTrace (NULL, NULL);
    printf("parse bbparseTrace\n");
    /* printf("parse bbparseTrace\n"); */

    if (result != T_EOF)
        printf ("premature end of file\n");
@@ -27,13 +27,15 @@ THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#include "Python.h"

extern "C" {

struct lex_t {
    void* parser;
    void* scanner;
    FILE* file;
    char *name;
    PyObject *data;

    int config;

    void* (*parse)(void*, int, token_t, lex_t*);

    void accept(int token, const char* sz = NULL);

@@ -11,7 +11,9 @@ typedef struct {
    void *parser;
    void *scanner;
    FILE *file;
    char *name;
    PyObject *data;
    int config;
} lex_t;

#endif
@@ -23,7 +23,7 @@

import re, bb, os, sys, time
import bb.fetch, bb.build, bb.utils
from bb import debug, data, fetch, fatal, methodpool
from bb import data, fetch, methodpool

from ConfHandler import include, localpath, obtain, init
from bb.parse import ParseError

@@ -44,6 +44,13 @@ __bbpath_found__ = 0
__classname__ = ""
classes = [ None, ]

# We need to indicate EOF to the feeder. This code is so messy that
# factoring it out into a close_parse_file method is out of the question.
# We will use IN_PYTHON_EOF as an indicator to just close the method.
#
# The two parts using it are tightly integrated anyway.
IN_PYTHON_EOF = -9999999999999

__parsed_methods__ = methodpool.get_parsed_dict()

def supports(fn, d):
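A condensed, hypothetical mirror of how the feeder further down consumes this sentinel; the real code keeps more state, so treat this as a sketch only:

    IN_PYTHON_EOF = -9999999999999

    def feed(lineno, line, state):
        if state['inpython']:
            if line.startswith(' ') and lineno != IN_PYTHON_EOF:
                state['body'].append(line)    # still inside the def-block
                return
            state['inpython'] = False         # block closed: flush the body
            if lineno == IN_PYTHON_EOF:
                return                        # end of file, nothing more to parse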
@@ -60,9 +67,9 @@ def inherit(files, d):
            file = os.path.join('classes', '%s.bbclass' % file)

        if not file in __inherit_cache.split():
            debug(2, "BB %s:%d: inheriting %s" % (fn, lineno, file))
            bb.msg.debug(2, bb.msg.domain.Parsing, "BB %s:%d: inheriting %s" % (fn, lineno, file))
            __inherit_cache += " %s" % file
            include(fn, file, d)
            include(fn, file, d, "inherit")
    data.setVar('__inherit_cache', __inherit_cache, d)
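In recipe terms this function services the 'inherit' keyword. A hedged sketch of the call it handles (class name illustrative; resolution to classes/*.bbclass and the "inherit" failure verb follow from the code above):

    inherit(["autotools"], d)   # -> include(fn, 'classes/autotools.bbclass', d, "inherit")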
@@ -75,9 +82,9 @@ def handle(fn, d, include = 0):
    __residue__ = []

    if include == 0:
        debug(2, "BB " + fn + ": handle(data)")
        bb.msg.debug(2, bb.msg.domain.Parsing, "BB " + fn + ": handle(data)")
    else:
        debug(2, "BB " + fn + ": handle(data, include)")
        bb.msg.debug(2, bb.msg.domain.Parsing, "BB " + fn + ": handle(data, include)")

    (root, ext) = os.path.splitext(os.path.basename(fn))
    base_name = "%s%s" % (root,ext)

@@ -132,7 +139,7 @@ def handle(fn, d, include = 0):
            feeder(lineno, s, fn, base_name, d)
    if __inpython__:
        # add a blank line to close out any python definition
        feeder(lineno + 1, "", fn, base_name, d)
        feeder(IN_PYTHON_EOF, "", fn, base_name, d)
    if ext == ".bbclass":
        classes.remove(__classname__)
    else:

@@ -152,7 +159,7 @@ def handle(fn, d, include = 0):
            if t:
                data.setVar('T', t, d)
        except Exception, e:
            bb.debug(1, "executing anonymous function: %s" % e)
            bb.msg.debug(1, bb.msg.domain.Parsing, "executing anonymous function: %s" % e)
            raise
        data.delVar("__anonqueue", d)
        data.delVar("__anonfunc", d)

@@ -220,7 +227,7 @@ def feeder(lineno, s, fn, root, d):

    if __inpython__:
        m = __python_func_regexp__.match(s)
        if m:
        if m and lineno != IN_PYTHON_EOF:
            __body__.append(s)
            return
        else:

@@ -240,6 +247,9 @@ def feeder(lineno, s, fn, root, d):
        __body__ = []
        __inpython__ = False

        if lineno == IN_PYTHON_EOF:
            return

    # fall through

    if s == '' or s[0] == '#': return   # skip comments and empty lines

@@ -374,7 +384,7 @@ def vars_from_file(mypkg, d):
def set_additional_vars(file, d, include):
    """Deduce the rest of the variables, e.g. ${A} out of ${SRC_URI}"""

    debug(2, "BB %s: set_additional_vars" % file)
    bb.msg.debug(2, bb.msg.domain.Parsing, "BB %s: set_additional_vars" % file)

    src_uri = data.getVar('SRC_URI', d)
    if not src_uri:
@@ -22,7 +22,6 @@
    Place, Suite 330, Boston, MA 02111-1307 USA."""

import re, bb.data, os, sys
from bb import debug, fatal
from bb.parse import ParseError

#__config_regexp__ = re.compile( r"(?P<exp>export\s*)?(?P<var>[a-zA-Z0-9\-_+.${}]+)\s*(?P<colon>:)?(?P<ques>\?)?=\s*(?P<apo>['\"]?)(?P<value>.*)(?P=apo)$")

@@ -53,7 +52,7 @@ def localpath(fn, d):
        localfn = fn
    return localfn

def obtain(fn, data = bb.data.init()):
def obtain(fn, data):
    import sys, bb
    fn = bb.data.expand(fn, data)
    localfn = bb.data.expand(localpath(fn, data), data)

@@ -61,30 +60,30 @@ def obtain(fn, data = bb.data.init()):
    if localfn != fn:
        dldir = bb.data.getVar('DL_DIR', data, 1)
        if not dldir:
            debug(1, "obtain: DL_DIR not defined")
            bb.msg.debug(1, bb.msg.domain.Parsing, "obtain: DL_DIR not defined")
            return localfn
        bb.mkdirhier(dldir)
        try:
            bb.fetch.init([fn])
        except bb.fetch.NoMethodError:
            (type, value, traceback) = sys.exc_info()
            debug(1, "obtain: no method: %s" % value)
            bb.msg.debug(1, bb.msg.domain.Parsing, "obtain: no method: %s" % value)
            return localfn

        try:
            bb.fetch.go(data)
        except bb.fetch.MissingParameterError:
            (type, value, traceback) = sys.exc_info()
            debug(1, "obtain: missing parameters: %s" % value)
            bb.msg.debug(1, bb.msg.domain.Parsing, "obtain: missing parameters: %s" % value)
            return localfn
        except bb.fetch.FetchError:
            (type, value, traceback) = sys.exc_info()
            debug(1, "obtain: failed: %s" % value)
            bb.msg.debug(1, bb.msg.domain.Parsing, "obtain: failed: %s" % value)
            return localfn
    return localfn


def include(oldfn, fn, data = bb.data.init(), error_out = False):
def include(oldfn, fn, data, error_out):
    """

    error_out   If True a ParseError will be raised if the to-be-included

@@ -101,10 +100,10 @@ def include(oldfn, fn, data = bb.data.init(), error_out = False):
        ret = handle(fn, data, True)
    except IOError:
        if error_out:
            raise ParseError("Could not include required file %(fn)s" % vars() )
        debug(2, "CONF file '%s' not found" % fn)
            raise ParseError("Could not %(error_out)s file %(fn)s" % vars() )
        bb.msg.debug(2, bb.msg.domain.Parsing, "CONF file '%s' not found" % fn)
def handle(fn, data = bb.data.init(), include = 0):
|
||||
def handle(fn, data, include = 0):
|
||||
if include:
|
||||
inc_string = "including"
|
||||
else:
|
||||
|
@ -129,13 +128,13 @@ def handle(fn, data = bb.data.init(), include = 0):
|
|||
if os.access(currname, os.R_OK):
|
||||
f = open(currname, 'r')
|
||||
abs_fn = currname
|
||||
debug(1, "CONF %s %s" % (inc_string, currname))
|
||||
bb.msg.debug(2, bb.msg.domain.Parsing, "CONF %s %s" % (inc_string, currname))
|
||||
break
|
||||
if f is None:
|
||||
raise IOError("file '%s' not found" % fn)
|
||||
else:
|
||||
f = open(fn,'r')
|
||||
debug(1, "CONF %s %s" % (inc_string,fn))
|
||||
bb.msg.debug(1, bb.msg.domain.Parsing, "CONF %s %s" % (inc_string,fn))
|
||||
abs_fn = fn
|
||||
|
||||
if include:
|
||||
|
@ -161,7 +160,7 @@ def handle(fn, data = bb.data.init(), include = 0):
|
|||
bb.data.setVar('FILE', oldfile, data)
|
||||
return data
|
||||
|
||||
def feeder(lineno, s, fn, data = bb.data.init()):
|
||||
def feeder(lineno, s, fn, data):
|
||||
m = __config_regexp__.match(s)
|
||||
if m:
|
||||
groupd = m.groupdict()
|
||||
|
@ -185,7 +184,7 @@ def feeder(lineno, s, fn, data = bb.data.init()):
|
|||
else:
|
||||
val = groupd["value"]
|
||||
if 'flag' in groupd and groupd['flag'] != None:
|
||||
# bb.note("setVarFlag(%s, %s, %s, data)" % (key, groupd['flag'], val))
|
||||
bb.msg.debug(3, bb.msg.domain.Parsing, "setVarFlag(%s, %s, %s, data)" % (key, groupd['flag'], val))
|
||||
bb.data.setVarFlag(key, groupd['flag'], val, data)
|
||||
else:
|
||||
bb.data.setVar(key, val, data)
|
||||
|
@ -194,14 +193,14 @@ def feeder(lineno, s, fn, data = bb.data.init()):
|
|||
m = __include_regexp__.match(s)
|
||||
if m:
|
||||
s = bb.data.expand(m.group(1), data)
|
||||
# debug(2, "CONF %s:%d: including %s" % (fn, lineno, s))
|
||||
include(fn, s, data)
|
||||
bb.msg.debug(3, bb.msg.domain.Parsing, "CONF %s:%d: including %s" % (fn, lineno, s))
|
||||
include(fn, s, data, False)
|
||||
return
|
||||
|
||||
m = __require_regexp__.match(s)
|
||||
if m:
|
||||
s = bb.data.expand(m.group(1), data)
|
||||
include(fn, s, data, True)
|
||||
include(fn, s, data, "include required")
|
||||
return
|
||||
|
||||
raise ParseError("%s:%d: unparsed line: '%s'" % (fn, lineno, s));
|
||||
|
|
|
lib/bb/providers.py (new file):

@@ -0,0 +1,209 @@
#!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (C) 2003, 2004 Chris Larson
# Copyright (C) 2003, 2004 Phil Blundell
# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer
# Copyright (C) 2005 Holger Hans Peter Freyther
# Copyright (C) 2005 ROAD GmbH
# Copyright (C) 2006 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place, Suite 330, Boston, MA 02111-1307 USA.

import os, re
from bb import data, utils
import bb

class NoProvider(Exception):
    """Exception raised when no provider of a build dependency can be found"""

class NoRProvider(Exception):
    """Exception raised when no provider of a runtime dependency can be found"""
def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
    """
    If there is a PREFERRED_VERSION, find the highest-priority bbfile
    providing that version. If not, find the latest version provided by
    a bbfile in the highest-priority set.
    """
    if not pkg_pn:
        pkg_pn = dataCache.pkg_pn

    files = pkg_pn[pn]
    priorities = {}
    for f in files:
        priority = dataCache.bbfile_priority[f]
        if priority not in priorities:
            priorities[priority] = []
        priorities[priority].append(f)
    p_list = priorities.keys()
    p_list.sort(lambda a, b: a - b)
    tmp_pn = []
    for p in p_list:
        tmp_pn = [priorities[p]] + tmp_pn

    preferred_file = None

    localdata = data.createCopy(cfgData)
    bb.data.setVar('OVERRIDES', "%s:%s" % (pn, data.getVar('OVERRIDES', localdata)), localdata)
    bb.data.update_data(localdata)

    preferred_v = bb.data.getVar('PREFERRED_VERSION_%s' % pn, localdata, True)
    if preferred_v:
        m = re.match('(.*)_(.*)', preferred_v)
        if m:
            preferred_v = m.group(1)
            preferred_r = m.group(2)
        else:
            preferred_r = None

        for file_set in tmp_pn:
            for f in file_set:
                pv,pr = dataCache.pkg_pvpr[f]
                if preferred_v == pv and (preferred_r == pr or preferred_r == None):
                    preferred_file = f
                    preferred_ver = (pv, pr)
                    break
            if preferred_file:
                break
        if preferred_r:
            pv_str = '%s-%s' % (preferred_v, preferred_r)
        else:
            pv_str = preferred_v
        itemstr = ""
        if item:
            itemstr = " (for item %s)" % item
        if preferred_file is None:
            bb.msg.note(1, bb.msg.domain.Provider, "preferred version %s of %s not available%s" % (pv_str, pn, itemstr))
        else:
            bb.msg.debug(1, bb.msg.domain.Provider, "selecting %s as PREFERRED_VERSION %s of package %s%s" % (preferred_file, pv_str, pn, itemstr))

    del localdata

    # get highest priority file set
    files = tmp_pn[0]
    latest = None
    latest_p = 0
    latest_f = None
    for file_name in files:
        pv,pr = dataCache.pkg_pvpr[file_name]
        dp = dataCache.pkg_dp[file_name]

        if (latest is None) or ((latest_p == dp) and (utils.vercmp(latest, (pv, pr)) < 0)) or (dp > latest_p):
            latest = (pv, pr)
            latest_f = file_name
            latest_p = dp
    if preferred_file is None:
        preferred_file = latest_f
        preferred_ver = latest

    return (latest,latest_f,preferred_ver, preferred_file)
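Stripped of the PREFERRED_VERSION handling, the selection above reduces to: group files by priority, keep only the highest-priority group, then take the newest version within it. A minimal sketch with plain tuples; the sample data is invented and naive tuple comparison stands in for utils.vercmp:

    # (priority, (pv, pr), filename) triples; sample data only
    files = [
        (5, ("1.0", "r0"), "a_1.0.bb"),
        (5, ("1.2", "r1"), "a_1.2.bb"),
        (1, ("9.9", "r0"), "low-prio_9.9.bb"),
    ]

    top = max(p for p, _, _ in files)                # highest-priority set
    candidates = [f for f in files if f[0] == top]   # drop lower priorities
    latest = max(candidates, key=lambda f: f[1])     # naive stand-in for vercmp
    print(latest[2])                                 # -> a_1.2.bb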
#
# RP - build_cache_fail needs to move elsewhere
#
def filterProviders(providers, item, cfgData, dataCache, build_cache_fail = {}):
    """
    Take a list of providers and filter/reorder according to the
    environment variables and previous build results
    """
    eligible = []
    preferred_versions = {}

    # Collate providers by PN
    pkg_pn = {}
    for p in providers:
        pn = dataCache.pkg_fn[p]
        if pn not in pkg_pn:
            pkg_pn[pn] = []
        pkg_pn[pn].append(p)

    bb.msg.debug(1, bb.msg.domain.Provider, "providers for %s are: %s" % (item, pkg_pn.keys()))

    for pn in pkg_pn.keys():
        preferred_versions[pn] = bb.providers.findBestProvider(pn, cfgData, dataCache, pkg_pn, item)[2:4]
        eligible.append(preferred_versions[pn][1])

    for p in eligible:
        if p in build_cache_fail:
            bb.msg.debug(1, bb.msg.domain.Provider, "rejecting already-failed %s" % p)
            eligible.remove(p)

    if len(eligible) == 0:
        bb.msg.error(bb.msg.domain.Provider, "no eligible providers for %s" % item)
        return 0

    # If pn == item, give it a slight default preference
    # This means PREFERRED_PROVIDER_foobar defaults to foobar if available
    for p in providers:
        pn = dataCache.pkg_fn[p]
        if pn != item:
            continue
        (newvers, fn) = preferred_versions[pn]
        if not fn in eligible:
            continue
        eligible.remove(fn)
        eligible = [fn] + eligible

    # look to see if one of them is already staged, or marked as preferred.
    # if so, bump it to the head of the queue
    for p in providers:
        pn = dataCache.pkg_fn[p]
        pv, pr = dataCache.pkg_pvpr[p]

        stamp = '%s.do_populate_staging' % dataCache.stamp[p]
        if os.path.exists(stamp):
            (newvers, fn) = preferred_versions[pn]
            if not fn in eligible:
                # package was made ineligible by already-failed check
                continue
            oldver = "%s-%s" % (pv, pr)
            newver = '-'.join(newvers)
            if (newver != oldver):
                extra_chat = "%s (%s) already staged but upgrading to %s to satisfy %s" % (pn, oldver, newver, item)
            else:
                extra_chat = "Selecting already-staged %s (%s) to satisfy %s" % (pn, oldver, item)

            bb.msg.note(2, bb.msg.domain.Provider, "%s" % extra_chat)
            eligible.remove(fn)
            eligible = [fn] + eligible
            break

    return eligible
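The remove-then-prepend pattern used twice above (for the pn == item default and for already-staged providers) is a small list idiom worth isolating; a sketch with invented names:

    def bump_to_head(eligible, fn):
        # move fn to the front so it is tried first, keeping the rest in order
        if fn in eligible:
            eligible.remove(fn)
            eligible.insert(0, fn)
        return eligible

    print(bump_to_head(["x.bb", "y.bb", "z.bb"], "y.bb"))
    # -> ['y.bb', 'x.bb', 'z.bb']

Note, as a general caution, that removing from a list while iterating over it (as the already-failed filter above does with eligible.remove(p)) can skip elements; iterating over a copy avoids that.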
def getRuntimeProviders(dataCache, rdepend):
    """
    Return any providers of runtime dependency
    """
    rproviders = []

    if rdepend in dataCache.rproviders:
        rproviders += dataCache.rproviders[rdepend]

    if rdepend in dataCache.packages:
        rproviders += dataCache.packages[rdepend]

    if rproviders:
        return rproviders

    # Only search dynamic packages if we can't find anything in other variables
    for pattern in dataCache.packages_dynamic:
        regexp = re.compile(pattern)
        if regexp.match(rdepend):
            rproviders += dataCache.packages_dynamic[pattern]

    return rproviders
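The dynamic-packages fallback can be illustrated with a toy mapping of pattern strings to provider lists (the data here is invented, not real metadata):

    import re

    packages_dynamic = {
        "^perl-module-.*": ["perl_5.8.bb"],
        "^kernel-module-.*": ["linux_2.6.bb"],
    }

    def runtime_providers(rdepend):
        rproviders = []
        # only consulted when no static provider was found
        for pattern, providers in packages_dynamic.items():
            if re.match(pattern, rdepend):
                rproviders += providers
        return rproviders

    print(runtime_providers("perl-module-strict"))   # -> ['perl_5.8.bb']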
lib/bb/runqueue.py (new file):

@@ -0,0 +1,491 @@
#!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'RunQueue' implementation

Handles preparation and execution of a queue of tasks

Copyright (C) 2006 Richard Purdie

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License version 2 as published by the Free
Software Foundation

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
"""

from bb import msg, data, fetch, event, mkdirhier, utils
from sets import Set
import bb, os, sys

class TaskFailure(Exception):
    """Exception raised when a task in a runqueue fails"""

    def __init__(self, fnid, fn, taskname):
        self.args = fnid, fn, taskname

class RunQueue:
    """
    BitBake Run Queue implementation
    """
    def __init__(self):
        self.reset_runqueue()

    def reset_runqueue(self):
        self.runq_fnid = []
        self.runq_task = []
        self.runq_depends = []
        self.runq_revdeps = []
        self.runq_weight = []
        self.prio_map = []

    def get_user_idstring(self, task, taskData):
        fn = taskData.fn_index[self.runq_fnid[task]]
        taskname = self.runq_task[task]
        return "%s, %s" % (fn, taskname)

    def prepare_runqueue(self, cfgData, dataCache, taskData, targets):
        """
        Turn a set of taskData into a RunQueue and compute data needed
        to optimise the execution order.
        targets is a list of paired values - a provider name and the task to run
        """

        depends = []
        runq_weight1 = []
        runq_build = []
        runq_done = []

        bb.msg.note(1, bb.msg.domain.RunQueue, "Preparing Runqueue")

        for task in range(len(taskData.tasks_name)):
            fnid = taskData.tasks_fnid[task]
            fn = taskData.fn_index[fnid]
            task_deps = dataCache.task_deps[fn]

            if fnid not in taskData.failed_fnids:

                depends = taskData.tasks_tdepends[task]

                # Resolve Depends
                if 'deptask' in task_deps and taskData.tasks_name[task] in task_deps['deptask']:
                    taskname = task_deps['deptask'][taskData.tasks_name[task]]
                    for depid in taskData.depids[fnid]:
                        if depid in taskData.build_targets:
                            depdata = taskData.build_targets[depid][0]
                            if depdata:
                                dep = taskData.fn_index[depdata]
                                depends.append(taskData.gettask_id(dep, taskname))

                # Resolve Runtime Depends
                if 'rdeptask' in task_deps and taskData.tasks_name[task] in task_deps['rdeptask']:
                    taskname = task_deps['rdeptask'][taskData.tasks_name[task]]
                    for depid in taskData.rdepids[fnid]:
                        if depid in taskData.run_targets:
                            depdata = taskData.run_targets[depid][0]
                            if depdata:
                                dep = taskData.fn_index[depdata]
                                depends.append(taskData.gettask_id(dep, taskname))

                def add_recursive_build(depid):
                    """
                    Add build depends of depid to depends
                    (if we've not seen it before)
                    (calls itself recursively)
                    """
                    if str(depid) in dep_seen:
                        return
                    dep_seen.append(depid)
                    if depid in taskData.build_targets:
                        depdata = taskData.build_targets[depid][0]
                        if depdata:
                            dep = taskData.fn_index[depdata]
                            taskid = taskData.gettask_id(dep, taskname)
                            depends.append(taskid)
                            fnid = taskData.tasks_fnid[taskid]
                            for nextdepid in taskData.depids[fnid]:
                                if nextdepid not in dep_seen:
                                    add_recursive_build(nextdepid)
                            for nextdepid in taskData.rdepids[fnid]:
                                if nextdepid not in rdep_seen:
                                    add_recursive_run(nextdepid)

                def add_recursive_run(rdepid):
                    """
                    Add runtime depends of rdepid to depends
                    (if we've not seen it before)
                    (calls itself recursively)
                    """
                    if str(rdepid) in rdep_seen:
                        return
                    rdep_seen.append(rdepid)
                    if rdepid in taskData.run_targets:
                        depdata = taskData.run_targets[rdepid][0]
                        if depdata:
                            dep = taskData.fn_index[depdata]
                            taskid = taskData.gettask_id(dep, taskname)
                            depends.append(taskid)
                            fnid = taskData.tasks_fnid[taskid]
                            for nextdepid in taskData.depids[fnid]:
                                if nextdepid not in dep_seen:
                                    add_recursive_build(nextdepid)
                            for nextdepid in taskData.rdepids[fnid]:
                                if nextdepid not in rdep_seen:
                                    add_recursive_run(nextdepid)

                # Resolve Recursive Runtime Depends
                # Also includes all Build Depends (and their runtime depends)
                if 'recrdeptask' in task_deps and taskData.tasks_name[task] in task_deps['recrdeptask']:
                    dep_seen = []
                    rdep_seen = []
                    taskname = task_deps['recrdeptask'][taskData.tasks_name[task]]
                    for depid in taskData.depids[fnid]:
                        add_recursive_build(depid)
                    for rdepid in taskData.rdepids[fnid]:
                        add_recursive_run(rdepid)

                # Prune self references
                if task in depends:
                    newdep = []
                    bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s %s) contains self reference! %s" % (task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], depends))
                    for dep in depends:
                        if task != dep:
                            newdep.append(dep)
                    depends = newdep

            self.runq_fnid.append(taskData.tasks_fnid[task])
            self.runq_task.append(taskData.tasks_name[task])
            self.runq_depends.append(Set(depends))
            self.runq_revdeps.append(Set())
            self.runq_weight.append(0)

            runq_weight1.append(0)
            runq_build.append(0)
            runq_done.append(0)

        bb.msg.note(2, bb.msg.domain.RunQueue, "Marking Active Tasks")

        def mark_active(listid, depth):
            """
            Mark an item as active along with its depends
            (calls itself recursively)
            """

            if runq_build[listid] == 1:
                return

            runq_build[listid] = 1

            depends = self.runq_depends[listid]
            for depend in depends:
                mark_active(depend, depth+1)

        for target in targets:
            targetid = taskData.getbuild_id(target[0])
            if targetid in taskData.failed_deps:
                continue

            if targetid not in taskData.build_targets:
                continue

            fnid = taskData.build_targets[targetid][0]
            if fnid in taskData.failed_fnids:
                continue

            listid = taskData.tasks_lookup[fnid][target[1]]

            mark_active(listid, 1)

        # Prune inactive tasks
        maps = []
        delcount = 0
        for listid in range(len(self.runq_fnid)):
            if runq_build[listid-delcount] == 1:
                maps.append(listid-delcount)
            else:
                del self.runq_fnid[listid-delcount]
                del self.runq_task[listid-delcount]
                del self.runq_depends[listid-delcount]
                del self.runq_weight[listid-delcount]
                del runq_weight1[listid-delcount]
                del runq_build[listid-delcount]
                del runq_done[listid-delcount]
                del self.runq_revdeps[listid-delcount]
                delcount = delcount + 1
                maps.append(-1)

        if len(self.runq_fnid) == 0:
            if not taskData.abort:
                bb.msg.note(1, bb.msg.domain.RunQueue, "All possible tasks have been run but build incomplete (--continue mode). See errors above for incomplete tasks.")
                return
            bb.msg.fatal(bb.msg.domain.RunQueue, "No active tasks and not in --continue mode?! Please report this bug.")

        bb.msg.note(2, bb.msg.domain.RunQueue, "Pruned %s inactive tasks, %s left" % (delcount, len(self.runq_fnid)))

        for listid in range(len(self.runq_fnid)):
            newdeps = []
            origdeps = self.runq_depends[listid]
            for origdep in origdeps:
                if maps[origdep] == -1:
                    bb.msg.fatal(bb.msg.domain.RunQueue, "Invalid mapping - Should never happen!")
                newdeps.append(maps[origdep])
            self.runq_depends[listid] = Set(newdeps)

        bb.msg.note(2, bb.msg.domain.RunQueue, "Assign Weightings")

        for listid in range(len(self.runq_fnid)):
            for dep in self.runq_depends[listid]:
                self.runq_revdeps[dep].add(listid)

        endpoints = []
        for listid in range(len(self.runq_fnid)):
            revdeps = self.runq_revdeps[listid]
            if len(revdeps) == 0:
                runq_done[listid] = 1
                self.runq_weight[listid] = 1
                endpoints.append(listid)
            for dep in revdeps:
                if dep in self.runq_depends[listid]:
                    #self.dump_data(taskData)
                    bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep] , taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid]))
            runq_weight1[listid] = len(revdeps)

        bb.msg.note(2, bb.msg.domain.RunQueue, "Compute totals (have %s endpoint(s))" % len(endpoints))

        while 1:
            next_points = []
            for listid in endpoints:
                for revdep in self.runq_depends[listid]:
                    self.runq_weight[revdep] = self.runq_weight[revdep] + self.runq_weight[listid]
                    runq_weight1[revdep] = runq_weight1[revdep] - 1
                    if runq_weight1[revdep] == 0:
                        next_points.append(revdep)
                        runq_done[revdep] = 1
            endpoints = next_points
            if len(next_points) == 0:
                break
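The weighting pass above is a reverse topological sweep: endpoints (tasks nothing depends on) start with weight 1, and each task accumulates the weights of everything that depends on it, so heavily-depended-on tasks end up heaviest. A compact sketch on a toy graph; deps maps task -> set of dependencies and the data is invented:

    deps = {0: set(), 1: {0}, 2: {0}, 3: {1, 2}}     # 3 depends on 1 and 2, etc.
    revdeps = {t: set() for t in deps}
    for t, ds in deps.items():
        for d in ds:
            revdeps[d].add(t)

    weight = {t: 0 for t in deps}
    pending = {t: len(revdeps[t]) for t in deps}     # like runq_weight1
    endpoints = [t for t in deps if not revdeps[t]]  # nothing depends on these
    for t in endpoints:
        weight[t] = 1

    while endpoints:
        nxt = []
        for t in endpoints:
            for d in deps[t]:                        # push weight down to deps
                weight[d] += weight[t]
                pending[d] -= 1
                if pending[d] == 0:                  # all dependents processed
                    nxt.append(d)
        endpoints = nxt

    print(weight)    # -> {0: 2, 1: 1, 2: 1, 3: 1}; task 0 is heaviest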
        # Sanity Checks
        for task in range(len(self.runq_fnid)):
            if runq_done[task] == 0:
                seen = []
                deps_seen = []
                def print_chain(taskid, finish):
                    seen.append(taskid)
                    for revdep in self.runq_revdeps[taskid]:
                        if runq_done[revdep] == 0 and revdep not in seen and not finish:
                            bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) (depends: %s)" % (revdep, self.get_user_idstring(revdep, taskData), self.runq_depends[revdep]))
                            if revdep in deps_seen:
                                bb.msg.error(bb.msg.domain.RunQueue, "Chain ends at Task %s (%s)" % (revdep, self.get_user_idstring(revdep, taskData)))
                                finish = True
                                return
                            for dep in self.runq_depends[revdep]:
                                deps_seen.append(dep)
                            print_chain(revdep, finish)
                print_chain(task, False)
                bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) not processed!\nThis is probably a circular dependency (the chain might be printed above)." % (task, self.get_user_idstring(task, taskData)))
            if runq_weight1[task] != 0:
                bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) count not zero!" % (task, self.get_user_idstring(task, taskData)))

        # Make a weight sorted map
        from copy import deepcopy

        sortweight = deepcopy(self.runq_weight)
        sortweight.sort()
        copyweight = deepcopy(self.runq_weight)
        self.prio_map = []

        for weight in sortweight:
            idx = copyweight.index(weight)
            self.prio_map.append(idx)
            copyweight[idx] = -1
        self.prio_map.reverse()

        #self.dump_data(taskData)
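The prio_map built above is effectively an argsort of runq_weight in descending order (the copyweight[idx] = -1 trick keeps equal weights from mapping to the same index twice). The same map can be expressed more directly; a sketch:

    runq_weight = [3, 1, 4, 1, 5]
    # indices of tasks, heaviest weight first (stable for equal weights)
    prio_map = sorted(range(len(runq_weight)),
                      key=lambda i: runq_weight[i], reverse=True)
    print(prio_map)   # -> [4, 2, 0, 1, 3]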
    def execute_runqueue(self, cooker, cfgData, dataCache, taskData, runlist):
        """
        Run the tasks in a queue prepared by prepare_runqueue
        Upon failure, optionally try to recover the build using any alternate providers
        (if the abort on failure configuration option isn't set)
        """

        failures = 0
        while 1:
            try:
                self.execute_runqueue_internal(cooker, cfgData, dataCache, taskData)
                return failures
            except bb.runqueue.TaskFailure, (fnid, taskData.fn_index[fnid], taskname):
                if taskData.abort:
                    raise
                taskData.fail_fnid(fnid)
                self.reset_runqueue()
                self.prepare_runqueue(cfgData, dataCache, taskData, runlist)
                failures = failures + 1
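The retry loop above implements "fail, blacklist, re-plan, try again". Its control flow, reduced to a skeleton with illustrative names (not the real API):

    class TaskFailureError(Exception):
        def __init__(self, fnid):
            self.fnid = fnid

    def execute_with_recovery(queue):
        failures = 0
        while True:
            try:
                queue.run()              # may raise TaskFailureError
                return failures
            except TaskFailureError as e:
                if queue.abort:          # like bitbake without -k/--continue
                    raise
                queue.blacklist(e.fnid)  # drop the failed provider
                queue.rebuild()          # re-plan with remaining providers
                failures += 1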
    def execute_runqueue_internal(self, cooker, cfgData, dataCache, taskData):
        """
        Run the tasks in a queue prepared by prepare_runqueue
        """

        bb.msg.note(1, bb.msg.domain.RunQueue, "Executing runqueue")

        runq_buildable = []
        runq_running = []
        runq_complete = []
        active_builds = 0
        build_pids = {}

        if len(self.runq_fnid) == 0:
            # nothing to do
            return

        def get_next_task(data):
            """
            Return the id of the highest priority task that is buildable
            """
            for task1 in range(len(data.runq_fnid)):
                task = data.prio_map[task1]
                if runq_running[task] == 1:
                    continue
                if runq_buildable[task] == 1:
                    return task
            return None

        def task_complete(data, task):
            """
            Mark a task as completed
            Look at the reverse dependencies and mark any task with
            completed dependencies as buildable
            """
            runq_complete[task] = 1
            for revdep in data.runq_revdeps[task]:
                if runq_running[revdep] == 1:
                    continue
                if runq_buildable[revdep] == 1:
                    continue
                alldeps = 1
                for dep in data.runq_depends[revdep]:
                    if runq_complete[dep] != 1:
                        alldeps = 0
                if alldeps == 1:
                    runq_buildable[revdep] = 1
                    fn = taskData.fn_index[self.runq_fnid[revdep]]
                    taskname = self.runq_task[revdep]
                    bb.msg.debug(1, bb.msg.domain.RunQueue, "Marking task %s (%s, %s) as buildable" % (revdep, fn, taskname))

        # Mark initial buildable tasks
        for task in range(len(self.runq_fnid)):
            runq_running.append(0)
            runq_complete.append(0)
            if len(self.runq_depends[task]) == 0:
                runq_buildable.append(1)
            else:
                runq_buildable.append(0)

        number_tasks = int(bb.data.getVar("BB_NUMBER_THREADS", cfgData) or 1)

        try:
            while 1:
                task = get_next_task(self)
                if task is not None:
                    fn = taskData.fn_index[self.runq_fnid[task]]
                    taskname = self.runq_task[task]

                    if bb.build.stamp_is_current_cache(dataCache, fn, taskname):
                        targetid = taskData.gettask_id(fn, taskname)
                        if not (targetid in taskData.external_targets and cooker.configuration.force):
                            bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task, taskData)))
                            runq_running[task] = 1
                            task_complete(self, task)
                            continue

                    bb.msg.debug(1, bb.msg.domain.RunQueue, "Running task %s (%s)" % (task, self.get_user_idstring(task, taskData)))
                    try:
                        pid = os.fork()
                    except OSError, e:
                        bb.msg.fatal(bb.msg.domain.RunQueue, "fork failed: %d (%s)" % (e.errno, e.strerror))
                    if pid == 0:
                        cooker.configuration.cmd = taskname[3:]
                        try:
                            cooker.tryBuild(fn, False)
                        except bb.build.EventException:
                            bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed")
                            sys.exit(1)
                        except:
                            bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed")
                            raise
                        sys.exit(0)
                    build_pids[pid] = task
                    runq_running[task] = 1
                    active_builds = active_builds + 1
                    if active_builds < number_tasks:
                        continue
                if active_builds > 0:
                    result = os.waitpid(-1, 0)
                    active_builds = active_builds - 1
                    task = build_pids[result[0]]
                    if result[1] != 0:
                        bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed" % (task, self.get_user_idstring(task, taskData)))
                        raise bb.runqueue.TaskFailure(self.runq_fnid[task], taskData.fn_index[self.runq_fnid[task]], self.runq_task[task])
                    task_complete(self, task)
                    del build_pids[result[0]]
                    continue
                break
        except SystemExit:
            raise
        except:
            bb.msg.error(bb.msg.domain.RunQueue, "Exception received")
            while active_builds > 0:
                bb.msg.note(1, bb.msg.domain.RunQueue, "Waiting for %s active tasks to finish" % active_builds)
                tasknum = 1
                for k, v in build_pids.iteritems():
                    bb.msg.note(1, bb.msg.domain.RunQueue, "%s: %s (%s)" % (tasknum, self.get_user_idstring(v, taskData), k))
                    tasknum = tasknum + 1
                result = os.waitpid(-1, 0)
                del build_pids[result[0]]
                active_builds = active_builds - 1
            raise
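The executor above is a classic fork/waitpid worker pool: fork up to BB_NUMBER_THREADS children, then block in os.waitpid(-1, 0) for any child to exit and inspect its status. A self-contained sketch of the pattern (Unix-only; the work done in the child is a placeholder):

    import os, sys, time

    jobs = range(5)          # placeholder task ids
    max_jobs = 2             # cf. BB_NUMBER_THREADS
    pids = {}                # pid -> task id
    active = 0

    for task in jobs:
        pid = os.fork()
        if pid == 0:                     # child: do the work and exit
            time.sleep(0.1)              # stand-in for running the task
            sys.exit(0)                  # non-zero exit would mean failure
        pids[pid] = task
        active += 1
        if active >= max_jobs:           # throttle: reap one before forking more
            pid, status = os.waitpid(-1, 0)
            print("task %s done, status %s" % (pids.pop(pid), status))
            active -= 1

    while active:                        # drain the remaining children
        pid, status = os.waitpid(-1, 0)
        print("task %s done, status %s" % (pids.pop(pid), status))
        active -= 1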
        # Sanity Checks
        for task in range(len(self.runq_fnid)):
            if runq_buildable[task] == 0:
                bb.msg.error(bb.msg.domain.RunQueue, "Task %s never buildable!" % task)
            if runq_running[task] == 0:
                bb.msg.error(bb.msg.domain.RunQueue, "Task %s never ran!" % task)
            if runq_complete[task] == 0:
                bb.msg.error(bb.msg.domain.RunQueue, "Task %s never completed!" % task)

        return 0

    def dump_data(self, taskQueue):
        """
        Dump some debug information on the internal data structures
        """
        bb.msg.debug(3, bb.msg.domain.RunQueue, "run_tasks:")
        for task in range(len(self.runq_fnid)):
            bb.msg.debug(3, bb.msg.domain.RunQueue, " (%s)%s - %s: %s Deps %s RevDeps %s" % (task,
                taskQueue.fn_index[self.runq_fnid[task]],
                self.runq_task[task],
                self.runq_weight[task],
                self.runq_depends[task],
                self.runq_revdeps[task]))

        bb.msg.debug(3, bb.msg.domain.RunQueue, "sorted_tasks:")
        for task1 in range(len(self.runq_fnid)):
            if task1 in self.prio_map:
                task = self.prio_map[task1]
                bb.msg.debug(3, bb.msg.domain.RunQueue, " (%s)%s - %s: %s Deps %s RevDeps %s" % (task,
                    taskQueue.fn_index[self.runq_fnid[task]],
                    self.runq_task[task],
                    self.runq_weight[task],
                    self.runq_depends[task],
                    self.runq_revdeps[task]))
lib/bb/shell.py:

@@ -56,9 +56,8 @@ try:
    set
except NameError:
    from sets import Set as set
import sys, os, imp, readline, socket, httplib, urllib, commands, popen2, copy, shlex, Queue, fnmatch
imp.load_source( "bitbake", os.path.dirname( sys.argv[0] )+"/bitbake" )
from bb import data, parse, build, fatal
import sys, os, readline, socket, httplib, urllib, commands, popen2, copy, shlex, Queue, fnmatch
from bb import data, parse, build, fatal, cache, taskdata, runqueue, providers as Providers

__version__ = "0.5.3.1"
__credits__ = """BitBake Shell Version %s (C) 2005 Michael 'Mickey' Lauer <mickey@Vanille.de>

@@ -108,7 +107,7 @@ class BitBakeShellCommands:
        preferred = data.getVar( "PREFERRED_PROVIDER_%s" % item, cooker.configuration.data, 1 )
        if not preferred: preferred = item
        try:
            lv, lf, pv, pf = cooker.findBestProvider( preferred )
            lv, lf, pv, pf = Providers.findBestProvider(preferred, cooker.configuration.data, cooker.status, cooker.build_cache_fail)
        except KeyError:
            if item in cooker.status.providers:
                pf = cooker.status.providers[item][0]

@@ -156,14 +155,39 @@ class BitBakeShellCommands:
        cooker.build_cache = []
        cooker.build_cache_fail = []

        for name in names:
            try:
                cooker.buildProvider( name, data.getVar("BUILD_ALL_DEPS", cooker.configuration.data, True) )
            except build.EventException, e:
                print "ERROR: Couldn't build '%s'" % name
                global last_exception
                last_exception = e
                break
        td = taskdata.TaskData(cooker.configuration.abort)

        try:
            tasks = []
            for name in names:
                td.add_provider(cooker.configuration.data, cooker.status, name)
                providers = td.get_provider(name)

                if len(providers) == 0:
                    raise Providers.NoProvider

                tasks.append([name, "do_%s" % cooker.configuration.cmd])

            td.add_unresolved(cooker.configuration.data, cooker.status)

            rq = runqueue.RunQueue()
            rq.prepare_runqueue(cooker.configuration.data, cooker.status, td, tasks)
            rq.execute_runqueue(cooker, cooker.configuration.data, cooker.status, td, tasks)

        except Providers.NoProvider:
            print "ERROR: No Provider"
            global last_exception
            last_exception = Providers.NoProvider

        except runqueue.TaskFailure, (fnid, fn, taskname):
            print "ERROR: '%s, %s' failed" % (fn, taskname)
            global last_exception
            last_exception = runqueue.TaskFailure

        except build.EventException, e:
            print "ERROR: Couldn't build '%s'" % names
            global last_exception
            last_exception = e

        cooker.configuration.cmd = oldcmd

@@ -233,7 +257,7 @@ class BitBakeShellCommands:
        item = data.getVar('PN', bbfile_data, 1)
        data.setVar( "_task_cache", [], bbfile_data ) # force
        try:
            cooker.tryBuildPackage( os.path.abspath( bf ), item, bbfile_data )
            cooker.tryBuildPackage( os.path.abspath( bf ), item, cmd, bbfile_data, True )
        except build.EventException, e:
            print "ERROR: Couldn't build '%s'" % name
            global last_exception

@@ -255,8 +279,7 @@ class BitBakeShellCommands:

    def fileRebuild( self, params ):
        """Rebuild (clean & build) a .bb file"""
        self.fileClean( params )
        self.fileBuild( params )
        self.fileBuild( params, "rebuild" )
    fileRebuild.usage = "<bbfile>"

    def fileReparse( self, params ):

@@ -265,14 +288,19 @@ class BitBakeShellCommands:
        print "SHELL: Parsing '%s'" % bbfile
        parse.update_mtime( bbfile )
        cooker.bb_cache.cacheValidUpdate(bbfile)
        fromCache = cooker.bb_cache.loadData(bbfile, cooker)
        fromCache = cooker.bb_cache.loadData(bbfile, cooker.configuration.data)
        cooker.bb_cache.sync()
        if False: #from Cache
        if False: #fromCache:
            print "SHELL: File has not been updated, not reparsing"
        else:
            print "SHELL: Parsed"
    fileReparse.usage = "<bbfile>"

    def abort( self, params ):
        """Toggle abort task execution flag (see bitbake -k)"""
        cooker.configuration.abort = not cooker.configuration.abort
        print "SHELL: Abort Flag is now '%s'" % repr( cooker.configuration.abort )

    def force( self, params ):
        """Toggle force task execution flag (see bitbake -f)"""
        cooker.configuration.force = not cooker.configuration.force

@@ -365,18 +393,14 @@ SRC_URI = ""
    new.usage = "<directory> <filename>"

    def pasteBin( self, params ):
        """Send a command + output buffer to http://pastebin.com"""
        """Send a command + output buffer to the pastebin at http://rafb.net/paste"""
        index = params[0]
        contents = self._shell.myout.buffer( int( index ) )
        status, error, location = sendToPastebin( contents )
        if status == 302:
            print "SHELL: Pasted to %s" % location
        else:
            print "ERROR: %s %s" % ( status, error )
        sendToPastebin( "output of " + params[0], contents )
    pasteBin.usage = "<index>"

    def pasteLog( self, params ):
        """Send the last event exception error log (if there is one) to http://oe.pastebin.com"""
        """Send the last event exception error log (if there is one) to http://rafb.net/paste"""
        if last_exception is None:
            print "SHELL: No Errors yet (Phew)..."
        else:

@@ -387,12 +411,8 @@ SRC_URI = ""
            filename = filename.strip()
            print "SHELL: Pasting log file to pastebin..."

            status, error, location = sendToPastebin( open( filename ).read() )

            if status == 302:
                print "SHELL: Pasted to %s" % location
            else:
                print "ERROR: %s %s" % ( status, error )
            file = open( filename ).read()
            sendToPastebin( "contents of " + filename, file )

    def patch( self, params ):
        """Execute 'patch' command on a providee"""

@@ -401,12 +421,13 @@ SRC_URI = ""

    def parse( self, params ):
        """(Re-)parse .bb files and calculate the dependency graph"""
        cooker.status = cooker.ParsingStatus()
        cooker.status = cache.CacheData()
        ignore = data.getVar("ASSUME_PROVIDED", cooker.configuration.data, 1) or ""
        cooker.status.ignored_dependencies = set( ignore.split() )
        cooker.handleCollections( data.getVar("BBFILE_COLLECTIONS", cooker.configuration.data, 1) )

        cooker.collect_bbfiles( cooker.myProgressCallback )
        (filelist, masked) = cooker.collect_bbfiles()
        cooker.parse_bbfiles(filelist, masked, cooker.myProgressCallback)
        cooker.buildDepgraph()
        global parsed
        parsed = True

@@ -434,7 +455,7 @@ SRC_URI = ""
        name, var = params
        bbfile = self._findProvider( name )
        if bbfile is not None:
            the_data = cooker.bb_cache.loadDataFull(bbfile, cooker)
            the_data = cooker.bb_cache.loadDataFull(bbfile, cooker.configuration.data)
            value = the_data.getVar( var, 1 )
            print value
        else:

@@ -538,7 +559,8 @@ SRC_URI = ""
        if not preferred: preferred = item

        try:
            lv, lf, pv, pf = cooker.findBestProvider( preferred )
            lv, lf, pv, pf = Providers.findBestProvider(preferred, cooker.configuration.data, cooker.status,
                                                        cooker.build_cache_fail)
        except KeyError:
            lv, lf, pv, pf = (None,)*4

@@ -565,24 +587,29 @@ def completeFilePath( bbfile ):
            return key
    return bbfile

def sendToPastebin( content ):
def sendToPastebin( desc, content ):
    """Send content to the pastebin at http://rafb.net/paste"""
    mydata = {}
    mydata["parent_pid"] = ""
    mydata["format"] = "bash"
    mydata["code2"] = content
    mydata["paste"] = "Send"
    mydata["poster"] = "%s@%s" % ( os.environ.get( "USER", "unknown" ), socket.gethostname() or "unknown" )
    mydata["lang"] = "Plain Text"
    mydata["desc"] = desc
    mydata["cvt_tabs"] = "No"
    mydata["nick"] = "%s@%s" % ( os.environ.get( "USER", "unknown" ), socket.gethostname() or "unknown" )
    mydata["text"] = content
    params = urllib.urlencode( mydata )
    headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain"}

    conn = httplib.HTTPConnection( "oe.pastebin.com:80" )
    conn.request("POST", "/", params, headers )
    host = "rafb.net"
    conn = httplib.HTTPConnection( "%s:80" % host )
    conn.request("POST", "/paste/paste.php", params, headers )

    response = conn.getresponse()
    conn.close()

    return response.status, response.reason, response.getheader( "location" ) or "unknown"
    if response.status == 302:
        location = response.getheader( "location" ) or "unknown"
        print "SHELL: Pasted to http://%s%s" % ( host, location )
    else:
        print "ERROR: %s %s" % ( response.status, response.reason )
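With the new signature, callers pass a description alongside the content and the function does its own reporting: on a 302 redirect it prints the paste URL itself instead of returning a status tuple. Usage, exactly as the reworked pasteBin/pasteLog commands above call it:

    # inside the shell, after capturing some command output:
    sendToPastebin( "output of build foo", "log text here..." )
    # on success the function itself prints something like:
    #   SHELL: Pasted to http://rafb.net/paste/...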
def completer( text, state ):
    """Return a possible readline completion"""
lib/bb/taskdata.py (new file):

@@ -0,0 +1,558 @@
#!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'TaskData' implementation

Task data collection and handling

Copyright (C) 2006 Richard Purdie

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License version 2 as published by the Free
Software Foundation

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
"""

from bb import data, fetch, event, mkdirhier, utils
import bb, os

class TaskData:
    """
    BitBake Task Data implementation
    """
    def __init__(self, abort = True):
        self.build_names_index = []
        self.run_names_index = []
        self.fn_index = []

        self.build_targets = {}
        self.run_targets = {}

        self.external_targets = []

        self.tasks_fnid = []
        self.tasks_name = []
        self.tasks_tdepends = []
        # Cache to speed up task ID lookups
        self.tasks_lookup = {}

        self.depids = {}
        self.rdepids = {}

        self.consider_msgs_cache = []

        self.failed_deps = []
        self.failed_rdeps = []
        self.failed_fnids = []

        self.abort = abort

    def getbuild_id(self, name):
        """
        Return an ID number for the build target name.
        If it doesn't exist, create one.
        """
        if not name in self.build_names_index:
            self.build_names_index.append(name)
            return len(self.build_names_index) - 1

        return self.build_names_index.index(name)

    def getrun_id(self, name):
        """
        Return an ID number for the run target name.
        If it doesn't exist, create one.
        """
        if not name in self.run_names_index:
            self.run_names_index.append(name)
            return len(self.run_names_index) - 1

        return self.run_names_index.index(name)

    def getfn_id(self, name):
        """
        Return an ID number for the filename.
        If it doesn't exist, create one.
        """
        if not name in self.fn_index:
            self.fn_index.append(name)
            return len(self.fn_index) - 1

        return self.fn_index.index(name)
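All three lookups follow the same string-interning pattern: a list gives id -> name, and list.index() gives name -> id. That makes each lookup O(n); a dictionary kept alongside the list makes both directions O(1) at the cost of a little memory. A sketch of that alternative (not what the patch does, just a common refinement):

    class Interner:
        def __init__(self):
            self.names = []    # id -> name
            self.ids = {}      # name -> id, shadow index for O(1) lookup

        def get_id(self, name):
            if name not in self.ids:
                self.ids[name] = len(self.names)
                self.names.append(name)
            return self.ids[name]

    i = Interner()
    print(i.get_id("busybox"), i.get_id("glibc"), i.get_id("busybox"))
    # -> 0 1 0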
def gettask_id(self, fn, task):
|
||||
"""
|
||||
Return an ID number for the task matching fn and task.
|
||||
If it doesn't exist, create one.
|
||||
"""
|
||||
fnid = self.getfn_id(fn)
|
||||
|
||||
if fnid in self.tasks_lookup:
|
||||
if task in self.tasks_lookup[fnid]:
|
||||
return self.tasks_lookup[fnid][task]
|
||||
|
||||
self.tasks_name.append(task)
|
||||
self.tasks_fnid.append(fnid)
|
||||
self.tasks_tdepends.append([])
|
||||
|
||||
listid = len(self.tasks_name) - 1
|
||||
|
||||
if fnid not in self.tasks_lookup:
|
||||
self.tasks_lookup[fnid] = {}
|
||||
self.tasks_lookup[fnid][task] = listid
|
||||
|
||||
return listid
|
||||
|
||||
def add_tasks(self, fn, dataCache):
|
||||
"""
|
||||
Add tasks for a given fn to the database
|
||||
"""
|
||||
|
||||
task_graph = dataCache.task_queues[fn]
|
||||
task_deps = dataCache.task_deps[fn]
|
||||
|
||||
fnid = self.getfn_id(fn)
|
||||
|
||||
if fnid in self.failed_fnids:
|
||||
bb.msg.fatal(bb.msg.domain.TaskData, "Trying to re-add a failed file? Something is broken...")
|
||||
|
||||
# Check if we've already seen this fn
|
||||
if fnid in self.tasks_fnid:
|
||||
return
|
||||
|
||||
# Work out task dependencies
|
||||
for task in task_graph.allnodes():
|
||||
parentids = []
|
||||
for dep in task_graph.getparents(task):
|
||||
parentid = self.gettask_id(fn, dep)
|
||||
parentids.append(parentid)
|
||||
taskid = self.gettask_id(fn, task)
|
||||
self.tasks_tdepends[taskid].extend(parentids)
|
||||
|
||||
# Work out build dependencies
|
||||
if not fnid in self.depids:
|
||||
dependids = {}
|
||||
for depend in dataCache.deps[fn]:
|
||||
bb.msg.debug(2, bb.msg.domain.TaskData, "Added dependency %s for %s" % (depend, fn))
|
||||
dependids[self.getbuild_id(depend)] = None
|
||||
self.depids[fnid] = dependids.keys()
|
||||
|
||||
# Work out runtime dependencies
|
||||
if not fnid in self.rdepids:
|
||||
rdependids = {}
|
||||
rdepends = dataCache.rundeps[fn]
|
||||
rrecs = dataCache.runrecs[fn]
|
||||
for package in rdepends:
|
||||
for rdepend in rdepends[package]:
|
||||
bb.msg.debug(2, bb.msg.domain.TaskData, "Added runtime dependency %s for %s" % (rdepend, fn))
|
||||
rdependids[self.getrun_id(rdepend)] = None
|
||||
for package in rrecs:
|
||||
for rdepend in rrecs[package]:
|
||||
bb.msg.debug(2, bb.msg.domain.TaskData, "Added runtime recommendation %s for %s" % (rdepend, fn))
|
||||
rdependids[self.getrun_id(rdepend)] = None
|
||||
self.rdepids[fnid] = rdependids.keys()
|
||||
|
||||
for dep in self.depids[fnid]:
|
||||
if dep in self.failed_deps:
|
||||
self.fail_fnid(fnid)
|
||||
return
|
||||
for dep in self.rdepids[fnid]:
|
||||
if dep in self.failed_rdeps:
|
||||
self.fail_fnid(fnid)
|
||||
return
|
||||
|
||||
def have_build_target(self, target):
|
||||
"""
|
||||
Have we a build target matching this name?
|
||||
"""
|
||||
targetid = self.getbuild_id(target)
|
||||
|
||||
if targetid in self.build_targets:
|
||||
return True
|
||||
return False
|
||||
|
||||
def have_runtime_target(self, target):
|
||||
"""
|
||||
Have we a runtime target matching this name?
|
||||
"""
|
||||
targetid = self.getrun_id(target)
|
||||
|
||||
if targetid in self.run_targets:
|
||||
return True
|
||||
return False
|
||||
|
||||
def add_build_target(self, fn, item):
|
||||
"""
|
||||
Add a build target.
|
||||
If already present, append the provider fn to the list
|
||||
"""
|
||||
targetid = self.getbuild_id(item)
|
||||
fnid = self.getfn_id(fn)
|
||||
|
||||
if targetid in self.build_targets:
|
||||
if fnid in self.build_targets[targetid]:
|
||||
return
|
||||
self.build_targets[targetid].append(fnid)
|
||||
return
|
||||
self.build_targets[targetid] = [fnid]
|
||||
|
||||
def add_runtime_target(self, fn, item):
|
||||
"""
|
||||
Add a runtime target.
|
||||
If already present, append the provider fn to the list
|
||||
"""
|
||||
targetid = self.getrun_id(item)
|
||||
fnid = self.getfn_id(fn)
|
||||
|
||||
if targetid in self.run_targets:
|
||||
if fnid in self.run_targets[targetid]:
|
||||
return
|
||||
self.run_targets[targetid].append(fnid)
|
||||
return
|
||||
self.run_targets[targetid] = [fnid]
|
||||
|
||||
def mark_external_target(self, item):
|
||||
"""
|
||||
Mark a build target as being externally requested
|
||||
"""
|
||||
targetid = self.getbuild_id(item)
|
||||
|
||||
if targetid not in self.external_targets:
|
||||
self.external_targets.append(targetid)
|
||||
|
||||
def get_unresolved_build_targets(self, dataCache):
|
||||
"""
|
||||
Return a list of build targets who's providers
|
||||
are unknown.
|
||||
"""
|
||||
unresolved = []
|
||||
for target in self.build_names_index:
|
||||
if target in dataCache.ignored_dependencies:
|
||||
continue
|
||||
if self.build_names_index.index(target) in self.failed_deps:
|
||||
continue
|
||||
if not self.have_build_target(target):
|
||||
unresolved.append(target)
|
||||
return unresolved
|
||||
|
||||
def get_unresolved_run_targets(self, dataCache):
|
||||
"""
|
||||
Return a list of runtime targets who's providers
|
||||
are unknown.
|
||||
"""
|
||||
unresolved = []
|
||||
for target in self.run_names_index:
|
||||
if target in dataCache.ignored_dependencies:
|
||||
continue
|
||||
if self.run_names_index.index(target) in self.failed_rdeps:
|
||||
continue
|
||||
if not self.have_runtime_target(target):
|
||||
unresolved.append(target)
|
||||
return unresolved
|
||||
|
||||
def get_provider(self, item):
|
||||
"""
|
||||
Return a list of providers of item
|
||||
"""
|
||||
targetid = self.getbuild_id(item)
|
||||
|
||||
return self.build_targets[targetid]
|
||||
|
||||
def get_dependees(self, itemid):
|
||||
"""
|
||||
Return a list of targets which depend on item
|
||||
"""
|
||||
dependees = []
|
||||
for fnid in self.depids:
|
||||
if itemid in self.depids[fnid]:
|
||||
dependees.append(fnid)
|
||||
return dependees
|
||||
|
||||
def get_dependees_str(self, item):
|
||||
"""
|
||||
Return a list of targets which depend on item as a user readable string
|
||||
"""
|
||||
itemid = self.getbuild_id(item)
|
||||
dependees = []
|
||||
for fnid in self.depids:
|
||||
if itemid in self.depids[fnid]:
|
||||
dependees.append(self.fn_index[fnid])
|
||||
return dependees
|
||||
|
||||
def get_rdependees(self, itemid):
|
||||
"""
|
||||
Return a list of targets which depend on runtime item
|
||||
"""
|
||||
dependees = []
|
||||
for fnid in self.rdepids:
|
||||
if itemid in self.rdepids[fnid]:
|
||||
dependees.append(fnid)
|
||||
return dependees
|
||||
|
||||
def get_rdependees_str(self, item):
|
||||
"""
|
||||
Return a list of targets which depend on runtime item as a user readable string
|
||||
"""
|
||||
itemid = self.getrun_id(item)
|
||||
dependees = []
|
||||
for fnid in self.rdepids:
|
||||
if itemid in self.rdepids[fnid]:
|
||||
dependees.append(self.fn_index[fnid])
|
||||
return dependees
|
||||
|
||||
def add_provider(self, cfgData, dataCache, item):
|
||||
try:
|
||||
self.add_provider_internal(cfgData, dataCache, item)
|
||||
except bb.providers.NoProvider:
|
||||
if self.abort:
|
||||
bb.msg.error(bb.msg.domain.Provider, "No providers of build target %s (for %s)" % (item, self.get_dependees_str(item)))
|
||||
raise
|
||||
targetid = self.getbuild_id(item)
|
||||
self.remove_buildtarget(targetid)
|
||||
|
||||
self.mark_external_target(item)
|
||||
|
||||
def add_provider_internal(self, cfgData, dataCache, item):
|
||||
"""
|
||||
Add the providers of item to the task data
|
||||
Mark entries were specifically added externally as against dependencies
|
||||
added internally during dependency resolution
|
||||
"""
|
||||
|
||||
if item in dataCache.ignored_dependencies:
|
||||
return
|
||||
|
||||
if not item in dataCache.providers:
|
||||
bb.msg.debug(1, bb.msg.domain.Provider, "No providers of build target %s (for %s)" % (item, self.get_dependees_str(item)))
|
||||
bb.event.fire(bb.event.NoProvider(item, cfgData))
|
||||
raise bb.providers.NoProvider(item)
|
||||
|
||||
if self.have_build_target(item):
|
||||
return
|
||||
|
||||
all_p = dataCache.providers[item]
|
||||
|
||||
eligible = bb.providers.filterProviders(all_p, item, cfgData, dataCache)
|
||||
|
||||
for p in eligible:
|
||||
fnid = self.getfn_id(p)
|
||||
if fnid in self.failed_fnids:
|
||||
eligible.remove(p)
|
||||
|
||||
if not eligible:
|
||||
bb.msg.debug(1, bb.msg.domain.Provider, "No providers of build target %s after filtering (for %s)" % (item, self.get_dependees_str(item)))
|
||||
bb.event.fire(bb.event.NoProvider(item, cfgData))
|
||||
raise bb.providers.NoProvider(item)
|
||||
|
||||
prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % item, cfgData, 1)
|
||||
if prefervar:
|
||||
dataCache.preferred[item] = prefervar
|
||||
|
||||
discriminated = False
|
||||
if item in dataCache.preferred:
|
||||
for p in eligible:
|
||||
pn = dataCache.pkg_fn[p]
|
||||
if dataCache.preferred[item] == pn:
|
||||
bb.msg.note(2, bb.msg.domain.Provider, "selecting %s to satisfy %s due to PREFERRED_PROVIDERS" % (pn, item))
|
||||
eligible.remove(p)
|
||||
eligible = [p] + eligible
|
||||
discriminated = True
|
||||
break
|
||||
|
||||
if len(eligible) > 1 and discriminated == False:
|
||||
if item not in self.consider_msgs_cache:
|
||||
providers_list = []
|
||||
for fn in eligible:
|
||||
providers_list.append(dataCache.pkg_fn[fn])
|
||||
bb.msg.note(1, bb.msg.domain.Provider, "multiple providers are available for %s (%s);" % (item, ", ".join(providers_list)))
|
||||
bb.msg.note(1, bb.msg.domain.Provider, "consider defining PREFERRED_PROVIDER_%s" % item)
|
||||
bb.event.fire(bb.event.MultipleProviders(item,providers_list,cfgData))
|
||||
self.consider_msgs_cache.append(item)
|
||||
|
||||
for fn in eligible:
|
||||
fnid = self.getfn_id(fn)
|
||||
if fnid in self.failed_fnids:
|
||||
continue
|
||||
bb.msg.debug(2, bb.msg.domain.Provider, "adding %s to satisfy %s" % (fn, item))
|
||||
self.add_build_target(fn, item)
|
||||
self.add_tasks(fn, dataCache)
|
||||
|
||||
|
||||
#item = dataCache.pkg_fn[fn]
|
||||
|
||||
def add_rprovider(self, cfgData, dataCache, item):
|
||||
"""
|
||||
Add the runtime providers of item to the task data
|
||||
(takes item names from RDEPENDS/PACKAGES namespace)
|
||||
"""
|
||||
|
||||
if item in dataCache.ignored_dependencies:
|
||||
return
|
||||
|
||||
if self.have_runtime_target(item):
|
||||
return
|
||||
|
||||
all_p = bb.providers.getRuntimeProviders(dataCache, item)
|
||||
|
||||
if not all_p:
|
||||
bb.msg.error(bb.msg.domain.Provider, "No providers of runtime build target %s (for %s)" % (item, self.get_rdependees_str(item)))
|
||||
bb.event.fire(bb.event.NoProvider(item, cfgData, runtime=True))
|
||||
raise bb.providers.NoRProvider(item)
|
||||
|
||||
eligible = bb.providers.filterProviders(all_p, item, cfgData, dataCache)
|
||||
|
||||
for p in eligible:
|
||||
fnid = self.getfn_id(p)
|
||||
if fnid in self.failed_fnids:
|
||||
eligible.remove(p)
|
||||
|
||||
if not eligible:
|
||||
bb.msg.error(bb.msg.domain.Provider, "No providers of runtime build target %s after filtering (for %s)" % (item, self.get_rdependees_str(item)))
|
||||
bb.event.fire(bb.event.NoProvider(item, cfgData, runtime=True))
|
||||
raise bb.providers.NoRProvider(item)
|
||||
|
||||
# Should use dataCache.preferred here?
|
||||
preferred = []
|
||||
for p in eligible:
|
||||
pn = dataCache.pkg_fn[p]
|
||||
provides = dataCache.pn_provides[pn]
|
||||
for provide in provides:
|
||||
prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % provide, cfgData, 1)
|
||||
if prefervar == pn:
|
||||
bb.msg.note(2, bb.msg.domain.Provider, "selecting %s to satisfy runtime %s due to PREFERRED_PROVIDERS" % (pn, item))
|
||||
eligible.remove(p)
|
||||
eligible = [p] + eligible
|
||||
preferred.append(p)
|
||||
|
||||
if len(eligible) > 1 and len(preferred) == 0:
|
||||
if item not in self.consider_msgs_cache:
|
||||
providers_list = []
|
||||
for fn in eligible:
|
||||
providers_list.append(dataCache.pkg_fn[fn])
|
||||
bb.msg.note(2, bb.msg.domain.Provider, "multiple providers are available for runtime %s (%s);" % (item, ", ".join(providers_list)))
|
||||
bb.msg.note(2, bb.msg.domain.Provider, "consider defining a PREFERRED_PROVIDER entry to match runtime %s" % item)
|
||||
bb.event.fire(bb.event.MultipleProviders(item,providers_list, cfgData, runtime=True))
|
||||
self.consider_msgs_cache.append(item)
|
||||
|
||||
if len(preferred) > 1:
|
||||
if item not in self.consider_msgs_cache:
|
||||
providers_list = []
|
||||
for fn in preferred:
|
||||
providers_list.append(dataCache.pkg_fn[fn])
|
||||
bb.msg.note(2, bb.msg.domain.Provider, "multiple preferred providers are available for runtime %s (%s);" % (item, ", ".join(providers_list)))
|
||||
bb.msg.note(2, bb.msg.domain.Provider, "consider defining only one PREFERRED_PROVIDER entry to match runtime %s" % item)
|
||||
bb.event.fire(bb.event.MultipleProviders(item,providers_list, cfgData, runtime=True))
|
||||
self.consider_msgs_cache.append(item)
|
||||
|
||||
# run through the list until we find one that we can build
|
||||
for fn in eligible:
|
||||
fnid = self.getfn_id(fn)
|
||||
if fnid in self.failed_fnids:
|
||||
continue
|
||||
bb.msg.debug(2, bb.msg.domain.Provider, "adding %s to satisfy runtime %s" % (fn, item))
|
||||
self.add_runtime_target(fn, item)
|
||||
self.add_tasks(fn, dataCache)
|
||||
|

    def fail_fnid(self, fnid):
        """
        Mark a file as failed (unbuildable)
        Remove any references from build and runtime provider lists
        """
        if fnid in self.failed_fnids:
            return
        bb.msg.debug(1, bb.msg.domain.Provider, "Removing failed file %s" % self.fn_index[fnid])
        self.failed_fnids.append(fnid)
        for target in self.build_targets:
            if fnid in self.build_targets[target]:
                self.build_targets[target].remove(fnid)
                if len(self.build_targets[target]) == 0:
                    self.remove_buildtarget(target)
        for target in self.run_targets:
            if fnid in self.run_targets[target]:
                self.run_targets[target].remove(fnid)
                if len(self.run_targets[target]) == 0:
                    self.remove_runtarget(target)

    def remove_buildtarget(self, targetid):
        """
        Mark a build target as failed (unbuildable)
        Trigger removal of any files that have this as a dependency
        """
        bb.msg.debug(1, bb.msg.domain.Provider, "Removing failed build target %s" % self.build_names_index[targetid])
        self.failed_deps.append(targetid)
        dependees = self.get_dependees(targetid)
        for fnid in dependees:
            self.fail_fnid(fnid)
        if self.abort and targetid in self.external_targets:
            bb.msg.error(bb.msg.domain.Provider, "No buildable providers available for required build target %s" % self.build_names_index[targetid])
            raise bb.providers.NoProvider

    def remove_runtarget(self, targetid):
        """
        Mark a run target as failed (unbuildable)
        Trigger removal of any files that have this as a dependency
        """
        bb.msg.note(1, bb.msg.domain.Provider, "Removing failed runtime build target %s" % self.run_names_index[targetid])
        self.failed_rdeps.append(targetid)
        dependees = self.get_rdependees(targetid)
        for fnid in dependees:
            self.fail_fnid(fnid)
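
fail_fnid, remove_buildtarget and remove_runtarget are mutually recursive: failing a file can empty a target's provider list, which fails the target, which in turn fails every file that depended on it, until the graph stops shrinking. A self-contained toy model of that cascade (build targets only, no abort handling, plain dicts instead of the real TaskData indexes):

    build_targets = {"virtual/kernel": ["linux.bb"],   # target -> providing files
                     "busybox":        ["busybox.bb"]}
    depends = {"busybox.bb": ["virtual/kernel"],       # file -> targets it needs
               "linux.bb":   []}
    failed_files, failed_targets = set(), set()

    def fail_file(fn):
        if fn in failed_files:
            return
        failed_files.add(fn)
        for target in build_targets:
            if fn in build_targets[target]:
                build_targets[target].remove(fn)
                if not build_targets[target]:
                    remove_target(target)

    def remove_target(target):
        failed_targets.add(target)
        for fn in depends:                 # dependees become unbuildable too
            if target in depends[fn]:
                fail_file(fn)

    fail_file("linux.bb")
    print sorted(failed_files)    # ['busybox.bb', 'linux.bb']
    print sorted(failed_targets)  # ['busybox', 'virtual/kernel']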

    def add_unresolved(self, cfgData, dataCache):
        """
        Resolve all unresolved build and runtime targets
        """
        bb.msg.note(1, bb.msg.domain.TaskData, "Resolving missing task queue dependencies")
        while 1:
            added = 0
            for target in self.get_unresolved_build_targets(dataCache):
                try:
                    self.add_provider_internal(cfgData, dataCache, target)
                    added = added + 1
                except bb.providers.NoProvider:
                    targetid = self.getbuild_id(target)
                    if self.abort and targetid in self.external_targets:
                        raise
                    self.remove_buildtarget(targetid)
            for target in self.get_unresolved_run_targets(dataCache):
                try:
                    self.add_rprovider(cfgData, dataCache, target)
                    added = added + 1
                except bb.providers.NoRProvider:
                    self.remove_runtarget(self.getrun_id(target))
            bb.msg.debug(1, bb.msg.domain.TaskData, "Resolved " + str(added) + " extra dependencies")
            if added == 0:
                break
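
add_unresolved is the outer driver: each pass tries to resolve everything still missing, and it only stops once a complete pass adds nothing, so a provider that fails late can still be replaced by an alternate on the next pass. A hedged sketch of how a caller such as the cooker would drive it; the constructor flag and the add_provider seeding step are assumptions based on the methods visible in this diff:

    # Hedged usage sketch; cfgData/dataCache setup omitted.
    taskdata = bb.taskdata.TaskData(abort = True)   # assumed constructor flag
    for target in ["busybox", "virtual/kernel"]:    # example target names
        taskdata.add_provider(cfgData, dataCache, target)
    taskdata.add_unresolved(cfgData, dataCache)     # iterate to a fixed point
    taskdata.dump_data()                            # msg-level-3 debug dump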

    def dump_data(self):
        """
        Dump some debug information on the internal data structures
        """
        bb.msg.debug(3, bb.msg.domain.TaskData, "build_names:")
        bb.msg.debug(3, bb.msg.domain.TaskData, ", ".join(self.build_names_index))
        bb.msg.debug(3, bb.msg.domain.TaskData, "run_names:")
        bb.msg.debug(3, bb.msg.domain.TaskData, ", ".join(self.run_names_index))
        bb.msg.debug(3, bb.msg.domain.TaskData, "build_targets:")
        for target in self.build_targets.keys():
            bb.msg.debug(3, bb.msg.domain.TaskData, " %s: %s" % (self.build_names_index[target], self.build_targets[target]))
        bb.msg.debug(3, bb.msg.domain.TaskData, "run_targets:")
        for target in self.run_targets.keys():
            bb.msg.debug(3, bb.msg.domain.TaskData, " %s: %s" % (self.run_names_index[target], self.run_targets[target]))
        bb.msg.debug(3, bb.msg.domain.TaskData, "tasks:")
        for task in range(len(self.tasks_name)):
            bb.msg.debug(3, bb.msg.domain.TaskData, " (%s)%s - %s: %s" % (
                task,
                self.fn_index[self.tasks_fnid[task]],
                self.tasks_name[task],
                self.tasks_tdepends[task]))
        bb.msg.debug(3, bb.msg.domain.TaskData, "runtime ids (per fn):")
        for fnid in self.rdepids:
            bb.msg.debug(3, bb.msg.domain.TaskData, " %s %s: %s" % (fnid, self.fn_index[fnid], self.rdepids[fnid]))

bitbake/lib/bb/utils.py
@@ -103,11 +103,11 @@ def _print_trace(body, line):
     import bb
 
     # print the environment of the method
-    bb.error("Printing the environment of the function")
+    bb.msg.error(bb.msg.domain.Util, "Printing the environment of the function")
     min_line = max(1,line-4)
     max_line = min(line+4,len(body)-1)
     for i in range(min_line,max_line+1):
-        bb.error("\t%.4d:%s" % (i, body[i-1]) )
+        bb.msg.error(bb.msg.domain.Util, "\t%.4d:%s" % (i, body[i-1]) )
 
 
 def better_compile(text, file, realfile):
@@ -122,9 +122,9 @@ def better_compile(text, file, realfile):
 
         # split the text into lines again
         body = text.split('\n')
-        bb.error("Error in compiling: ", realfile)
-        bb.error("The lines resulting into this error were:")
-        bb.error("\t%d:%s:'%s'" % (e.lineno, e.__class__.__name__, body[e.lineno-1]))
+        bb.msg.error(bb.msg.domain.Util, "Error in compiling: ", realfile)
+        bb.msg.error(bb.msg.domain.Util, "The lines resulting into this error were:")
+        bb.msg.error(bb.msg.domain.Util, "\t%d:%s:'%s'" % (e.lineno, e.__class__.__name__, body[e.lineno-1]))
 
         _print_trace(body, e.lineno)
 
@@ -147,8 +147,8 @@ def better_exec(code, context, text, realfile):
             raise
 
         # print the Header of the Error Message
-        bb.error("Error in executing: ", realfile)
-        bb.error("Exception:%s Message:%s" % (t,value) )
+        bb.msg.error(bb.msg.domain.Util, "Error in executing: ", realfile)
+        bb.msg.error(bb.msg.domain.Util, "Exception:%s Message:%s" % (t,value) )
 
         # let us find the line number now
         while tb.tb_next:
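
All three hunks above are the same mechanical migration: bare bb.error() calls give way to bb.msg.error() and friends, which take a message domain (and, for note and debug, a verbosity level) as leading arguments. The call shapes below are exactly the ones used elsewhere in this commit; behaviour beyond these signatures is not shown in the diff:

    import bb

    bb.msg.error(bb.msg.domain.Util, "Error in executing: somefile.bb")
    bb.msg.note(2, bb.msg.domain.Provider, "multiple providers are available ...")
    bb.msg.debug(3, bb.msg.domain.TaskData, "build_names:")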
@@ -160,3 +160,43 @@ def better_exec(code, context, text, realfile):
         _print_trace( text.split('\n'), line )
 
         raise
+
+def Enum(*names):
+    """
+    A simple class to give Enum support
+    """
+
+    assert names, "Empty enums are not supported"
+
+    class EnumClass(object):
+        __slots__ = names
+        def __iter__(self):        return iter(constants)
+        def __len__(self):         return len(constants)
+        def __getitem__(self, i):  return constants[i]
+        def __repr__(self):        return 'Enum' + str(names)
+        def __str__(self):         return 'enum ' + str(constants)
+
+    class EnumValue(object):
+        __slots__ = ('__value')
+        def __init__(self, value): self.__value = value
+        Value = property(lambda self: self.__value)
+        EnumType = property(lambda self: EnumType)
+        def __hash__(self):        return hash(self.__value)
+        def __cmp__(self, other):
+            # C fans might want to remove the following assertion
+            # to make all enums comparable by ordinal value {;))
+            assert self.EnumType is other.EnumType, "Only values from the same enum are comparable"
+            return cmp(self.__value, other.__value)
+        def __invert__(self):      return constants[maximum - self.__value]
+        def __nonzero__(self):     return bool(self.__value)
+        def __repr__(self):        return str(names[self.__value])
+
+    maximum = len(names) - 1
+    constants = [None] * len(names)
+    for i, each in enumerate(names):
+        val = EnumValue(i)
+        setattr(EnumClass, each, val)
+        constants[i] = val
+    constants = tuple(constants)
+    EnumType = EnumClass()
+    return EnumType
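
The Enum helper is what makes identifiers such as bb.msg.domain.Util possible in the first place. A short usage sketch; the three domain names are taken from calls in this diff, and the full list lives in bb/msg.py rather than here:

    domain = Enum('Provider', 'TaskData', 'Util')

    print domain.Util                      # Util
    print domain[0]                        # Provider
    print len(domain)                      # 3
    assert domain.Provider < domain.Util   # __cmp__ orders by declaration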