Apply some 2to3 transforms that don't cause issues in 2.6

(Bitbake rev: d39ab776e7ceaefc8361150151cf0892dcb70d9c)

Signed-off-by: Chris Larson <chris_larson@mentor.com>
Signed-off-by: Richard Purdie <rpurdie@linux.intel.com>
This commit is contained in:
Chris Larson 2010-04-11 17:03:55 -07:00 committed by Richard Purdie
parent 5b216c8000
commit 1180bab54e
26 changed files with 268 additions and 269 deletions

View File

@ -140,7 +140,7 @@ def exec_func(func, d, dirs = None):
so = os.popen("tee \"%s\"" % logfile, "w") so = os.popen("tee \"%s\"" % logfile, "w")
else: else:
so = file(logfile, 'w') so = file(logfile, 'w')
except OSError, e: except OSError as e:
bb.msg.error(bb.msg.domain.Build, "opening log file: %s" % e) bb.msg.error(bb.msg.domain.Build, "opening log file: %s" % e)
pass pass
@ -285,7 +285,7 @@ def exec_task(task, d):
event.fire(TaskStarted(task, localdata), localdata) event.fire(TaskStarted(task, localdata), localdata)
exec_func(task, localdata) exec_func(task, localdata)
event.fire(TaskSucceeded(task, localdata), localdata) event.fire(TaskSucceeded(task, localdata), localdata)
except FuncFailed, message: except FuncFailed as message:
# Try to extract the optional logfile # Try to extract the optional logfile
try: try:
(msg, logfile) = message (msg, logfile) = message

View File

@ -61,7 +61,7 @@ class Cache:
return return
self.has_cache = True self.has_cache = True
self.cachefile = os.path.join(self.cachedir,"bb_cache.dat") self.cachefile = os.path.join(self.cachedir, "bb_cache.dat")
bb.msg.debug(1, bb.msg.domain.Cache, "Using cache in '%s'" % self.cachedir) bb.msg.debug(1, bb.msg.domain.Cache, "Using cache in '%s'" % self.cachedir)
try: try:
@ -82,9 +82,9 @@ class Cache:
p = pickle.Unpickler(file(self.cachefile, "rb")) p = pickle.Unpickler(file(self.cachefile, "rb"))
self.depends_cache, version_data = p.load() self.depends_cache, version_data = p.load()
if version_data['CACHE_VER'] != __cache_version__: if version_data['CACHE_VER'] != __cache_version__:
raise ValueError, 'Cache Version Mismatch' raise ValueError('Cache Version Mismatch')
if version_data['BITBAKE_VER'] != bb.__version__: if version_data['BITBAKE_VER'] != bb.__version__:
raise ValueError, 'Bitbake Version Mismatch' raise ValueError('Bitbake Version Mismatch')
except EOFError: except EOFError:
bb.msg.note(1, bb.msg.domain.Cache, "Truncated cache found, rebuilding...") bb.msg.note(1, bb.msg.domain.Cache, "Truncated cache found, rebuilding...")
self.depends_cache = {} self.depends_cache = {}
@ -446,7 +446,7 @@ class Cache:
self.getVar('__BB_DONT_CACHE', file_name, True) self.getVar('__BB_DONT_CACHE', file_name, True)
self.getVar('__VARIANTS', file_name, True) self.getVar('__VARIANTS', file_name, True)
def load_bbfile( self, bbfile , config): def load_bbfile( self, bbfile, config):
""" """
Load and parse one .bb build file Load and parse one .bb build file
Return the data and whether parsing resulted in the file being skipped Return the data and whether parsing resulted in the file being skipped

View File

@ -1,190 +1,190 @@
""" """
Python Daemonizing helper Python Daemonizing helper
Configurable daemon behaviors: Configurable daemon behaviors:
1.) The current working directory set to the "/" directory. 1.) The current working directory set to the "/" directory.
2.) The current file creation mode mask set to 0. 2.) The current file creation mode mask set to 0.
3.) Close all open files (1024). 3.) Close all open files (1024).
4.) Redirect standard I/O streams to "/dev/null". 4.) Redirect standard I/O streams to "/dev/null".
A failed call to fork() now raises an exception. A failed call to fork() now raises an exception.
References: References:
1) Advanced Programming in the Unix Environment: W. Richard Stevens 1) Advanced Programming in the Unix Environment: W. Richard Stevens
2) Unix Programming Frequently Asked Questions: 2) Unix Programming Frequently Asked Questions:
http://www.erlenstar.demon.co.uk/unix/faq_toc.html http://www.erlenstar.demon.co.uk/unix/faq_toc.html
Modified to allow a function to be daemonized and return for Modified to allow a function to be daemonized and return for
bitbake use by Richard Purdie bitbake use by Richard Purdie
""" """
__author__ = "Chad J. Schroeder" __author__ = "Chad J. Schroeder"
__copyright__ = "Copyright (C) 2005 Chad J. Schroeder" __copyright__ = "Copyright (C) 2005 Chad J. Schroeder"
__version__ = "0.2" __version__ = "0.2"
# Standard Python modules. # Standard Python modules.
import os # Miscellaneous OS interfaces. import os # Miscellaneous OS interfaces.
import sys # System-specific parameters and functions. import sys # System-specific parameters and functions.
# Default daemon parameters. # Default daemon parameters.
# File mode creation mask of the daemon. # File mode creation mask of the daemon.
# For BitBake's children, we do want to inherit the parent umask. # For BitBake's children, we do want to inherit the parent umask.
UMASK = None UMASK = None
# Default maximum for the number of available file descriptors. # Default maximum for the number of available file descriptors.
MAXFD = 1024 MAXFD = 1024
# The standard I/O file descriptors are redirected to /dev/null by default. # The standard I/O file descriptors are redirected to /dev/null by default.
if (hasattr(os, "devnull")): if (hasattr(os, "devnull")):
REDIRECT_TO = os.devnull REDIRECT_TO = os.devnull
else: else:
REDIRECT_TO = "/dev/null" REDIRECT_TO = "/dev/null"
def createDaemon(function, logfile): def createDaemon(function, logfile):
""" """
Detach a process from the controlling terminal and run it in the Detach a process from the controlling terminal and run it in the
background as a daemon, returning control to the caller. background as a daemon, returning control to the caller.
""" """
try: try:
# Fork a child process so the parent can exit. This returns control to # Fork a child process so the parent can exit. This returns control to
# the command-line or shell. It also guarantees that the child will not # the command-line or shell. It also guarantees that the child will not
# be a process group leader, since the child receives a new process ID # be a process group leader, since the child receives a new process ID
# and inherits the parent's process group ID. This step is required # and inherits the parent's process group ID. This step is required
# to ensure that the next call to os.setsid is successful. # to ensure that the next call to os.setsid is successful.
pid = os.fork() pid = os.fork()
except OSError, e: except OSError as e:
raise Exception, "%s [%d]" % (e.strerror, e.errno) raise Exception("%s [%d]" % (e.strerror, e.errno))
if (pid == 0): # The first child. if (pid == 0): # The first child.
# To become the session leader of this new session and the process group # To become the session leader of this new session and the process group
# leader of the new process group, we call os.setsid(). The process is # leader of the new process group, we call os.setsid(). The process is
# also guaranteed not to have a controlling terminal. # also guaranteed not to have a controlling terminal.
os.setsid() os.setsid()
# Is ignoring SIGHUP necessary? # Is ignoring SIGHUP necessary?
# #
# It's often suggested that the SIGHUP signal should be ignored before # It's often suggested that the SIGHUP signal should be ignored before
# the second fork to avoid premature termination of the process. The # the second fork to avoid premature termination of the process. The
# reason is that when the first child terminates, all processes, e.g. # reason is that when the first child terminates, all processes, e.g.
# the second child, in the orphaned group will be sent a SIGHUP. # the second child, in the orphaned group will be sent a SIGHUP.
# #
# "However, as part of the session management system, there are exactly # "However, as part of the session management system, there are exactly
# two cases where SIGHUP is sent on the death of a process: # two cases where SIGHUP is sent on the death of a process:
# #
# 1) When the process that dies is the session leader of a session that # 1) When the process that dies is the session leader of a session that
# is attached to a terminal device, SIGHUP is sent to all processes # is attached to a terminal device, SIGHUP is sent to all processes
# in the foreground process group of that terminal device. # in the foreground process group of that terminal device.
# 2) When the death of a process causes a process group to become # 2) When the death of a process causes a process group to become
# orphaned, and one or more processes in the orphaned group are # orphaned, and one or more processes in the orphaned group are
# stopped, then SIGHUP and SIGCONT are sent to all members of the # stopped, then SIGHUP and SIGCONT are sent to all members of the
# orphaned group." [2] # orphaned group." [2]
# #
# The first case can be ignored since the child is guaranteed not to have # The first case can be ignored since the child is guaranteed not to have
# a controlling terminal. The second case isn't so easy to dismiss. # a controlling terminal. The second case isn't so easy to dismiss.
# The process group is orphaned when the first child terminates and # The process group is orphaned when the first child terminates and
# POSIX.1 requires that every STOPPED process in an orphaned process # POSIX.1 requires that every STOPPED process in an orphaned process
# group be sent a SIGHUP signal followed by a SIGCONT signal. Since the # group be sent a SIGHUP signal followed by a SIGCONT signal. Since the
# second child is not STOPPED though, we can safely forego ignoring the # second child is not STOPPED though, we can safely forego ignoring the
# SIGHUP signal. In any case, there are no ill-effects if it is ignored. # SIGHUP signal. In any case, there are no ill-effects if it is ignored.
# #
# import signal # Set handlers for asynchronous events. # import signal # Set handlers for asynchronous events.
# signal.signal(signal.SIGHUP, signal.SIG_IGN) # signal.signal(signal.SIGHUP, signal.SIG_IGN)
try: try:
# Fork a second child and exit immediately to prevent zombies. This # Fork a second child and exit immediately to prevent zombies. This
# causes the second child process to be orphaned, making the init # causes the second child process to be orphaned, making the init
# process responsible for its cleanup. And, since the first child is # process responsible for its cleanup. And, since the first child is
# a session leader without a controlling terminal, it's possible for # a session leader without a controlling terminal, it's possible for
# it to acquire one by opening a terminal in the future (System V- # it to acquire one by opening a terminal in the future (System V-
# based systems). This second fork guarantees that the child is no # based systems). This second fork guarantees that the child is no
# longer a session leader, preventing the daemon from ever acquiring # longer a session leader, preventing the daemon from ever acquiring
# a controlling terminal. # a controlling terminal.
pid = os.fork() # Fork a second child. pid = os.fork() # Fork a second child.
except OSError, e: except OSError as e:
raise Exception, "%s [%d]" % (e.strerror, e.errno) raise Exception("%s [%d]" % (e.strerror, e.errno))
if (pid == 0): # The second child. if (pid == 0): # The second child.
# We probably don't want the file mode creation mask inherited from # We probably don't want the file mode creation mask inherited from
# the parent, so we give the child complete control over permissions. # the parent, so we give the child complete control over permissions.
if UMASK is not None: if UMASK is not None:
os.umask(UMASK) os.umask(UMASK)
else: else:
# Parent (the first child) of the second child. # Parent (the first child) of the second child.
os._exit(0) os._exit(0)
else: else:
# exit() or _exit()? # exit() or _exit()?
# _exit is like exit(), but it doesn't call any functions registered # _exit is like exit(), but it doesn't call any functions registered
# with atexit (and on_exit) or any registered signal handlers. It also # with atexit (and on_exit) or any registered signal handlers. It also
# closes any open file descriptors. Using exit() may cause all stdio # closes any open file descriptors. Using exit() may cause all stdio
# streams to be flushed twice and any temporary files may be unexpectedly # streams to be flushed twice and any temporary files may be unexpectedly
# removed. It's therefore recommended that child branches of a fork() # removed. It's therefore recommended that child branches of a fork()
# and the parent branch(es) of a daemon use _exit(). # and the parent branch(es) of a daemon use _exit().
return return
# Close all open file descriptors. This prevents the child from keeping # Close all open file descriptors. This prevents the child from keeping
# open any file descriptors inherited from the parent. There is a variety # open any file descriptors inherited from the parent. There is a variety
# of methods to accomplish this task. Three are listed below. # of methods to accomplish this task. Three are listed below.
# #
# Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum
# number of open file descriptors to close. If it doesn't exists, use # number of open file descriptors to close. If it doesn't exists, use
# the default value (configurable). # the default value (configurable).
# #
# try: # try:
# maxfd = os.sysconf("SC_OPEN_MAX") # maxfd = os.sysconf("SC_OPEN_MAX")
# except (AttributeError, ValueError): # except (AttributeError, ValueError):
# maxfd = MAXFD # maxfd = MAXFD
# #
# OR # OR
# #
# if (os.sysconf_names.has_key("SC_OPEN_MAX")): # if (os.sysconf_names.has_key("SC_OPEN_MAX")):
# maxfd = os.sysconf("SC_OPEN_MAX") # maxfd = os.sysconf("SC_OPEN_MAX")
# else: # else:
# maxfd = MAXFD # maxfd = MAXFD
# #
# OR # OR
# #
# Use the getrlimit method to retrieve the maximum file descriptor number # Use the getrlimit method to retrieve the maximum file descriptor number
# that can be opened by this process. If there is no limit on the # that can be opened by this process. If there is no limit on the
# resource, use the default value. # resource, use the default value.
# #
import resource # Resource usage information. import resource # Resource usage information.
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if (maxfd == resource.RLIM_INFINITY): if (maxfd == resource.RLIM_INFINITY):
maxfd = MAXFD maxfd = MAXFD
# Iterate through and close all file descriptors. # Iterate through and close all file descriptors.
# for fd in range(0, maxfd): # for fd in range(0, maxfd):
# try: # try:
# os.close(fd) # os.close(fd)
# except OSError: # ERROR, fd wasn't open to begin with (ignored) # except OSError: # ERROR, fd wasn't open to begin with (ignored)
# pass # pass
# Redirect the standard I/O file descriptors to the specified file. Since # Redirect the standard I/O file descriptors to the specified file. Since
# the daemon has no controlling terminal, most daemons redirect stdin, # the daemon has no controlling terminal, most daemons redirect stdin,
# stdout, and stderr to /dev/null. This is done to prevent side-effects # stdout, and stderr to /dev/null. This is done to prevent side-effects
# from reads and writes to the standard I/O file descriptors. # from reads and writes to the standard I/O file descriptors.
# This call to open is guaranteed to return the lowest file descriptor, # This call to open is guaranteed to return the lowest file descriptor,
# which will be 0 (stdin), since it was closed above. # which will be 0 (stdin), since it was closed above.
# os.open(REDIRECT_TO, os.O_RDWR) # standard input (0) # os.open(REDIRECT_TO, os.O_RDWR) # standard input (0)
# Duplicate standard input to standard output and standard error. # Duplicate standard input to standard output and standard error.
# os.dup2(0, 1) # standard output (1) # os.dup2(0, 1) # standard output (1)
# os.dup2(0, 2) # standard error (2) # os.dup2(0, 2) # standard error (2)
si = file('/dev/null', 'r') si = file('/dev/null', 'r')
so = file(logfile, 'w') so = file(logfile, 'w')
se = so se = so
# Replace those fds with our own # Replace those fds with our own
os.dup2(si.fileno(), sys.stdin.fileno()) os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno()) os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno()) os.dup2(se.fileno(), sys.stderr.fileno())
function() function()
os._exit(0) os._exit(0)

View File

@ -193,7 +193,7 @@ def emit_var(var, o=sys.__stdout__, d = init(), all=False):
if all: if all:
o.write('# %s=%s\n' % (var, oval)) o.write('# %s=%s\n' % (var, oval))
if type(val) is not types.StringType: if not isinstance(val, types.StringType):
return 0 return 0
if (var.find("-") != -1 or var.find(".") != -1 or var.find('{') != -1 or var.find('}') != -1 or var.find('+') != -1) and not all: if (var.find("-") != -1 or var.find(".") != -1 or var.find('{') != -1 or var.find('}') != -1 or var.find('+') != -1) and not all:

View File

@ -66,10 +66,10 @@ class DataSmart:
code = match.group()[3:-1] code = match.group()[3:-1]
codeobj = compile(code.strip(), varname or "<expansion>", "eval") codeobj = compile(code.strip(), varname or "<expansion>", "eval")
s = utils.better_eval(codeobj, {"d": self}) s = utils.better_eval(codeobj, {"d": self})
if type(s) == types.IntType: s = str(s) if isinstance(s, types.IntType): s = str(s)
return s return s
if type(s) is not types.StringType: # sanity check if not isinstance(s, types.StringType): # sanity check
return s return s
if varname and varname in self.expand_cache: if varname and varname in self.expand_cache:
@ -81,7 +81,7 @@ class DataSmart:
s = __expand_var_regexp__.sub(var_sub, s) s = __expand_var_regexp__.sub(var_sub, s)
s = __expand_python_regexp__.sub(python_sub, s) s = __expand_python_regexp__.sub(python_sub, s)
if s == olds: break if s == olds: break
if type(s) is not types.StringType: # sanity check if not isinstance(s, types.StringType): # sanity check
bb.msg.error(bb.msg.domain.Data, 'expansion of %s returned non-string %s' % (olds, s)) bb.msg.error(bb.msg.domain.Data, 'expansion of %s returned non-string %s' % (olds, s))
except KeyboardInterrupt: except KeyboardInterrupt:
raise raise
@ -118,7 +118,7 @@ class DataSmart:
l = len(o)+1 l = len(o)+1
# see if one should even try # see if one should even try
if not self._seen_overrides.has_key(o): if o not in self._seen_overrides:
continue continue
vars = self._seen_overrides[o] vars = self._seen_overrides[o]
@ -130,7 +130,7 @@ class DataSmart:
bb.msg.note(1, bb.msg.domain.Data, "Untracked delVar") bb.msg.note(1, bb.msg.domain.Data, "Untracked delVar")
# now on to the appends and prepends # now on to the appends and prepends
if self._special_values.has_key("_append"): if "_append" in self._special_values:
appends = self._special_values['_append'] or [] appends = self._special_values['_append'] or []
for append in appends: for append in appends:
for (a, o) in self.getVarFlag(append, '_append') or []: for (a, o) in self.getVarFlag(append, '_append') or []:
@ -145,7 +145,7 @@ class DataSmart:
self.setVar(append, sval) self.setVar(append, sval)
if self._special_values.has_key("_prepend"): if "_prepend" in self._special_values:
prepends = self._special_values['_prepend'] or [] prepends = self._special_values['_prepend'] or []
for prepend in prepends: for prepend in prepends:
@ -215,7 +215,7 @@ class DataSmart:
# more cookies for the cookie monster # more cookies for the cookie monster
if '_' in var: if '_' in var:
override = var[var.rfind('_')+1:] override = var[var.rfind('_')+1:]
if not self._seen_overrides.has_key(override): if override not in self._seen_overrides:
self._seen_overrides[override] = set() self._seen_overrides[override] = set()
self._seen_overrides[override].add( var ) self._seen_overrides[override].add( var )
@ -246,7 +246,7 @@ class DataSmart:
dest.extend(src) dest.extend(src)
self.setVarFlag(newkey, i, dest) self.setVarFlag(newkey, i, dest)
if self._special_values.has_key(i) and key in self._special_values[i]: if i in self._special_values and key in self._special_values[i]:
self._special_values[i].remove(key) self._special_values[i].remove(key)
self._special_values[i].add(newkey) self._special_values[i].add(newkey)

View File

@ -139,8 +139,8 @@ class Cvs(Fetch):
bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory") bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory")
pkg = data.expand('${PN}', d) pkg = data.expand('${PN}', d)
pkgdir = os.path.join(data.expand('${CVSDIR}', localdata), pkg) pkgdir = os.path.join(data.expand('${CVSDIR}', localdata), pkg)
moddir = os.path.join(pkgdir,localdir) moddir = os.path.join(pkgdir, localdir)
if os.access(os.path.join(moddir,'CVS'), os.R_OK): if os.access(os.path.join(moddir, 'CVS'), os.R_OK):
bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc) bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc)
# update sources there # update sources there
os.chdir(moddir) os.chdir(moddir)

View File

@ -35,15 +35,15 @@ class Perforce(Fetch):
def supports(self, url, ud, d): def supports(self, url, ud, d):
return ud.type in ['p4'] return ud.type in ['p4']
def doparse(url,d): def doparse(url, d):
parm = {} parm = {}
path = url.split("://")[1] path = url.split("://")[1]
delim = path.find("@"); delim = path.find("@");
if delim != -1: if delim != -1:
(user,pswd,host,port) = path.split('@')[0].split(":") (user, pswd, host, port) = path.split('@')[0].split(":")
path = path.split('@')[1] path = path.split('@')[1]
else: else:
(host,port) = data.getVar('P4PORT', d).split(':') (host, port) = data.getVar('P4PORT', d).split(':')
user = "" user = ""
pswd = "" pswd = ""
@ -53,19 +53,19 @@ class Perforce(Fetch):
plist = path.split(';') plist = path.split(';')
for item in plist: for item in plist:
if item.count('='): if item.count('='):
(key,value) = item.split('=') (key, value) = item.split('=')
keys.append(key) keys.append(key)
values.append(value) values.append(value)
parm = dict(zip(keys,values)) parm = dict(zip(keys, values))
path = "//" + path.split(';')[0] path = "//" + path.split(';')[0]
host += ":%s" % (port) host += ":%s" % (port)
parm["cset"] = Perforce.getcset(d, path, host, user, pswd, parm) parm["cset"] = Perforce.getcset(d, path, host, user, pswd, parm)
return host,path,user,pswd,parm return host, path, user, pswd, parm
doparse = staticmethod(doparse) doparse = staticmethod(doparse)
def getcset(d, depot,host,user,pswd,parm): def getcset(d, depot, host, user, pswd, parm):
p4opt = "" p4opt = ""
if "cset" in parm: if "cset" in parm:
return parm["cset"]; return parm["cset"];
@ -97,7 +97,7 @@ class Perforce(Fetch):
def localpath(self, url, ud, d): def localpath(self, url, ud, d):
(host,path,user,pswd,parm) = Perforce.doparse(url,d) (host, path, user, pswd, parm) = Perforce.doparse(url, d)
# If a label is specified, we use that as our filename # If a label is specified, we use that as our filename
@ -115,7 +115,7 @@ class Perforce(Fetch):
cset = Perforce.getcset(d, path, host, user, pswd, parm) cset = Perforce.getcset(d, path, host, user, pswd, parm)
ud.localfile = data.expand('%s+%s+%s.tar.gz' % (host,base.replace('/', '.'), cset), d) ud.localfile = data.expand('%s+%s+%s.tar.gz' % (host, base.replace('/', '.'), cset), d)
return os.path.join(data.getVar("DL_DIR", d, 1), ud.localfile) return os.path.join(data.getVar("DL_DIR", d, 1), ud.localfile)
@ -124,7 +124,7 @@ class Perforce(Fetch):
Fetch urls Fetch urls
""" """
(host,depot,user,pswd,parm) = Perforce.doparse(loc, d) (host, depot, user, pswd, parm) = Perforce.doparse(loc, d)
if depot.find('/...') != -1: if depot.find('/...') != -1:
path = depot[:depot.find('/...')] path = depot[:depot.find('/...')]
@ -164,10 +164,10 @@ class Perforce(Fetch):
raise FetchError(module) raise FetchError(module)
if "label" in parm: if "label" in parm:
depot = "%s@%s" % (depot,parm["label"]) depot = "%s@%s" % (depot, parm["label"])
else: else:
cset = Perforce.getcset(d, depot, host, user, pswd, parm) cset = Perforce.getcset(d, depot, host, user, pswd, parm)
depot = "%s@%s" % (depot,cset) depot = "%s@%s" % (depot, cset)
os.chdir(tmpfile) os.chdir(tmpfile)
bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc) bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
@ -189,7 +189,7 @@ class Perforce(Fetch):
dest = list[0][len(path)+1:] dest = list[0][len(path)+1:]
where = dest.find("#") where = dest.find("#")
os.system("%s%s print -o %s/%s %s" % (p4cmd, p4opt, module,dest[:where],list[0])) os.system("%s%s print -o %s/%s %s" % (p4cmd, p4opt, module, dest[:where], list[0]))
count = count + 1 count = count + 1
if count == 0: if count == 0:

View File

@ -38,7 +38,7 @@ class Wget(Fetch):
""" """
Check to see if a given url can be fetched with wget. Check to see if a given url can be fetched with wget.
""" """
return ud.type in ['http','https','ftp'] return ud.type in ['http', 'https', 'ftp']
def localpath(self, url, ud, d): def localpath(self, url, ud, d):

View File

@ -37,12 +37,12 @@ class SkipPackage(Exception):
__mtime_cache = {} __mtime_cache = {}
def cached_mtime(f): def cached_mtime(f):
if not __mtime_cache.has_key(f): if f not in __mtime_cache:
__mtime_cache[f] = os.stat(f)[8] __mtime_cache[f] = os.stat(f)[8]
return __mtime_cache[f] return __mtime_cache[f]
def cached_mtime_noerror(f): def cached_mtime_noerror(f):
if not __mtime_cache.has_key(f): if f not in __mtime_cache:
try: try:
__mtime_cache[f] = os.stat(f)[8] __mtime_cache[f] = os.stat(f)[8]
except OSError: except OSError:

View File

@ -311,7 +311,7 @@ def finalize(fn, d):
all_handlers = {} all_handlers = {}
for var in bb.data.getVar('__BBHANDLERS', d) or []: for var in bb.data.getVar('__BBHANDLERS', d) or []:
# try to add the handler # try to add the handler
handler = bb.data.getVar(var,d) handler = bb.data.getVar(var, d)
bb.event.register(var, handler) bb.event.register(var, handler)
tasklist = bb.data.getVar('__BBTASKS', d) or [] tasklist = bb.data.getVar('__BBTASKS', d) or []

View File

@ -90,7 +90,7 @@ def get_statements(filename, absolsute_filename, base_name):
statements = ast.StatementGroup() statements = ast.StatementGroup()
lineno = 0 lineno = 0
while 1: while True:
lineno = lineno + 1 lineno = lineno + 1
s = file.readline() s = file.readline()
if not s: break if not s: break
@ -118,7 +118,7 @@ def handle(fn, d, include):
bb.msg.debug(2, bb.msg.domain.Parsing, "BB " + fn + ": handle(data, include)") bb.msg.debug(2, bb.msg.domain.Parsing, "BB " + fn + ": handle(data, include)")
(root, ext) = os.path.splitext(os.path.basename(fn)) (root, ext) = os.path.splitext(os.path.basename(fn))
base_name = "%s%s" % (root,ext) base_name = "%s%s" % (root, ext)
init(d) init(d)
if ext == ".bbclass": if ext == ".bbclass":
@ -164,7 +164,7 @@ def handle(fn, d, include):
return d return d
def feeder(lineno, s, fn, root, statements): def feeder(lineno, s, fn, root, statements):
global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__,__infunc__, __body__, classes, bb, __residue__ global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__, __infunc__, __body__, classes, bb, __residue__
if __infunc__: if __infunc__:
if s == '}': if s == '}':
__body__.append('') __body__.append('')

View File

@ -89,7 +89,7 @@ def handle(fn, data, include):
statements = ast.StatementGroup() statements = ast.StatementGroup()
lineno = 0 lineno = 0
while 1: while True:
lineno = lineno + 1 lineno = lineno + 1
s = f.readline() s = f.readline()
if not s: break if not s: break

View File

@ -52,7 +52,7 @@ class PersistData:
except OSError: except OSError:
bb.utils.mkdirhier(self.cachedir) bb.utils.mkdirhier(self.cachedir)
self.cachefile = os.path.join(self.cachedir,"bb_persist_data.sqlite3") self.cachefile = os.path.join(self.cachedir, "bb_persist_data.sqlite3")
bb.msg.debug(1, bb.msg.domain.PersistData, "Using '%s' as the persistent data cache" % self.cachefile) bb.msg.debug(1, bb.msg.domain.PersistData, "Using '%s' as the persistent data cache" % self.cachefile)
self.connection = sqlite3.connect(self.cachefile, timeout=5, isolation_level=None) self.connection = sqlite3.connect(self.cachefile, timeout=5, isolation_level=None)
@ -113,7 +113,7 @@ class PersistData:
try: try:
self.connection.execute(*query) self.connection.execute(*query)
return return
except sqlite3.OperationalError, e: except sqlite3.OperationalError as e:
if 'database is locked' in str(e): if 'database is locked' in str(e):
continue continue
raise raise

View File

@ -109,8 +109,7 @@ class RunQueueSchedulerSpeed(RunQueueScheduler):
self.rq = runqueue self.rq = runqueue
sortweight = deepcopy(self.rq.runq_weight) sortweight = sorted(deepcopy(self.rq.runq_weight))
sortweight.sort()
copyweight = deepcopy(self.rq.runq_weight) copyweight = deepcopy(self.rq.runq_weight)
self.prio_map = [] self.prio_map = []
@ -307,7 +306,7 @@ class RunQueue:
weight[listid] = 1 weight[listid] = 1
task_done[listid] = True task_done[listid] = True
while 1: while True:
next_points = [] next_points = []
for listid in endpoints: for listid in endpoints:
for revdep in self.runq_depends[listid]: for revdep in self.runq_depends[listid]:
@ -631,7 +630,7 @@ class RunQueue:
for dep in revdeps: for dep in revdeps:
if dep in self.runq_depends[listid]: if dep in self.runq_depends[listid]:
#self.dump_data(taskData) #self.dump_data(taskData)
bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep] , taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid])) bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep], taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid]))
bb.msg.note(2, bb.msg.domain.RunQueue, "Compute totals (have %s endpoint(s))" % len(endpoints)) bb.msg.note(2, bb.msg.domain.RunQueue, "Compute totals (have %s endpoint(s))" % len(endpoints))
@ -814,7 +813,7 @@ class RunQueue:
bb.msg.debug(2, bb.msg.domain.RunQueue, "Stampfile %s < %s" % (stampfile, stampfile2)) bb.msg.debug(2, bb.msg.domain.RunQueue, "Stampfile %s < %s" % (stampfile, stampfile2))
iscurrent = False iscurrent = False
except: except:
bb.msg.debug(2, bb.msg.domain.RunQueue, "Exception reading %s for %s" % (stampfile2 , stampfile)) bb.msg.debug(2, bb.msg.domain.RunQueue, "Exception reading %s for %s" % (stampfile2, stampfile))
iscurrent = False iscurrent = False
return iscurrent return iscurrent
@ -948,7 +947,7 @@ class RunQueue:
try: try:
pipein, pipeout = os.pipe() pipein, pipeout = os.pipe()
pid = os.fork() pid = os.fork()
except OSError, e: except OSError as e:
bb.msg.fatal(bb.msg.domain.RunQueue, "fork failed: %d (%s)" % (e.errno, e.strerror)) bb.msg.fatal(bb.msg.domain.RunQueue, "fork failed: %d (%s)" % (e.errno, e.strerror))
if pid == 0: if pid == 0:
os.close(pipein) os.close(pipein)

View File

@ -115,7 +115,7 @@ class BitBakeServer():
def register_idle_function(self, function, data): def register_idle_function(self, function, data):
"""Register a function to be called while the server is idle""" """Register a function to be called while the server is idle"""
assert callable(function) assert hasattr(function, '__call__')
self._idlefuns[function] = data self._idlefuns[function] = data
def idle_commands(self, delay): def idle_commands(self, delay):

View File

@ -112,7 +112,7 @@ class BitBakeServer(SimpleXMLRPCServer):
def register_idle_function(self, function, data): def register_idle_function(self, function, data):
"""Register a function to be called while the server is idle""" """Register a function to be called while the server is idle"""
assert callable(function) assert hasattr(function, '__call__')
self._idlefuns[function] = data self._idlefuns[function] = data
def serve_forever(self): def serve_forever(self):

View File

@ -34,7 +34,7 @@ def re_match_strings(target, strings):
for name in strings: for name in strings:
if (name==target or if (name==target or
re.search(name,target)!=None): re.search(name, target)!=None):
return True return True
return False return False
@ -539,7 +539,7 @@ class TaskData:
Resolve all unresolved build and runtime targets Resolve all unresolved build and runtime targets
""" """
bb.msg.note(1, bb.msg.domain.TaskData, "Resolving any missing task queue dependencies") bb.msg.note(1, bb.msg.domain.TaskData, "Resolving any missing task queue dependencies")
while 1: while True:
added = 0 added = 0
for target in self.get_unresolved_build_targets(dataCache): for target in self.get_unresolved_build_targets(dataCache):
try: try:

View File

@ -157,7 +157,7 @@ class BuildResult(gobject.GObject):
# format build-<year><month><day>-<ordinal> we can easily # format build-<year><month><day>-<ordinal> we can easily
# pull it out. # pull it out.
# TODO: Better to stat a file? # TODO: Better to stat a file?
(_ , date, revision) = identifier.split ("-") (_, date, revision) = identifier.split ("-")
print(date) print(date)
year = int (date[0:4]) year = int (date[0:4])
@ -385,7 +385,7 @@ class BuildManager (gobject.GObject):
build_directory]) build_directory])
server.runCommand(["buildTargets", [conf.image], "rootfs"]) server.runCommand(["buildTargets", [conf.image], "rootfs"])
except Exception, e: except Exception as e:
print(e) print(e)
class BuildManagerTreeView (gtk.TreeView): class BuildManagerTreeView (gtk.TreeView):

View File

@ -63,7 +63,7 @@ class RunningBuild (gobject.GObject):
# for the message. # for the message.
if hasattr(event, 'pid'): if hasattr(event, 'pid'):
pid = event.pid pid = event.pid
if self.pids_to_task.has_key(pid): if pid in self.pids_to_task:
(package, task) = self.pids_to_task[pid] (package, task) = self.pids_to_task[pid]
parent = self.tasks_to_iter[(package, task)] parent = self.tasks_to_iter[(package, task)]
@ -93,12 +93,12 @@ class RunningBuild (gobject.GObject):
(package, task) = (event._package, event._task) (package, task) = (event._package, event._task)
# Save out this PID. # Save out this PID.
self.pids_to_task[pid] = (package,task) self.pids_to_task[pid] = (package, task)
# Check if we already have this package in our model. If so then # Check if we already have this package in our model. If so then
# that can be the parent for the task. Otherwise we create a new # that can be the parent for the task. Otherwise we create a new
# top level for the package. # top level for the package.
if (self.tasks_to_iter.has_key ((package, None))): if ((package, None) in self.tasks_to_iter):
parent = self.tasks_to_iter[(package, None)] parent = self.tasks_to_iter[(package, None)]
else: else:
parent = self.model.append (None, (None, parent = self.model.append (None, (None,

View File

@ -207,7 +207,7 @@ def init(server, eventHandler):
if ret != True: if ret != True:
print("Couldn't run command! %s" % ret) print("Couldn't run command! %s" % ret)
return return
except xmlrpclib.Fault, x: except xmlrpclib.Fault as x:
print("XMLRPC Fault getting commandline:\n %s" % x) print("XMLRPC Fault getting commandline:\n %s" % x)
return return

View File

@ -62,7 +62,7 @@ def init (server, eventHandler):
if ret != True: if ret != True:
print("Couldn't get default commandline! %s" % ret) print("Couldn't get default commandline! %s" % ret)
return 1 return 1
except xmlrpclib.Fault, x: except xmlrpclib.Fault as x:
print("XMLRPC Fault getting commandline:\n %s" % x) print("XMLRPC Fault getting commandline:\n %s" % x)
return 1 return 1

View File

@ -46,7 +46,7 @@ def init(server, eventHandler):
if ret != True: if ret != True:
print("Couldn't get default commandline! %s" % ret) print("Couldn't get default commandline! %s" % ret)
return 1 return 1
except xmlrpclib.Fault, x: except xmlrpclib.Fault as x:
print("XMLRPC Fault getting commandline:\n %s" % x) print("XMLRPC Fault getting commandline:\n %s" % x)
return 1 return 1

View File

@ -234,7 +234,7 @@ class NCursesUI:
if ret != True: if ret != True:
print("Couldn't get default commandlind! %s" % ret) print("Couldn't get default commandlind! %s" % ret)
return return
except xmlrpclib.Fault, x: except xmlrpclib.Fault as x:
print("XMLRPC Fault getting commandline:\n %s" % x) print("XMLRPC Fault getting commandline:\n %s" % x)
return return

View File

@ -104,10 +104,10 @@ class MetaDataLoader(gobject.GObject):
gobject.idle_add (MetaDataLoader.emit_success_signal, gobject.idle_add (MetaDataLoader.emit_success_signal,
self.loader) self.loader)
except MetaDataLoader.LoaderThread.LoaderImportException, e: except MetaDataLoader.LoaderThread.LoaderImportException as e:
gobject.idle_add (MetaDataLoader.emit_error_signal, self.loader, gobject.idle_add (MetaDataLoader.emit_error_signal, self.loader,
"Repository metadata corrupt") "Repository metadata corrupt")
except Exception, e: except Exception as e:
gobject.idle_add (MetaDataLoader.emit_error_signal, self.loader, gobject.idle_add (MetaDataLoader.emit_error_signal, self.loader,
"Unable to download repository metadata") "Unable to download repository metadata")
print(e) print(e)
@ -211,7 +211,7 @@ class BuildSetupDialog (gtk.Dialog):
# Build # Build
button = gtk.Button ("_Build", None, True) button = gtk.Button ("_Build", None, True)
image = gtk.Image () image = gtk.Image ()
image.set_from_stock (gtk.STOCK_EXECUTE,gtk.ICON_SIZE_BUTTON) image.set_from_stock (gtk.STOCK_EXECUTE, gtk.ICON_SIZE_BUTTON)
button.set_image (image) button.set_image (image)
self.add_action_widget (button, BuildSetupDialog.RESPONSE_BUILD) self.add_action_widget (button, BuildSetupDialog.RESPONSE_BUILD)
button.show_all () button.show_all ()

View File

@ -110,7 +110,7 @@ class UIXMLRPCServer (SimpleXMLRPCServer):
return (sock, addr) return (sock, addr)
except socket.timeout: except socket.timeout:
pass pass
return (None,None) return (None, None)
def close_request(self, request): def close_request(self, request):
if request is None: if request is None:

View File

@ -72,9 +72,9 @@ def vercmp_part(a, b):
if ca == None and cb == None: if ca == None and cb == None:
return 0 return 0
if type(ca) is types.StringType: if isinstance(ca, types.StringType):
sa = ca in separators sa = ca in separators
if type(cb) is types.StringType: if isinstance(cb, types.StringType):
sb = cb in separators sb = cb in separators
if sa and not sb: if sa and not sb:
return -1 return -1
@ -306,7 +306,7 @@ def better_compile(text, file, realfile, mode = "exec"):
""" """
try: try:
return compile(text, file, mode) return compile(text, file, mode)
except Exception, e: except Exception as e:
# split the text into lines again # split the text into lines again
body = text.split('\n') body = text.split('\n')
bb.msg.error(bb.msg.domain.Util, "Error in compiling python function in: ", realfile) bb.msg.error(bb.msg.domain.Util, "Error in compiling python function in: ", realfile)
@ -385,7 +385,7 @@ def lockfile(name):
return lf return lf
# File no longer exists or changed, retry # File no longer exists or changed, retry
lf.close lf.close
except Exception, e: except Exception as e:
continue continue
def unlockfile(lf): def unlockfile(lf):
@ -546,7 +546,7 @@ def mkdirhier(dir):
try: try:
os.makedirs(dir) os.makedirs(dir)
bb.msg.debug(2, bb.msg.domain.Util, "created " + dir) bb.msg.debug(2, bb.msg.domain.Util, "created " + dir)
except OSError, e: except OSError as e:
if e.errno != errno.EEXIST: if e.errno != errno.EEXIST:
raise e raise e
@ -561,7 +561,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
try: try:
if not sstat: if not sstat:
sstat = os.lstat(src) sstat = os.lstat(src)
except Exception, e: except Exception as e:
print("movefile: Stating source file failed...", e) print("movefile: Stating source file failed...", e)
return None return None
@ -577,7 +577,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
try: try:
os.unlink(dest) os.unlink(dest)
destexists = 0 destexists = 0
except Exception, e: except Exception as e:
pass pass
if stat.S_ISLNK(sstat[stat.ST_MODE]): if stat.S_ISLNK(sstat[stat.ST_MODE]):
@ -589,7 +589,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
#os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
os.unlink(src) os.unlink(src)
return os.lstat(dest) return os.lstat(dest)
except Exception, e: except Exception as e:
print("movefile: failed to properly create symlink:", dest, "->", target, e) print("movefile: failed to properly create symlink:", dest, "->", target, e)
return None return None
@ -598,7 +598,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
try: try:
os.rename(src, dest) os.rename(src, dest)
renamefailed = 0 renamefailed = 0
except Exception, e: except Exception as e:
if e[0] != errno.EXDEV: if e[0] != errno.EXDEV:
# Some random error. # Some random error.
print("movefile: Failed to move", src, "to", dest, e) print("movefile: Failed to move", src, "to", dest, e)
@ -612,7 +612,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
shutil.copyfile(src, dest + "#new") shutil.copyfile(src, dest + "#new")
os.rename(dest + "#new", dest) os.rename(dest + "#new", dest)
didcopy = 1 didcopy = 1
except Exception, e: except Exception as e:
print('movefile: copy', src, '->', dest, 'failed.', e) print('movefile: copy', src, '->', dest, 'failed.', e)
return None return None
else: else:
@ -626,7 +626,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID]) os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
os.unlink(src) os.unlink(src)
except Exception, e: except Exception as e:
print("movefile: Failed to chown/chmod/unlink", dest, e) print("movefile: Failed to chown/chmod/unlink", dest, e)
return None return None
@ -647,7 +647,7 @@ def copyfile(src, dest, newmtime = None, sstat = None):
try: try:
if not sstat: if not sstat:
sstat = os.lstat(src) sstat = os.lstat(src)
except Exception, e: except Exception as e:
print("copyfile: Stating source file failed...", e) print("copyfile: Stating source file failed...", e)
return False return False
@ -663,7 +663,7 @@ def copyfile(src, dest, newmtime = None, sstat = None):
try: try:
os.unlink(dest) os.unlink(dest)
destexists = 0 destexists = 0
except Exception, e: except Exception as e:
pass pass
if stat.S_ISLNK(sstat[stat.ST_MODE]): if stat.S_ISLNK(sstat[stat.ST_MODE]):
@ -674,7 +674,7 @@ def copyfile(src, dest, newmtime = None, sstat = None):
os.symlink(target, dest) os.symlink(target, dest)
#os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
return os.lstat(dest) return os.lstat(dest)
except Exception, e: except Exception as e:
print("copyfile: failed to properly create symlink:", dest, "->", target, e) print("copyfile: failed to properly create symlink:", dest, "->", target, e)
return False return False
@ -682,7 +682,7 @@ def copyfile(src, dest, newmtime = None, sstat = None):
try: # For safety copy then move it over. try: # For safety copy then move it over.
shutil.copyfile(src, dest + "#new") shutil.copyfile(src, dest + "#new")
os.rename(dest + "#new", dest) os.rename(dest + "#new", dest)
except Exception, e: except Exception as e:
print('copyfile: copy', src, '->', dest, 'failed.', e) print('copyfile: copy', src, '->', dest, 'failed.', e)
return False return False
else: else:
@ -694,7 +694,7 @@ def copyfile(src, dest, newmtime = None, sstat = None):
try: try:
os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID]) os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
except Exception, e: except Exception as e:
print("copyfile: Failed to chown/chmod/unlink", dest, e) print("copyfile: Failed to chown/chmod/unlink", dest, e)
return False return False