From 1180bab54e2879401f3586c91a48174191a1ee8b Mon Sep 17 00:00:00 2001 From: Chris Larson Date: Sun, 11 Apr 2010 17:03:55 -0700 Subject: [PATCH] Apply some 2to3 transforms that don't cause issues in 2.6 (Bitbake rev: d39ab776e7ceaefc8361150151cf0892dcb70d9c) Signed-off-by: Chris Larson Signed-off-by: Richard Purdie --- bitbake/lib/bb/build.py | 4 +- bitbake/lib/bb/cache.py | 8 +- bitbake/lib/bb/daemonize.py | 380 +++++++++---------- bitbake/lib/bb/data.py | 2 +- bitbake/lib/bb/data_smart.py | 16 +- bitbake/lib/bb/fetch/cvs.py | 4 +- bitbake/lib/bb/fetch/perforce.py | 26 +- bitbake/lib/bb/fetch/wget.py | 2 +- bitbake/lib/bb/parse/__init__.py | 4 +- bitbake/lib/bb/parse/ast.py | 2 +- bitbake/lib/bb/parse/parse_py/BBHandler.py | 6 +- bitbake/lib/bb/parse/parse_py/ConfHandler.py | 2 +- bitbake/lib/bb/persist_data.py | 4 +- bitbake/lib/bb/runqueue.py | 11 +- bitbake/lib/bb/server/none.py | 2 +- bitbake/lib/bb/server/xmlrpc.py | 2 +- bitbake/lib/bb/taskdata.py | 4 +- bitbake/lib/bb/ui/crumbs/buildmanager.py | 4 +- bitbake/lib/bb/ui/crumbs/runningbuild.py | 6 +- bitbake/lib/bb/ui/depexp.py | 2 +- bitbake/lib/bb/ui/goggle.py | 2 +- bitbake/lib/bb/ui/knotty.py | 2 +- bitbake/lib/bb/ui/ncurses.py | 2 +- bitbake/lib/bb/ui/puccho.py | 6 +- bitbake/lib/bb/ui/uievent.py | 2 +- bitbake/lib/bb/utils.py | 32 +- 26 files changed, 268 insertions(+), 269 deletions(-) diff --git a/bitbake/lib/bb/build.py b/bitbake/lib/bb/build.py index 3d71013998..4f14e63ac7 100644 --- a/bitbake/lib/bb/build.py +++ b/bitbake/lib/bb/build.py @@ -140,7 +140,7 @@ def exec_func(func, d, dirs = None): so = os.popen("tee \"%s\"" % logfile, "w") else: so = file(logfile, 'w') - except OSError, e: + except OSError as e: bb.msg.error(bb.msg.domain.Build, "opening log file: %s" % e) pass @@ -285,7 +285,7 @@ def exec_task(task, d): event.fire(TaskStarted(task, localdata), localdata) exec_func(task, localdata) event.fire(TaskSucceeded(task, localdata), localdata) - except FuncFailed, message: + except FuncFailed as message: # Try to extract the optional logfile try: (msg, logfile) = message diff --git a/bitbake/lib/bb/cache.py b/bitbake/lib/bb/cache.py index 0d165aec2f..6e124b2e83 100644 --- a/bitbake/lib/bb/cache.py +++ b/bitbake/lib/bb/cache.py @@ -61,7 +61,7 @@ class Cache: return self.has_cache = True - self.cachefile = os.path.join(self.cachedir,"bb_cache.dat") + self.cachefile = os.path.join(self.cachedir, "bb_cache.dat") bb.msg.debug(1, bb.msg.domain.Cache, "Using cache in '%s'" % self.cachedir) try: @@ -82,9 +82,9 @@ class Cache: p = pickle.Unpickler(file(self.cachefile, "rb")) self.depends_cache, version_data = p.load() if version_data['CACHE_VER'] != __cache_version__: - raise ValueError, 'Cache Version Mismatch' + raise ValueError('Cache Version Mismatch') if version_data['BITBAKE_VER'] != bb.__version__: - raise ValueError, 'Bitbake Version Mismatch' + raise ValueError('Bitbake Version Mismatch') except EOFError: bb.msg.note(1, bb.msg.domain.Cache, "Truncated cache found, rebuilding...") self.depends_cache = {} @@ -446,7 +446,7 @@ class Cache: self.getVar('__BB_DONT_CACHE', file_name, True) self.getVar('__VARIANTS', file_name, True) - def load_bbfile( self, bbfile , config): + def load_bbfile( self, bbfile, config): """ Load and parse one .bb build file Return the data and whether parsing resulted in the file being skipped diff --git a/bitbake/lib/bb/daemonize.py b/bitbake/lib/bb/daemonize.py index a944af2238..f0714b3af6 100644 --- a/bitbake/lib/bb/daemonize.py +++ b/bitbake/lib/bb/daemonize.py @@ -1,190 +1,190 @@ -""" -Python 
Deamonizing helper - -Configurable daemon behaviors: - - 1.) The current working directory set to the "/" directory. - 2.) The current file creation mode mask set to 0. - 3.) Close all open files (1024). - 4.) Redirect standard I/O streams to "/dev/null". - -A failed call to fork() now raises an exception. - -References: - 1) Advanced Programming in the Unix Environment: W. Richard Stevens - 2) Unix Programming Frequently Asked Questions: - http://www.erlenstar.demon.co.uk/unix/faq_toc.html - -Modified to allow a function to be daemonized and return for -bitbake use by Richard Purdie -""" - -__author__ = "Chad J. Schroeder" -__copyright__ = "Copyright (C) 2005 Chad J. Schroeder" -__version__ = "0.2" - -# Standard Python modules. -import os # Miscellaneous OS interfaces. -import sys # System-specific parameters and functions. - -# Default daemon parameters. -# File mode creation mask of the daemon. -# For BitBake's children, we do want to inherit the parent umask. -UMASK = None - -# Default maximum for the number of available file descriptors. -MAXFD = 1024 - -# The standard I/O file descriptors are redirected to /dev/null by default. -if (hasattr(os, "devnull")): - REDIRECT_TO = os.devnull -else: - REDIRECT_TO = "/dev/null" - -def createDaemon(function, logfile): - """ - Detach a process from the controlling terminal and run it in the - background as a daemon, returning control to the caller. - """ - - try: - # Fork a child process so the parent can exit. This returns control to - # the command-line or shell. It also guarantees that the child will not - # be a process group leader, since the child receives a new process ID - # and inherits the parent's process group ID. This step is required - # to insure that the next call to os.setsid is successful. - pid = os.fork() - except OSError, e: - raise Exception, "%s [%d]" % (e.strerror, e.errno) - - if (pid == 0): # The first child. - # To become the session leader of this new session and the process group - # leader of the new process group, we call os.setsid(). The process is - # also guaranteed not to have a controlling terminal. - os.setsid() - - # Is ignoring SIGHUP necessary? - # - # It's often suggested that the SIGHUP signal should be ignored before - # the second fork to avoid premature termination of the process. The - # reason is that when the first child terminates, all processes, e.g. - # the second child, in the orphaned group will be sent a SIGHUP. - # - # "However, as part of the session management system, there are exactly - # two cases where SIGHUP is sent on the death of a process: - # - # 1) When the process that dies is the session leader of a session that - # is attached to a terminal device, SIGHUP is sent to all processes - # in the foreground process group of that terminal device. - # 2) When the death of a process causes a process group to become - # orphaned, and one or more processes in the orphaned group are - # stopped, then SIGHUP and SIGCONT are sent to all members of the - # orphaned group." [2] - # - # The first case can be ignored since the child is guaranteed not to have - # a controlling terminal. The second case isn't so easy to dismiss. - # The process group is orphaned when the first child terminates and - # POSIX.1 requires that every STOPPED process in an orphaned process - # group be sent a SIGHUP signal followed by a SIGCONT signal. Since the - # second child is not STOPPED though, we can safely forego ignoring the - # SIGHUP signal. In any case, there are no ill-effects if it is ignored. 
- # - # import signal # Set handlers for asynchronous events. - # signal.signal(signal.SIGHUP, signal.SIG_IGN) - - try: - # Fork a second child and exit immediately to prevent zombies. This - # causes the second child process to be orphaned, making the init - # process responsible for its cleanup. And, since the first child is - # a session leader without a controlling terminal, it's possible for - # it to acquire one by opening a terminal in the future (System V- - # based systems). This second fork guarantees that the child is no - # longer a session leader, preventing the daemon from ever acquiring - # a controlling terminal. - pid = os.fork() # Fork a second child. - except OSError, e: - raise Exception, "%s [%d]" % (e.strerror, e.errno) - - if (pid == 0): # The second child. - # We probably don't want the file mode creation mask inherited from - # the parent, so we give the child complete control over permissions. - if UMASK is not None: - os.umask(UMASK) - else: - # Parent (the first child) of the second child. - os._exit(0) - else: - # exit() or _exit()? - # _exit is like exit(), but it doesn't call any functions registered - # with atexit (and on_exit) or any registered signal handlers. It also - # closes any open file descriptors. Using exit() may cause all stdio - # streams to be flushed twice and any temporary files may be unexpectedly - # removed. It's therefore recommended that child branches of a fork() - # and the parent branch(es) of a daemon use _exit(). - return - - # Close all open file descriptors. This prevents the child from keeping - # open any file descriptors inherited from the parent. There is a variety - # of methods to accomplish this task. Three are listed below. - # - # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum - # number of open file descriptors to close. If it doesn't exists, use - # the default value (configurable). - # - # try: - # maxfd = os.sysconf("SC_OPEN_MAX") - # except (AttributeError, ValueError): - # maxfd = MAXFD - # - # OR - # - # if (os.sysconf_names.has_key("SC_OPEN_MAX")): - # maxfd = os.sysconf("SC_OPEN_MAX") - # else: - # maxfd = MAXFD - # - # OR - # - # Use the getrlimit method to retrieve the maximum file descriptor number - # that can be opened by this process. If there is not limit on the - # resource, use the default value. - # - import resource # Resource usage information. - maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] - if (maxfd == resource.RLIM_INFINITY): - maxfd = MAXFD - - # Iterate through and close all file descriptors. -# for fd in range(0, maxfd): -# try: -# os.close(fd) -# except OSError: # ERROR, fd wasn't open to begin with (ignored) -# pass - - # Redirect the standard I/O file descriptors to the specified file. Since - # the daemon has no controlling terminal, most daemons redirect stdin, - # stdout, and stderr to /dev/null. This is done to prevent side-effects - # from reads and writes to the standard I/O file descriptors. - - # This call to open is guaranteed to return the lowest file descriptor, - # which will be 0 (stdin), since it was closed above. -# os.open(REDIRECT_TO, os.O_RDWR) # standard input (0) - - # Duplicate standard input to standard output and standard error. 
-# os.dup2(0, 1) # standard output (1) -# os.dup2(0, 2) # standard error (2) - - - si = file('/dev/null', 'r') - so = file(logfile, 'w') - se = so - - - # Replace those fds with our own - os.dup2(si.fileno(), sys.stdin.fileno()) - os.dup2(so.fileno(), sys.stdout.fileno()) - os.dup2(se.fileno(), sys.stderr.fileno()) - - function() - - os._exit(0) +""" +Python Deamonizing helper + +Configurable daemon behaviors: + + 1.) The current working directory set to the "/" directory. + 2.) The current file creation mode mask set to 0. + 3.) Close all open files (1024). + 4.) Redirect standard I/O streams to "/dev/null". + +A failed call to fork() now raises an exception. + +References: + 1) Advanced Programming in the Unix Environment: W. Richard Stevens + 2) Unix Programming Frequently Asked Questions: + http://www.erlenstar.demon.co.uk/unix/faq_toc.html + +Modified to allow a function to be daemonized and return for +bitbake use by Richard Purdie +""" + +__author__ = "Chad J. Schroeder" +__copyright__ = "Copyright (C) 2005 Chad J. Schroeder" +__version__ = "0.2" + +# Standard Python modules. +import os # Miscellaneous OS interfaces. +import sys # System-specific parameters and functions. + +# Default daemon parameters. +# File mode creation mask of the daemon. +# For BitBake's children, we do want to inherit the parent umask. +UMASK = None + +# Default maximum for the number of available file descriptors. +MAXFD = 1024 + +# The standard I/O file descriptors are redirected to /dev/null by default. +if (hasattr(os, "devnull")): + REDIRECT_TO = os.devnull +else: + REDIRECT_TO = "/dev/null" + +def createDaemon(function, logfile): + """ + Detach a process from the controlling terminal and run it in the + background as a daemon, returning control to the caller. + """ + + try: + # Fork a child process so the parent can exit. This returns control to + # the command-line or shell. It also guarantees that the child will not + # be a process group leader, since the child receives a new process ID + # and inherits the parent's process group ID. This step is required + # to insure that the next call to os.setsid is successful. + pid = os.fork() + except OSError as e: + raise Exception("%s [%d]" % (e.strerror, e.errno)) + + if (pid == 0): # The first child. + # To become the session leader of this new session and the process group + # leader of the new process group, we call os.setsid(). The process is + # also guaranteed not to have a controlling terminal. + os.setsid() + + # Is ignoring SIGHUP necessary? + # + # It's often suggested that the SIGHUP signal should be ignored before + # the second fork to avoid premature termination of the process. The + # reason is that when the first child terminates, all processes, e.g. + # the second child, in the orphaned group will be sent a SIGHUP. + # + # "However, as part of the session management system, there are exactly + # two cases where SIGHUP is sent on the death of a process: + # + # 1) When the process that dies is the session leader of a session that + # is attached to a terminal device, SIGHUP is sent to all processes + # in the foreground process group of that terminal device. + # 2) When the death of a process causes a process group to become + # orphaned, and one or more processes in the orphaned group are + # stopped, then SIGHUP and SIGCONT are sent to all members of the + # orphaned group." [2] + # + # The first case can be ignored since the child is guaranteed not to have + # a controlling terminal. The second case isn't so easy to dismiss. 
+ # The process group is orphaned when the first child terminates and + # POSIX.1 requires that every STOPPED process in an orphaned process + # group be sent a SIGHUP signal followed by a SIGCONT signal. Since the + # second child is not STOPPED though, we can safely forego ignoring the + # SIGHUP signal. In any case, there are no ill-effects if it is ignored. + # + # import signal # Set handlers for asynchronous events. + # signal.signal(signal.SIGHUP, signal.SIG_IGN) + + try: + # Fork a second child and exit immediately to prevent zombies. This + # causes the second child process to be orphaned, making the init + # process responsible for its cleanup. And, since the first child is + # a session leader without a controlling terminal, it's possible for + # it to acquire one by opening a terminal in the future (System V- + # based systems). This second fork guarantees that the child is no + # longer a session leader, preventing the daemon from ever acquiring + # a controlling terminal. + pid = os.fork() # Fork a second child. + except OSError as e: + raise Exception("%s [%d]" % (e.strerror, e.errno)) + + if (pid == 0): # The second child. + # We probably don't want the file mode creation mask inherited from + # the parent, so we give the child complete control over permissions. + if UMASK is not None: + os.umask(UMASK) + else: + # Parent (the first child) of the second child. + os._exit(0) + else: + # exit() or _exit()? + # _exit is like exit(), but it doesn't call any functions registered + # with atexit (and on_exit) or any registered signal handlers. It also + # closes any open file descriptors. Using exit() may cause all stdio + # streams to be flushed twice and any temporary files may be unexpectedly + # removed. It's therefore recommended that child branches of a fork() + # and the parent branch(es) of a daemon use _exit(). + return + + # Close all open file descriptors. This prevents the child from keeping + # open any file descriptors inherited from the parent. There is a variety + # of methods to accomplish this task. Three are listed below. + # + # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum + # number of open file descriptors to close. If it doesn't exists, use + # the default value (configurable). + # + # try: + # maxfd = os.sysconf("SC_OPEN_MAX") + # except (AttributeError, ValueError): + # maxfd = MAXFD + # + # OR + # + # if (os.sysconf_names.has_key("SC_OPEN_MAX")): + # maxfd = os.sysconf("SC_OPEN_MAX") + # else: + # maxfd = MAXFD + # + # OR + # + # Use the getrlimit method to retrieve the maximum file descriptor number + # that can be opened by this process. If there is not limit on the + # resource, use the default value. + # + import resource # Resource usage information. + maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] + if (maxfd == resource.RLIM_INFINITY): + maxfd = MAXFD + + # Iterate through and close all file descriptors. +# for fd in range(0, maxfd): +# try: +# os.close(fd) +# except OSError: # ERROR, fd wasn't open to begin with (ignored) +# pass + + # Redirect the standard I/O file descriptors to the specified file. Since + # the daemon has no controlling terminal, most daemons redirect stdin, + # stdout, and stderr to /dev/null. This is done to prevent side-effects + # from reads and writes to the standard I/O file descriptors. + + # This call to open is guaranteed to return the lowest file descriptor, + # which will be 0 (stdin), since it was closed above. 
+# os.open(REDIRECT_TO, os.O_RDWR) # standard input (0) + + # Duplicate standard input to standard output and standard error. +# os.dup2(0, 1) # standard output (1) +# os.dup2(0, 2) # standard error (2) + + + si = file('/dev/null', 'r') + so = file(logfile, 'w') + se = so + + + # Replace those fds with our own + os.dup2(si.fileno(), sys.stdin.fileno()) + os.dup2(so.fileno(), sys.stdout.fileno()) + os.dup2(se.fileno(), sys.stderr.fileno()) + + function() + + os._exit(0) diff --git a/bitbake/lib/bb/data.py b/bitbake/lib/bb/data.py index 85de6bfeb3..e401c53429 100644 --- a/bitbake/lib/bb/data.py +++ b/bitbake/lib/bb/data.py @@ -193,7 +193,7 @@ def emit_var(var, o=sys.__stdout__, d = init(), all=False): if all: o.write('# %s=%s\n' % (var, oval)) - if type(val) is not types.StringType: + if not isinstance(val, types.StringType): return 0 if (var.find("-") != -1 or var.find(".") != -1 or var.find('{') != -1 or var.find('}') != -1 or var.find('+') != -1) and not all: diff --git a/bitbake/lib/bb/data_smart.py b/bitbake/lib/bb/data_smart.py index 2edeec064e..1704ed631c 100644 --- a/bitbake/lib/bb/data_smart.py +++ b/bitbake/lib/bb/data_smart.py @@ -66,10 +66,10 @@ class DataSmart: code = match.group()[3:-1] codeobj = compile(code.strip(), varname or "", "eval") s = utils.better_eval(codeobj, {"d": self}) - if type(s) == types.IntType: s = str(s) + if isinstance(s, types.IntType): s = str(s) return s - if type(s) is not types.StringType: # sanity check + if not isinstance(s, types.StringType): # sanity check return s if varname and varname in self.expand_cache: @@ -81,7 +81,7 @@ class DataSmart: s = __expand_var_regexp__.sub(var_sub, s) s = __expand_python_regexp__.sub(python_sub, s) if s == olds: break - if type(s) is not types.StringType: # sanity check + if not isinstance(s, types.StringType): # sanity check bb.msg.error(bb.msg.domain.Data, 'expansion of %s returned non-string %s' % (olds, s)) except KeyboardInterrupt: raise @@ -118,7 +118,7 @@ class DataSmart: l = len(o)+1 # see if one should even try - if not self._seen_overrides.has_key(o): + if o not in self._seen_overrides: continue vars = self._seen_overrides[o] @@ -130,7 +130,7 @@ class DataSmart: bb.msg.note(1, bb.msg.domain.Data, "Untracked delVar") # now on to the appends and prepends - if self._special_values.has_key("_append"): + if "_append" in self._special_values: appends = self._special_values['_append'] or [] for append in appends: for (a, o) in self.getVarFlag(append, '_append') or []: @@ -145,7 +145,7 @@ class DataSmart: self.setVar(append, sval) - if self._special_values.has_key("_prepend"): + if "_prepend" in self._special_values: prepends = self._special_values['_prepend'] or [] for prepend in prepends: @@ -215,7 +215,7 @@ class DataSmart: # more cookies for the cookie monster if '_' in var: override = var[var.rfind('_')+1:] - if not self._seen_overrides.has_key(override): + if override not in self._seen_overrides: self._seen_overrides[override] = set() self._seen_overrides[override].add( var ) @@ -246,7 +246,7 @@ class DataSmart: dest.extend(src) self.setVarFlag(newkey, i, dest) - if self._special_values.has_key(i) and key in self._special_values[i]: + if i in self._special_values and key in self._special_values[i]: self._special_values[i].remove(key) self._special_values[i].add(newkey) diff --git a/bitbake/lib/bb/fetch/cvs.py b/bitbake/lib/bb/fetch/cvs.py index c0d43618f9..61976f7ef4 100644 --- a/bitbake/lib/bb/fetch/cvs.py +++ b/bitbake/lib/bb/fetch/cvs.py @@ -139,8 +139,8 @@ class Cvs(Fetch): bb.msg.debug(2, 
bb.msg.domain.Fetcher, "Fetch: checking for module directory") pkg = data.expand('${PN}', d) pkgdir = os.path.join(data.expand('${CVSDIR}', localdata), pkg) - moddir = os.path.join(pkgdir,localdir) - if os.access(os.path.join(moddir,'CVS'), os.R_OK): + moddir = os.path.join(pkgdir, localdir) + if os.access(os.path.join(moddir, 'CVS'), os.R_OK): bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc) # update sources there os.chdir(moddir) diff --git a/bitbake/lib/bb/fetch/perforce.py b/bitbake/lib/bb/fetch/perforce.py index 67de6f59fa..5b6c601876 100644 --- a/bitbake/lib/bb/fetch/perforce.py +++ b/bitbake/lib/bb/fetch/perforce.py @@ -35,15 +35,15 @@ class Perforce(Fetch): def supports(self, url, ud, d): return ud.type in ['p4'] - def doparse(url,d): + def doparse(url, d): parm = {} path = url.split("://")[1] delim = path.find("@"); if delim != -1: - (user,pswd,host,port) = path.split('@')[0].split(":") + (user, pswd, host, port) = path.split('@')[0].split(":") path = path.split('@')[1] else: - (host,port) = data.getVar('P4PORT', d).split(':') + (host, port) = data.getVar('P4PORT', d).split(':') user = "" pswd = "" @@ -53,19 +53,19 @@ class Perforce(Fetch): plist = path.split(';') for item in plist: if item.count('='): - (key,value) = item.split('=') + (key, value) = item.split('=') keys.append(key) values.append(value) - parm = dict(zip(keys,values)) + parm = dict(zip(keys, values)) path = "//" + path.split(';')[0] host += ":%s" % (port) parm["cset"] = Perforce.getcset(d, path, host, user, pswd, parm) - return host,path,user,pswd,parm + return host, path, user, pswd, parm doparse = staticmethod(doparse) - def getcset(d, depot,host,user,pswd,parm): + def getcset(d, depot, host, user, pswd, parm): p4opt = "" if "cset" in parm: return parm["cset"]; @@ -97,7 +97,7 @@ class Perforce(Fetch): def localpath(self, url, ud, d): - (host,path,user,pswd,parm) = Perforce.doparse(url,d) + (host, path, user, pswd, parm) = Perforce.doparse(url, d) # If a label is specified, we use that as our filename @@ -115,7 +115,7 @@ class Perforce(Fetch): cset = Perforce.getcset(d, path, host, user, pswd, parm) - ud.localfile = data.expand('%s+%s+%s.tar.gz' % (host,base.replace('/', '.'), cset), d) + ud.localfile = data.expand('%s+%s+%s.tar.gz' % (host, base.replace('/', '.'), cset), d) return os.path.join(data.getVar("DL_DIR", d, 1), ud.localfile) @@ -124,7 +124,7 @@ class Perforce(Fetch): Fetch urls """ - (host,depot,user,pswd,parm) = Perforce.doparse(loc, d) + (host, depot, user, pswd, parm) = Perforce.doparse(loc, d) if depot.find('/...') != -1: path = depot[:depot.find('/...')] @@ -164,10 +164,10 @@ class Perforce(Fetch): raise FetchError(module) if "label" in parm: - depot = "%s@%s" % (depot,parm["label"]) + depot = "%s@%s" % (depot, parm["label"]) else: cset = Perforce.getcset(d, depot, host, user, pswd, parm) - depot = "%s@%s" % (depot,cset) + depot = "%s@%s" % (depot, cset) os.chdir(tmpfile) bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc) @@ -189,7 +189,7 @@ class Perforce(Fetch): dest = list[0][len(path)+1:] where = dest.find("#") - os.system("%s%s print -o %s/%s %s" % (p4cmd, p4opt, module,dest[:where],list[0])) + os.system("%s%s print -o %s/%s %s" % (p4cmd, p4opt, module, dest[:where], list[0])) count = count + 1 if count == 0: diff --git a/bitbake/lib/bb/fetch/wget.py b/bitbake/lib/bb/fetch/wget.py index 8b687372a4..581362038a 100644 --- a/bitbake/lib/bb/fetch/wget.py +++ b/bitbake/lib/bb/fetch/wget.py @@ -38,7 +38,7 @@ class Wget(Fetch): """ Check to see if a given url can be fetched with wget. 
""" - return ud.type in ['http','https','ftp'] + return ud.type in ['http', 'https', 'ftp'] def localpath(self, url, ud, d): diff --git a/bitbake/lib/bb/parse/__init__.py b/bitbake/lib/bb/parse/__init__.py index adc1408b9e..4b957884cc 100644 --- a/bitbake/lib/bb/parse/__init__.py +++ b/bitbake/lib/bb/parse/__init__.py @@ -37,12 +37,12 @@ class SkipPackage(Exception): __mtime_cache = {} def cached_mtime(f): - if not __mtime_cache.has_key(f): + if f not in __mtime_cache: __mtime_cache[f] = os.stat(f)[8] return __mtime_cache[f] def cached_mtime_noerror(f): - if not __mtime_cache.has_key(f): + if f not in __mtime_cache: try: __mtime_cache[f] = os.stat(f)[8] except OSError: diff --git a/bitbake/lib/bb/parse/ast.py b/bitbake/lib/bb/parse/ast.py index a586c5cde1..e7d389e7a5 100644 --- a/bitbake/lib/bb/parse/ast.py +++ b/bitbake/lib/bb/parse/ast.py @@ -311,7 +311,7 @@ def finalize(fn, d): all_handlers = {} for var in bb.data.getVar('__BBHANDLERS', d) or []: # try to add the handler - handler = bb.data.getVar(var,d) + handler = bb.data.getVar(var, d) bb.event.register(var, handler) tasklist = bb.data.getVar('__BBTASKS', d) or [] diff --git a/bitbake/lib/bb/parse/parse_py/BBHandler.py b/bitbake/lib/bb/parse/parse_py/BBHandler.py index a770131fbc..a388773bb7 100644 --- a/bitbake/lib/bb/parse/parse_py/BBHandler.py +++ b/bitbake/lib/bb/parse/parse_py/BBHandler.py @@ -90,7 +90,7 @@ def get_statements(filename, absolsute_filename, base_name): statements = ast.StatementGroup() lineno = 0 - while 1: + while True: lineno = lineno + 1 s = file.readline() if not s: break @@ -118,7 +118,7 @@ def handle(fn, d, include): bb.msg.debug(2, bb.msg.domain.Parsing, "BB " + fn + ": handle(data, include)") (root, ext) = os.path.splitext(os.path.basename(fn)) - base_name = "%s%s" % (root,ext) + base_name = "%s%s" % (root, ext) init(d) if ext == ".bbclass": @@ -164,7 +164,7 @@ def handle(fn, d, include): return d def feeder(lineno, s, fn, root, statements): - global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__,__infunc__, __body__, classes, bb, __residue__ + global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__, __infunc__, __body__, classes, bb, __residue__ if __infunc__: if s == '}': __body__.append('') diff --git a/bitbake/lib/bb/parse/parse_py/ConfHandler.py b/bitbake/lib/bb/parse/parse_py/ConfHandler.py index 8e17182ba7..9188119e4d 100644 --- a/bitbake/lib/bb/parse/parse_py/ConfHandler.py +++ b/bitbake/lib/bb/parse/parse_py/ConfHandler.py @@ -89,7 +89,7 @@ def handle(fn, data, include): statements = ast.StatementGroup() lineno = 0 - while 1: + while True: lineno = lineno + 1 s = f.readline() if not s: break diff --git a/bitbake/lib/bb/persist_data.py b/bitbake/lib/bb/persist_data.py index a26244510a..80ddeb5560 100644 --- a/bitbake/lib/bb/persist_data.py +++ b/bitbake/lib/bb/persist_data.py @@ -52,7 +52,7 @@ class PersistData: except OSError: bb.utils.mkdirhier(self.cachedir) - self.cachefile = os.path.join(self.cachedir,"bb_persist_data.sqlite3") + self.cachefile = os.path.join(self.cachedir, "bb_persist_data.sqlite3") bb.msg.debug(1, bb.msg.domain.PersistData, "Using '%s' as the persistent data cache" % self.cachefile) self.connection = sqlite3.connect(self.cachefile, timeout=5, isolation_level=None) @@ -113,7 +113,7 @@ class PersistData: try: self.connection.execute(*query) return - 
except sqlite3.OperationalError, e: + except sqlite3.OperationalError as e: if 'database is locked' in str(e): continue raise diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py index de1160eb87..6025142e08 100644 --- a/bitbake/lib/bb/runqueue.py +++ b/bitbake/lib/bb/runqueue.py @@ -109,8 +109,7 @@ class RunQueueSchedulerSpeed(RunQueueScheduler): self.rq = runqueue - sortweight = deepcopy(self.rq.runq_weight) - sortweight.sort() + sortweight = sorted(deepcopy(self.rq.runq_weight)) copyweight = deepcopy(self.rq.runq_weight) self.prio_map = [] @@ -307,7 +306,7 @@ class RunQueue: weight[listid] = 1 task_done[listid] = True - while 1: + while True: next_points = [] for listid in endpoints: for revdep in self.runq_depends[listid]: @@ -631,7 +630,7 @@ class RunQueue: for dep in revdeps: if dep in self.runq_depends[listid]: #self.dump_data(taskData) - bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep] , taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid])) + bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep], taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid])) bb.msg.note(2, bb.msg.domain.RunQueue, "Compute totals (have %s endpoint(s))" % len(endpoints)) @@ -814,7 +813,7 @@ class RunQueue: bb.msg.debug(2, bb.msg.domain.RunQueue, "Stampfile %s < %s" % (stampfile, stampfile2)) iscurrent = False except: - bb.msg.debug(2, bb.msg.domain.RunQueue, "Exception reading %s for %s" % (stampfile2 , stampfile)) + bb.msg.debug(2, bb.msg.domain.RunQueue, "Exception reading %s for %s" % (stampfile2, stampfile)) iscurrent = False return iscurrent @@ -948,7 +947,7 @@ class RunQueue: try: pipein, pipeout = os.pipe() pid = os.fork() - except OSError, e: + except OSError as e: bb.msg.fatal(bb.msg.domain.RunQueue, "fork failed: %d (%s)" % (e.errno, e.strerror)) if pid == 0: os.close(pipein) diff --git a/bitbake/lib/bb/server/none.py b/bitbake/lib/bb/server/none.py index d4b7fdeea6..e28aa8d7d7 100644 --- a/bitbake/lib/bb/server/none.py +++ b/bitbake/lib/bb/server/none.py @@ -115,7 +115,7 @@ class BitBakeServer(): def register_idle_function(self, function, data): """Register a function to be called while the server is idle""" - assert callable(function) + assert hasattr(function, '__call__') self._idlefuns[function] = data def idle_commands(self, delay): diff --git a/bitbake/lib/bb/server/xmlrpc.py b/bitbake/lib/bb/server/xmlrpc.py index 3844a1e33e..cb2949fb9f 100644 --- a/bitbake/lib/bb/server/xmlrpc.py +++ b/bitbake/lib/bb/server/xmlrpc.py @@ -112,7 +112,7 @@ class BitBakeServer(SimpleXMLRPCServer): def register_idle_function(self, function, data): """Register a function to be called while the server is idle""" - assert callable(function) + assert hasattr(function, '__call__') self._idlefuns[function] = data def serve_forever(self): diff --git a/bitbake/lib/bb/taskdata.py b/bitbake/lib/bb/taskdata.py index 58e0d9d8f2..d4fd1498b6 100644 --- a/bitbake/lib/bb/taskdata.py +++ b/bitbake/lib/bb/taskdata.py @@ -34,7 +34,7 @@ def re_match_strings(target, strings): for name in strings: if (name==target or - re.search(name,target)!=None): + re.search(name, target)!=None): return True return False @@ -539,7 +539,7 @@ class TaskData: Resolve all unresolved build and runtime targets """ bb.msg.note(1, bb.msg.domain.TaskData, "Resolving any missing task queue dependencies") - while 1: + 
while True: added = 0 for target in self.get_unresolved_build_targets(dataCache): try: diff --git a/bitbake/lib/bb/ui/crumbs/buildmanager.py b/bitbake/lib/bb/ui/crumbs/buildmanager.py index 37a62f189f..b5a4dae0de 100644 --- a/bitbake/lib/bb/ui/crumbs/buildmanager.py +++ b/bitbake/lib/bb/ui/crumbs/buildmanager.py @@ -157,7 +157,7 @@ class BuildResult(gobject.GObject): # format build-- we can easily # pull it out. # TODO: Better to stat a file? - (_ , date, revision) = identifier.split ("-") + (_, date, revision) = identifier.split ("-") print(date) year = int (date[0:4]) @@ -385,7 +385,7 @@ class BuildManager (gobject.GObject): build_directory]) server.runCommand(["buildTargets", [conf.image], "rootfs"]) - except Exception, e: + except Exception as e: print(e) class BuildManagerTreeView (gtk.TreeView): diff --git a/bitbake/lib/bb/ui/crumbs/runningbuild.py b/bitbake/lib/bb/ui/crumbs/runningbuild.py index 79e2c9060d..b4416ecbb3 100644 --- a/bitbake/lib/bb/ui/crumbs/runningbuild.py +++ b/bitbake/lib/bb/ui/crumbs/runningbuild.py @@ -63,7 +63,7 @@ class RunningBuild (gobject.GObject): # for the message. if hasattr(event, 'pid'): pid = event.pid - if self.pids_to_task.has_key(pid): + if pid in self.pids_to_task: (package, task) = self.pids_to_task[pid] parent = self.tasks_to_iter[(package, task)] @@ -93,12 +93,12 @@ class RunningBuild (gobject.GObject): (package, task) = (event._package, event._task) # Save out this PID. - self.pids_to_task[pid] = (package,task) + self.pids_to_task[pid] = (package, task) # Check if we already have this package in our model. If so then # that can be the parent for the task. Otherwise we create a new # top level for the package. - if (self.tasks_to_iter.has_key ((package, None))): + if ((package, None) in self.tasks_to_iter): parent = self.tasks_to_iter[(package, None)] else: parent = self.model.append (None, (None, diff --git a/bitbake/lib/bb/ui/depexp.py b/bitbake/lib/bb/ui/depexp.py index e386e34958..1cd58cac18 100644 --- a/bitbake/lib/bb/ui/depexp.py +++ b/bitbake/lib/bb/ui/depexp.py @@ -207,7 +207,7 @@ def init(server, eventHandler): if ret != True: print("Couldn't run command! %s" % ret) return - except xmlrpclib.Fault, x: + except xmlrpclib.Fault as x: print("XMLRPC Fault getting commandline:\n %s" % x) return diff --git a/bitbake/lib/bb/ui/goggle.py b/bitbake/lib/bb/ui/goggle.py index 7a3427f715..2cfa002f8a 100644 --- a/bitbake/lib/bb/ui/goggle.py +++ b/bitbake/lib/bb/ui/goggle.py @@ -62,7 +62,7 @@ def init (server, eventHandler): if ret != True: print("Couldn't get default commandline! %s" % ret) return 1 - except xmlrpclib.Fault, x: + except xmlrpclib.Fault as x: print("XMLRPC Fault getting commandline:\n %s" % x) return 1 diff --git a/bitbake/lib/bb/ui/knotty.py b/bitbake/lib/bb/ui/knotty.py index dba9530ef6..b6ca15b4fb 100644 --- a/bitbake/lib/bb/ui/knotty.py +++ b/bitbake/lib/bb/ui/knotty.py @@ -46,7 +46,7 @@ def init(server, eventHandler): if ret != True: print("Couldn't get default commandline! %s" % ret) return 1 - except xmlrpclib.Fault, x: + except xmlrpclib.Fault as x: print("XMLRPC Fault getting commandline:\n %s" % x) return 1 diff --git a/bitbake/lib/bb/ui/ncurses.py b/bitbake/lib/bb/ui/ncurses.py index 89e67900b2..e3bca2af83 100644 --- a/bitbake/lib/bb/ui/ncurses.py +++ b/bitbake/lib/bb/ui/ncurses.py @@ -234,7 +234,7 @@ class NCursesUI: if ret != True: print("Couldn't get default commandlind! 
%s" % ret) return - except xmlrpclib.Fault, x: + except xmlrpclib.Fault as x: print("XMLRPC Fault getting commandline:\n %s" % x) return diff --git a/bitbake/lib/bb/ui/puccho.py b/bitbake/lib/bb/ui/puccho.py index 7dffa5c3ba..2ac025303e 100644 --- a/bitbake/lib/bb/ui/puccho.py +++ b/bitbake/lib/bb/ui/puccho.py @@ -104,10 +104,10 @@ class MetaDataLoader(gobject.GObject): gobject.idle_add (MetaDataLoader.emit_success_signal, self.loader) - except MetaDataLoader.LoaderThread.LoaderImportException, e: + except MetaDataLoader.LoaderThread.LoaderImportException as e: gobject.idle_add (MetaDataLoader.emit_error_signal, self.loader, "Repository metadata corrupt") - except Exception, e: + except Exception as e: gobject.idle_add (MetaDataLoader.emit_error_signal, self.loader, "Unable to download repository metadata") print(e) @@ -211,7 +211,7 @@ class BuildSetupDialog (gtk.Dialog): # Build button = gtk.Button ("_Build", None, True) image = gtk.Image () - image.set_from_stock (gtk.STOCK_EXECUTE,gtk.ICON_SIZE_BUTTON) + image.set_from_stock (gtk.STOCK_EXECUTE, gtk.ICON_SIZE_BUTTON) button.set_image (image) self.add_action_widget (button, BuildSetupDialog.RESPONSE_BUILD) button.show_all () diff --git a/bitbake/lib/bb/ui/uievent.py b/bitbake/lib/bb/ui/uievent.py index 5b3efffcba..f1e4d791ee 100644 --- a/bitbake/lib/bb/ui/uievent.py +++ b/bitbake/lib/bb/ui/uievent.py @@ -110,7 +110,7 @@ class UIXMLRPCServer (SimpleXMLRPCServer): return (sock, addr) except socket.timeout: pass - return (None,None) + return (None, None) def close_request(self, request): if request is None: diff --git a/bitbake/lib/bb/utils.py b/bitbake/lib/bb/utils.py index 7446be875d..02668b16c4 100644 --- a/bitbake/lib/bb/utils.py +++ b/bitbake/lib/bb/utils.py @@ -72,9 +72,9 @@ def vercmp_part(a, b): if ca == None and cb == None: return 0 - if type(ca) is types.StringType: + if isinstance(ca, types.StringType): sa = ca in separators - if type(cb) is types.StringType: + if isinstance(cb, types.StringType): sb = cb in separators if sa and not sb: return -1 @@ -306,7 +306,7 @@ def better_compile(text, file, realfile, mode = "exec"): """ try: return compile(text, file, mode) - except Exception, e: + except Exception as e: # split the text into lines again body = text.split('\n') bb.msg.error(bb.msg.domain.Util, "Error in compiling python function in: ", realfile) @@ -385,7 +385,7 @@ def lockfile(name): return lf # File no longer exists or changed, retry lf.close - except Exception, e: + except Exception as e: continue def unlockfile(lf): @@ -546,7 +546,7 @@ def mkdirhier(dir): try: os.makedirs(dir) bb.msg.debug(2, bb.msg.domain.Util, "created " + dir) - except OSError, e: + except OSError as e: if e.errno != errno.EEXIST: raise e @@ -561,7 +561,7 @@ def movefile(src, dest, newmtime = None, sstat = None): try: if not sstat: sstat = os.lstat(src) - except Exception, e: + except Exception as e: print("movefile: Stating source file failed...", e) return None @@ -577,7 +577,7 @@ def movefile(src, dest, newmtime = None, sstat = None): try: os.unlink(dest) destexists = 0 - except Exception, e: + except Exception as e: pass if stat.S_ISLNK(sstat[stat.ST_MODE]): @@ -589,7 +589,7 @@ def movefile(src, dest, newmtime = None, sstat = None): #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) os.unlink(src) return os.lstat(dest) - except Exception, e: + except Exception as e: print("movefile: failed to properly create symlink:", dest, "->", target, e) return None @@ -598,7 +598,7 @@ def movefile(src, dest, newmtime = None, sstat = None): try: 
os.rename(src, dest) renamefailed = 0 - except Exception, e: + except Exception as e: if e[0] != errno.EXDEV: # Some random error. print("movefile: Failed to move", src, "to", dest, e) @@ -612,7 +612,7 @@ def movefile(src, dest, newmtime = None, sstat = None): shutil.copyfile(src, dest + "#new") os.rename(dest + "#new", dest) didcopy = 1 - except Exception, e: + except Exception as e: print('movefile: copy', src, '->', dest, 'failed.', e) return None else: @@ -626,7 +626,7 @@ def movefile(src, dest, newmtime = None, sstat = None): os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID]) os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown os.unlink(src) - except Exception, e: + except Exception as e: print("movefile: Failed to chown/chmod/unlink", dest, e) return None @@ -647,7 +647,7 @@ def copyfile(src, dest, newmtime = None, sstat = None): try: if not sstat: sstat = os.lstat(src) - except Exception, e: + except Exception as e: print("copyfile: Stating source file failed...", e) return False @@ -663,7 +663,7 @@ def copyfile(src, dest, newmtime = None, sstat = None): try: os.unlink(dest) destexists = 0 - except Exception, e: + except Exception as e: pass if stat.S_ISLNK(sstat[stat.ST_MODE]): @@ -674,7 +674,7 @@ def copyfile(src, dest, newmtime = None, sstat = None): os.symlink(target, dest) #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) return os.lstat(dest) - except Exception, e: + except Exception as e: print("copyfile: failed to properly create symlink:", dest, "->", target, e) return False @@ -682,7 +682,7 @@ def copyfile(src, dest, newmtime = None, sstat = None): try: # For safety copy then move it over. shutil.copyfile(src, dest + "#new") os.rename(dest + "#new", dest) - except Exception, e: + except Exception as e: print('copyfile: copy', src, '->', dest, 'failed.', e) return False else: @@ -694,7 +694,7 @@ def copyfile(src, dest, newmtime = None, sstat = None): try: os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID]) os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown - except Exception, e: + except Exception as e: print("copyfile: Failed to chown/chmod/unlink", dest, e) return False
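
The hunks above repeat a small set of forward-compatible idioms that 2to3 suggests and that already work on Python 2.6: `except ... as e`, `raise Exc(msg)`, `isinstance()` in place of `type() is`, `key in dict` in place of `has_key()`, `while True`, and `hasattr(obj, '__call__')` in place of `callable()`. The snippet below is not part of the patch; it is a hypothetical, self-contained sketch (the names `parse_port`, `Registry`, and `mkdirhier_example` are illustrative, not BitBake APIs) that collects those idioms in one place so the intent of the mechanical changes is easier to see.

    # Not part of the patch: an illustration of the Python 2.6-safe idioms
    # this commit standardizes on. Runs unmodified on 2.6+ and 3.x.

    import errno
    import os


    def parse_port(value):
        # "raise Exc, msg" becomes "raise Exc(msg)" -- valid on 2.6, required on 3.
        if not isinstance(value, str):          # replaces: type(value) is types.StringType
            raise ValueError("port must be a string")
        try:
            port = int(value)
        except ValueError as e:                 # replaces: except ValueError, e
            raise ValueError("bad port %r: %s" % (value, e))
        return port


    class Registry:
        def __init__(self):
            self.handlers = {}

        def register(self, name, function):
            # replaces: assert callable(function)
            # callable() was removed in Python 3.0 (restored in 3.2), so the
            # patch spells the check via the __call__ attribute instead.
            assert hasattr(function, '__call__')
            self.handlers[name] = function

        def fire(self, name):
            # replaces: self.handlers.has_key(name)
            if name in self.handlers:
                self.handlers[name]()


    def mkdirhier_example(path):
        try:
            os.makedirs(path)
        except OSError as e:                    # replaces: except OSError, e
            if e.errno != errno.EEXIST:
                raise


    if __name__ == "__main__":
        reg = Registry()
        reg.register("ping", lambda: None)
        reg.fire("ping")
        while True:                             # replaces: while 1
            break
        print(parse_port("8080"))

Because all of these spellings are accepted by Python 2.6, the patch stays compatible with the interpreter BitBake targeted at the time while removing syntax that Python 3 rejects outright.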