[IMP] openerp.tools: _logger with fully qualified module name.

bzr revid: vmt@openerp.com-20120124140056-hqoy49bh7wyr1xce
Author: Vo Minh Thu, 2012-01-24 15:00:56 +01:00
parent f0b63f8028
commit a142292f91
6 changed files with 88 additions and 101 deletions
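
The pattern applied across these six files replaces ad-hoc logger channels ('init', 'i18n', 'tools', 'safe_eval', self.logger, loglevels.Logger().notifyChannel) with one module-level logger named after the fully qualified module. A minimal sketch of that pattern follows; the helper function is purely illustrative and not part of the commit:

    import logging

    # One logger per module, keyed by its fully qualified module name
    # (e.g. 'openerp.tools.convert') instead of a short channel like 'init'.
    _logger = logging.getLogger(__name__)

    def _load_pytz():
        # Illustrative helper: every call site uses the shared _logger
        # rather than logging.getLogger('init') or an instance-level self.logger.
        try:
            import pytz
            return pytz
        except ImportError:
            _logger.warning('could not find pytz library, please install it')
            return None

Naming loggers with __name__ places every message under the openerp.* hierarchy, so verbosity can be tuned per sub-package through the standard logging configuration rather than by remembering per-file channel names.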

@ -29,10 +29,13 @@ import re
# for eval context:
import time
import openerp.release as release
_logger = logging.getLogger(__name__)
try:
import pytz
except:
logging.getLogger("init").warning('could not find pytz library, please install it')
_logger.warning('could not find pytz library, please install it')
class pytzclass(object):
all_timezones=[]
pytz=pytzclass()
@ -135,8 +138,7 @@ def _eval_xml(self, node, pool, cr, uid, idref, context=None):
try:
return unsafe_eval(a_eval, idref2)
except Exception:
logger = logging.getLogger('init')
logger.warning('could not eval(%s) for %s in %s' % (a_eval, node.get('name'), context), exc_info=True)
_logger.warning('could not eval(%s) for %s in %s' % (a_eval, node.get('name'), context), exc_info=True)
return ""
if t == 'xml':
def _process(s, idref):
@ -228,7 +230,6 @@ class assertion_report(object):
return res
class xml_import(object):
__logger = logging.getLogger('tools.convert.xml_import')
@staticmethod
def nodeattr2bool(node, attr, default=False):
if not node.get(attr):
@ -258,7 +259,7 @@ class xml_import(object):
# client-side, so in that case we keep the original context string
# as it is. We also log it, just in case.
context = ctx
logging.getLogger("init").debug('Context value (%s) for element with id "%s" or its data node does not parse '\
_logger.debug('Context value (%s) for element with id "%s" or its data node does not parse '\
'at server-side, keeping original string, in case it\'s meant for client side only',
ctx, node.get('id','n/a'), exc_info=True)
return context
@ -281,7 +282,7 @@ form: module.record_id""" % (xml_id,)
assert modcnt == 1, """The ID "%s" refers to an uninstalled module""" % (xml_id,)
if len(id) > 64:
self.logger.error('id: %s is to long (max: 64)', id)
_logger.error('id: %s is to long (max: 64)', id)
def _tag_delete(self, cr, rec, data_node=None):
d_model = rec.get("model",'')
@ -486,9 +487,9 @@ form: module.record_id""" % (xml_id,)
# Some domains contain references that are only valid at runtime at
# client-side, so in that case we keep the original domain string
# as it is. We also log it, just in case.
logging.getLogger("init").debug('Domain value (%s) for element with id "%s" does not parse '\
'at server-side, keeping original string, in case it\'s meant for client side only',
domain, xml_id or 'n/a', exc_info=True)
_logger.debug('Domain value (%s) for element with id "%s" does not parse '\
'at server-side, keeping original string, in case it\'s meant for client side only',
domain, xml_id or 'n/a', exc_info=True)
res = {
'name': name,
'type': type,
@ -593,7 +594,7 @@ form: module.record_id""" % (xml_id,)
pid = res[0]
else:
# the menuitem does't exist but we are in branch (not a leaf)
self.logger.warning('Warning no ID for submenu %s of menu %s !', menu_elem, str(m_l))
_logger.warning('Warning no ID for submenu %s of menu %s !', menu_elem, str(m_l))
pid = self.pool.get('ir.ui.menu').create(cr, self.uid, {'parent_id' : pid, 'name' : menu_elem})
values['parent_id'] = pid
else:
@ -733,7 +734,7 @@ form: module.record_id""" % (xml_id,)
' obtained count: %d\n' \
% (rec_string, count, len(ids))
sevval = getattr(logging, severity.upper())
self.logger.log(sevval, msg)
_logger.log(sevval, msg)
if sevval >= config['assert_exit_level']:
# TODO: define a dedicated exception
raise Exception('Severe assertion failure')
@ -765,7 +766,7 @@ form: module.record_id""" % (xml_id,)
' obtained value: %r\n' \
% (rec_string, etree.tostring(test), expected_value, expression_value)
sevval = getattr(logging, severity.upper())
self.logger.log(sevval, msg)
_logger.log(sevval, msg)
if sevval >= config['assert_exit_level']:
# TODO: define a dedicated exception
raise Exception('Severe assertion failure')
@ -876,11 +877,11 @@ form: module.record_id""" % (xml_id,)
def parse(self, de):
if not de.tag in ['terp', 'openerp']:
self.logger.error("Mismatch xml format")
_logger.error("Mismatch xml format")
raise Exception( "Mismatch xml format: only terp or openerp as root tag" )
if de.tag == 'terp':
self.logger.warning("The tag <terp/> is deprecated, use <openerp/>")
_logger.warning("The tag <terp/> is deprecated, use <openerp/>")
for n in de.findall('./data'):
for rec in n:
@ -888,17 +889,16 @@ form: module.record_id""" % (xml_id,)
try:
self._tags[rec.tag](self.cr, rec, n)
except:
self.__logger.error('Parse error in %s:%d: \n%s',
rec.getroottree().docinfo.URL,
rec.sourceline,
etree.tostring(rec).strip(), exc_info=True)
_logger.error('Parse error in %s:%d: \n%s',
rec.getroottree().docinfo.URL,
rec.sourceline,
etree.tostring(rec).strip(), exc_info=True)
self.cr.rollback()
raise
return True
def __init__(self, cr, module, idref, mode, report=None, noupdate=False):
self.logger = logging.getLogger('init')
self.mode = mode
self.module = module
self.cr = cr
@ -931,7 +931,6 @@ def convert_csv_import(cr, module, fname, csvcontent, idref=None, mode='init',
encoding: utf-8'''
if not idref:
idref={}
logger = logging.getLogger('init')
model = ('.'.join(fname.split('.')[:-1]).split('-'))[0]
#remove folder path from model
head, model = os.path.split(model)
@ -956,7 +955,7 @@ def convert_csv_import(cr, module, fname, csvcontent, idref=None, mode='init',
reader.next()
if not (mode == 'init' or 'id' in fields):
logger.error("Import specification does not contain 'id' and we are in init mode, Cannot continue.")
_logger.error("Import specification does not contain 'id' and we are in init mode, Cannot continue.")
return
uid = 1
@ -967,7 +966,7 @@ def convert_csv_import(cr, module, fname, csvcontent, idref=None, mode='init',
try:
datas.append(map(lambda x: misc.ustr(x), line))
except:
logger.error("Cannot import the line: %s", line)
_logger.error("Cannot import the line: %s", line)
result, rows, warning_msg, dummy = pool.get(model).import_data(cr, uid, fields, datas,mode, module, noupdate, filename=fname_partial)
if result < 0:
# Report failed import and abort module install
@ -988,9 +987,8 @@ def convert_xml_import(cr, module, xmlfile, idref=None, mode='init', noupdate=Fa
try:
relaxng.assert_(doc)
except Exception:
logger = loglevels.Logger()
logger.notifyChannel('init', loglevels.LOG_ERROR, 'The XML file does not fit the required schema !')
logger.notifyChannel('init', loglevels.LOG_ERROR, misc.ustr(relaxng.error_log.last_error))
_logger.error('The XML file does not fit the required schema !')
_logger.error(misc.ustr(relaxng.error_log.last_error))
raise
if idref is None:

@ -66,7 +66,7 @@ from cache import *
# There are moved to loglevels until we refactor tools.
from openerp.loglevels import get_encodings, ustr, exception_to_unicode
_logger = logging.getLogger('tools')
_logger = logging.getLogger(__name__)
# List of etree._Element subclasses that we choose to ignore when parsing XML.
# We include the *Base ones just in case, currently they seem to be subclasses of the _* ones.
@ -386,7 +386,7 @@ def email_send(email_from, email_to, subject, body, email_cc=None, email_bcc=Non
smtp_server=smtp_server, smtp_port=smtp_port, smtp_user=smtp_user, smtp_password=smtp_password,
smtp_encryption=('ssl' if ssl else None), debug=debug)
except Exception:
_log.exception("tools.email_send failed to deliver email")
_logger.exception("tools.email_send failed to deliver email")
return False
finally:
cr.close()
@ -706,7 +706,7 @@ def logged(f):
vector.append(' result: %s' % pformat(res))
vector.append(' time delta: %s' % (time.time() - timeb4))
loglevels.Logger().notifyChannel('logged', loglevels.LOG_DEBUG, '\n'.join(vector))
_logger.debug('\n'.join(vector))
return res
return wrapper
@ -876,8 +876,8 @@ def detect_server_timezone():
try:
import pytz
except Exception:
loglevels.Logger().notifyChannel("detect_server_timezone", loglevels.LOG_WARNING,
"Python pytz module is not available. Timezone will be set to UTC by default.")
_logger.warning("Python pytz module is not available. "
"Timezone will be set to UTC by default.")
return 'UTC'
# Option 1: the configuration option (did not exist before, so no backwards compatibility issue)
@ -910,15 +910,14 @@ def detect_server_timezone():
if value:
try:
tz = pytz.timezone(value)
loglevels.Logger().notifyChannel("detect_server_timezone", loglevels.LOG_INFO,
"Using timezone %s obtained from %s." % (tz.zone,source))
_logger.info("Using timezone %s obtained from %s.", tz.zone, source)
return value
except pytz.UnknownTimeZoneError:
loglevels.Logger().notifyChannel("detect_server_timezone", loglevels.LOG_WARNING,
"The timezone specified in %s (%s) is invalid, ignoring it." % (source,value))
_logger.warning("The timezone specified in %s (%s) is invalid, ignoring it.", source, value)
loglevels.Logger().notifyChannel("detect_server_timezone", loglevels.LOG_WARNING,
"No valid timezone could be detected, using default UTC timezone. You can specify it explicitly with option 'timezone' in the server configuration.")
_logger.warning("No valid timezone could be detected, using default UTC "
"timezone. You can specify it explicitly with option 'timezone' in "
"the server configuration.")
return 'UTC'
def get_server_timezone():

@ -70,7 +70,7 @@ _SAFE_OPCODES = _EXPR_OPCODES.union(set(opmap[x] for x in [
'POP_JUMP_IF_TRUE', 'SETUP_EXCEPT', 'END_FINALLY'
] if x in opmap))
_logger = logging.getLogger('safe_eval')
_logger = logging.getLogger(__name__)
def _get_opcodes(codeobj):
"""_get_opcodes(codeobj) -> [opcodes]
@ -206,8 +206,9 @@ def safe_eval(expr, globals_dict=None, locals_dict=None, mode="eval", nocopy=Fal
# isinstance() does not work below, we want *exactly* the dict class
if (globals_dict is not None and type(globals_dict) is not dict) \
or (locals_dict is not None and type(locals_dict) is not dict):
logging.getLogger('safe_eval').warning('Looks like you are trying to pass a dynamic environment,"\
"you should probably pass nocopy=True to safe_eval()')
_logger.warning(
"Looks like you are trying to pass a dynamic environment, "
"you should probably pass nocopy=True to safe_eval().")
globals_dict = dict(globals_dict)
if locals_dict is not None:

@ -34,15 +34,13 @@ from subprocess import Popen, PIPE
import os
import tempfile
_logger = logging.getLogger(__name__)
def try_report(cr, uid, rname, ids, data=None, context=None, our_module=None):
""" Try to render a report <rname> with contents of ids
This function should also check for common pitfalls of reports.
"""
if our_module:
log = logging.getLogger('tests.%s' % our_module)
else:
log = logging.getLogger('tools.test_reports')
if data is None:
data = {}
if context is None:
@ -51,7 +49,7 @@ def try_report(cr, uid, rname, ids, data=None, context=None, our_module=None):
rname_s = rname[7:]
else:
rname_s = rname
log.log(netsvc.logging.TEST, " - Trying %s.create(%r)", rname, ids)
_logger.log(netsvc.logging.TEST, " - Trying %s.create(%r)", rname, ids)
res = netsvc.LocalService(rname).create(cr, uid, ids, data, context)
if not isinstance(res, tuple):
raise RuntimeError("Result of %s.create() should be a (data,format) tuple, now it is a %s" % \
@ -64,7 +62,7 @@ def try_report(cr, uid, rname, ids, data=None, context=None, our_module=None):
if tools.config['test_report_directory']:
file(os.path.join(tools.config['test_report_directory'], rname+ '.'+res_format), 'wb+').write(res_data)
log.debug("Have a %s report for %s, will examine it", res_format, rname)
_logger.debug("Have a %s report for %s, will examine it", res_format, rname)
if res_format == 'pdf':
if res_data[:5] != '%PDF-':
raise ValueError("Report %s produced a non-pdf header, %r" % (rname, res_data[:10]))
@ -79,21 +77,21 @@ def try_report(cr, uid, rname, ids, data=None, context=None, our_module=None):
res_text = tools.ustr(fp.read())
os.unlink(rfname)
except Exception:
log.debug("Unable to parse PDF report: install pdftotext to perform automated tests.")
_logger.debug("Unable to parse PDF report: install pdftotext to perform automated tests.")
if res_text is not False:
for line in res_text.split('\n'):
if ('[[' in line) or ('[ [' in line):
log.error("Report %s may have bad expression near: \"%s\".", rname, line[80:])
_logger.error("Report %s may have bad expression near: \"%s\".", rname, line[80:])
# TODO more checks, what else can be a sign of a faulty report?
elif res_format == 'foobar':
# TODO
pass
else:
log.warning("Report %s produced a \"%s\" chunk, cannot examine it", rname, res_format)
_logger.warning("Report %s produced a \"%s\" chunk, cannot examine it", rname, res_format)
return False
log.log(netsvc.logging.TEST, " + Report %s produced correctly.", rname)
_logger.log(netsvc.logging.TEST, " + Report %s produced correctly.", rname)
return True
def try_report_action(cr, uid, action_id, active_model=None, active_ids=None,
@ -125,13 +123,9 @@ def try_report_action(cr, uid, action_id, active_model=None, active_ids=None,
# TODO context fill-up
pool = pooler.get_pool(cr.dbname)
if our_module:
log = logging.getLogger('tests.%s' % our_module)
else:
log = logging.getLogger('tools.test_reports')
def log_test(msg, *args):
log.log(netsvc.logging.TEST, " - " + msg, *args)
_logger.log(netsvc.logging.TEST, " - " + msg, *args)
datas = {}
if active_model:
@ -195,7 +189,7 @@ def try_report_action(cr, uid, action_id, active_model=None, active_ids=None,
view_data.update(datas.get('form'))
if wiz_data:
view_data.update(wiz_data)
log.debug("View data is: %r", view_data)
_logger.debug("View data is: %r", view_data)
for fk, field in view_res.get('fields',{}).items():
# Default fields returns list of int, while at create()
@ -237,7 +231,7 @@ def try_report_action(cr, uid, action_id, active_model=None, active_ids=None,
'weight': button_weight,
})
except Exception, e:
log.warning("Cannot resolve the view arch and locate the buttons!", exc_info=True)
_logger.warning("Cannot resolve the view arch and locate the buttons!", exc_info=True)
raise AssertionError(e.args[0])
if not datas['res_id']:
@ -249,7 +243,7 @@ def try_report_action(cr, uid, action_id, active_model=None, active_ids=None,
raise AssertionError("view form doesn't have any buttons to press!")
buttons.sort(key=lambda b: b['weight'])
log.debug('Buttons are: %s', ', '.join([ '%s: %d' % (b['string'], b['weight']) for b in buttons]))
_logger.debug('Buttons are: %s', ', '.join([ '%s: %d' % (b['string'], b['weight']) for b in buttons]))
res = None
while buttons and not res:
@ -262,12 +256,12 @@ def try_report_action(cr, uid, action_id, active_model=None, active_ids=None,
#there we are! press the button!
fn = getattr(pool.get(datas['res_model']), b['name'])
if not fn:
log.error("The %s model doesn't have a %s attribute!", datas['res_model'], b['name'])
_logger.error("The %s model doesn't have a %s attribute!", datas['res_model'], b['name'])
continue
res = fn(cr, uid, [datas['res_id'],], context)
break
else:
log.warning("in the \"%s\" form, the \"%s\" button has unknown type %s",
_logger.warning("in the \"%s\" form, the \"%s\" button has unknown type %s",
action_name, b['string'], b['type'])
return res
@ -293,7 +287,7 @@ def try_report_action(cr, uid, action_id, active_model=None, active_ids=None,
loop += 1
# This part tries to emulate the loop of the Gtk client
if loop > 100:
log.error("Passed %d loops, giving up", loop)
_logger.error("Passed %d loops, giving up", loop)
raise Exception("Too many loops at action")
log_test("it is an %s action at loop #%d", action.get('type', 'unknown'), loop)
result = _exec_action(action, datas, context)

@ -44,6 +44,8 @@ from misc import UpdateableStr
from misc import SKIPPED_ELEMENT_TYPES
import osutil
_logger = logging.getLogger(__name__)
_LOCALE2WIN32 = {
'af_ZA': 'Afrikaans_South Africa',
'sq_AL': 'Albanian_Albania',
@ -153,8 +155,6 @@ def translate(cr, name, source_type, lang, source=None):
res = res_trans and res_trans[0] or False
return res
logger = logging.getLogger('translate')
class GettextAlias(object):
def _get_db(self):
@ -216,11 +216,11 @@ class GettextAlias(object):
pool = pooler.get_pool(cr.dbname)
res = pool.get('ir.translation')._get_source(cr, 1, None, ('code','sql_constraint'), lang, source)
else:
logger.debug('no context cursor detected, skipping translation for "%r"', source)
_logger.debug('no context cursor detected, skipping translation for "%r"', source)
else:
logger.debug('no translation language detected, skipping translation for "%r" ', source)
_logger.debug('no translation language detected, skipping translation for "%r" ', source)
except Exception:
logger.debug('translation went wrong for "%r", skipped', source)
_logger.debug('translation went wrong for "%r", skipped', source)
# if so, double-check the root/base translations filenames
finally:
if cr and is_new_cr:
@ -250,11 +250,10 @@ def unquote(str):
# class to handle po files
class TinyPoFile(object):
def __init__(self, buffer):
self.logger = logging.getLogger('i18n')
self.buffer = buffer
def warn(self, msg, *args):
self.logger.warning(msg, *args)
_logger.warning(msg, *args)
def __iter__(self):
self.buffer.seek(0)
@ -529,7 +528,6 @@ def in_modules(object_name, modules):
return module in modules
def trans_generate(lang, modules, cr):
logger = logging.getLogger('i18n')
dbname = cr.dbname
pool = pooler.get_pool(dbname)
@ -576,12 +574,12 @@ def trans_generate(lang, modules, cr):
xml_name = "%s.%s" % (module, encode(xml_name))
if not pool.get(model):
logger.error("Unable to find object %r", model)
_logger.error("Unable to find object %r", model)
continue
exists = pool.get(model).exists(cr, uid, res_id)
if not exists:
logger.warning("Unable to find object %r with id %d", model, res_id)
_logger.warning("Unable to find object %r with id %d", model, res_id)
continue
obj = pool.get(model).browse(cr, uid, res_id)
@ -609,7 +607,7 @@ def trans_generate(lang, modules, cr):
# export fields
if not result.has_key('fields'):
logger.warning("res has no fields: %r", result)
_logger.warning("res has no fields: %r", result)
continue
for field_name, field_def in result['fields'].iteritems():
res_name = name + ',' + field_name
@ -638,7 +636,7 @@ def trans_generate(lang, modules, cr):
try:
field_name = encode(obj.name)
except AttributeError, exc:
logger.error("name error in %s: %s", xml_name, str(exc))
_logger.error("name error in %s: %s", xml_name, str(exc))
continue
objmodel = pool.get(obj.model)
if not objmodel or not field_name in objmodel._columns:
@ -690,7 +688,7 @@ def trans_generate(lang, modules, cr):
finally:
report_file.close()
except (IOError, etree.XMLSyntaxError):
logger.exception("couldn't export translation for report %s %s %s", name, report_type, fname)
_logger.exception("couldn't export translation for report %s %s %s", name, report_type, fname)
for field_name,field_def in obj._table._columns.items():
if field_def.translate:
@ -718,7 +716,7 @@ def trans_generate(lang, modules, cr):
model_obj = pool.get(model)
if not model_obj:
logging.getLogger("i18n").error("Unable to find object %r", model)
_logger.error("Unable to find object %r", model)
continue
for constraint in getattr(model_obj, '_constraints', []):
@ -762,7 +760,7 @@ def trans_generate(lang, modules, cr):
for bin_path in ['osv', 'report' ]:
path_list.append(os.path.join(config.config['root_path'], bin_path))
logger.debug("Scanning modules at paths: ", path_list)
_logger.debug("Scanning modules at paths: ", path_list)
mod_paths = []
join_dquotes = re.compile(r'([^\\])"[\s\\]*"', re.DOTALL)
@ -776,7 +774,7 @@ def trans_generate(lang, modules, cr):
module = get_module_from_path(fabsolutepath, mod_paths=mod_paths)
is_mod_installed = module in installed_modules
if (('all' in modules) or (module in modules)) and is_mod_installed:
logger.debug("Scanning code of %s at module: %s", frelativepath, module)
_logger.debug("Scanning code of %s at module: %s", frelativepath, module)
src_file = misc.file_open(fabsolutepath, subdir='')
try:
code_string = src_file.read()
@ -820,7 +818,7 @@ def trans_generate(lang, modules, cr):
code_offset = i.end() # we have counted newlines up to the match end
for path in path_list:
logger.debug("Scanning files of modules at %s", path)
_logger.debug("Scanning files of modules at %s", path)
for root, dummy, files in osutil.walksymlinks(path):
for fname in itertools.chain(fnmatch.filter(files, '*.py')):
export_code_terms_from_file(fname, path, root, 'code')
@ -838,24 +836,22 @@ def trans_generate(lang, modules, cr):
return out
def trans_load(cr, filename, lang, verbose=True, context=None):
logger = logging.getLogger('i18n')
try:
fileobj = misc.file_open(filename)
logger.info("loading %s", filename)
_logger.info("loading %s", filename)
fileformat = os.path.splitext(filename)[-1][1:].lower()
r = trans_load_data(cr, fileobj, fileformat, lang, verbose=verbose, context=context)
fileobj.close()
return r
except IOError:
if verbose:
logger.error("couldn't read translation file %s", filename)
_logger.error("couldn't read translation file %s", filename)
return None
def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True, context=None):
"""Populates the ir_translation table."""
logger = logging.getLogger('i18n')
if verbose:
logger.info('loading translation file for language %s', lang)
_logger.info('loading translation file for language %s', lang)
if context is None:
context = {}
db_name = cr.dbname
@ -884,7 +880,7 @@ def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True,
reader = TinyPoFile(fileobj)
f = ['type', 'name', 'res_id', 'src', 'value']
else:
logger.error('Bad file format: %s', fileformat)
_logger.error('Bad file format: %s', fileformat)
raise Exception(_('Bad file format'))
# read the rest of the file
@ -929,7 +925,7 @@ def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True,
dic['res_id'] = None
except Exception:
logger.warning("Could not decode resource for %s, please fix the po file.",
_logger.warning("Could not decode resource for %s, please fix the po file.",
dic['res_id'], exc_info=True)
dic['res_id'] = None
@ -937,10 +933,10 @@ def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True,
irt_cursor.finish()
if verbose:
logger.info("translation file loaded succesfully")
_logger.info("translation file loaded succesfully")
except IOError:
filename = '[lang: %s][format: %s]' % (iso_lang or 'new', fileformat)
logger.exception("couldn't read translation file %s", filename)
_logger.exception("couldn't read translation file %s", filename)
def get_locales(lang=None):
if lang is None:

@ -19,7 +19,7 @@ from lxml import etree
unsafe_eval = eval
from safe_eval import safe_eval as eval
logger_channel = 'tests'
_logger = logging.getLogger(__name__)
class YamlImportException(Exception):
pass
@ -133,7 +133,6 @@ class YamlInterpreter(object):
self.filename = filename
self.assert_report = TestReport()
self.noupdate = noupdate
self.logger = logging.getLogger("%s.%s" % (logger_channel, self.module))
self.pool = pooler.get_pool(cr.dbname)
self.uid = 1
self.context = {} # opererp context
@ -163,7 +162,7 @@ class YamlInterpreter(object):
['&', ('name', '=', module), ('state', 'in', ['installed'])])
assert module_count == 1, 'The ID "%s" refers to an uninstalled module.' % (xml_id,)
if len(id) > 64: # TODO where does 64 come from (DB is 128)? should be a constant or loaded form DB
self.logger.log(logging.ERROR, 'id: %s is to long (max: 64)', id)
_logger.error('id: %s is to long (max: 64)', id)
def get_id(self, xml_id):
if xml_id is False or xml_id is None:
@ -219,7 +218,7 @@ class YamlInterpreter(object):
level = severity
levelname = logging.getLevelName(level)
self.assert_report.record(False, levelname)
self.logger.log(level, msg, *args)
_logger.log(level, msg, *args)
if level >= config['assert_exit_level']:
raise YamlImportAbortion('Severe assertion failure (%s), aborting.' % levelname)
return
@ -241,7 +240,7 @@ class YamlInterpreter(object):
assertion, expressions = node, []
if self.isnoupdate(assertion) and self.mode != 'init':
self.logger.warn('This assertion was not evaluated ("%s").' % assertion.string)
_logger.warning('This assertion was not evaluated ("%s").', assertion.string)
return
model = self.get_model(assertion.model)
ids = self._get_assertion_id(assertion)
@ -260,7 +259,7 @@ class YamlInterpreter(object):
try:
success = unsafe_eval(test, self.eval_context, RecordDictWrapper(record))
except Exception, e:
self.logger.debug('Exception during evaluation of !assert block in yaml_file %s.', self.filename, exc_info=True)
_logger.debug('Exception during evaluation of !assert block in yaml_file %s.', self.filename, exc_info=True)
raise YamlImportAbortion(e)
if not success:
msg = 'Assertion "%s" FAILED\ntest: %s\n'
@ -348,7 +347,7 @@ class YamlInterpreter(object):
view_id = etree.fromstring(view['arch'].encode('utf-8'))
record_dict = self._create_record(model, fields, view_id, default=default)
self.logger.debug("RECORD_DICT %s" % record_dict)
_logger.debug("RECORD_DICT %s" % record_dict)
id = self.pool.get('ir.model.data')._update(self.cr, 1, record.model, \
self.module, record_dict, record.id, noupdate=self.isnoupdate(record), mode=self.mode, context=context)
self.id_map[record.id] = int(id)
@ -519,7 +518,7 @@ class YamlInterpreter(object):
def process_python(self, node):
def log(msg, *args):
self.logger.log(logging.TEST, msg, *args)
_logger.log(logging.TEST, msg, *args)
python, statements = node.items()[0]
model = self.get_model(python.model)
statements = statements.replace("\r\n", "\n")
@ -532,7 +531,7 @@ class YamlInterpreter(object):
self._log_assert_failure(python.severity, 'AssertionError in Python code %s: %s', python.name, e)
return
except Exception, e:
self.logger.debug('Exception during evaluation of !python block in yaml_file %s.', self.filename, exc_info=True)
_logger.debug('Exception during evaluation of !python block in yaml_file %s.', self.filename, exc_info=True)
raise
else:
self.assert_report.record(True, python.severity)
@ -754,7 +753,7 @@ class YamlInterpreter(object):
if len(ids):
self.pool.get(node.model).unlink(self.cr, self.uid, ids)
else:
self.logger.log(logging.TEST, "Record not deleted.")
_logger.log(logging.TEST, "Record not deleted.")
def process_url(self, node):
self.validate_xml_id(node.id)
@ -843,9 +842,9 @@ class YamlInterpreter(object):
try:
self._process_node(node)
except YamlImportException, e:
self.logger.exception(e)
_logger.exception(e)
except Exception, e:
self.logger.exception(e)
_logger.exception(e)
raise
def _process_node(self, node):
@ -889,14 +888,14 @@ class YamlInterpreter(object):
def _log(self, node, is_preceded_by_comment):
if is_comment(node):
is_preceded_by_comment = True
self.logger.log(logging.TEST, node)
_logger.log(logging.TEST, node)
elif not is_preceded_by_comment:
if isinstance(node, types.DictionaryType):
msg = "Creating %s\n with %s"
args = node.items()[0]
self.logger.log(logging.TEST, msg, *args)
_logger.log(logging.TEST, msg, *args)
else:
self.logger.log(logging.TEST, node)
_logger.log(logging.TEST, node)
else:
is_preceded_by_comment = False
return is_preceded_by_comment