[IMP] post install for test running; fix phantomjs and run test

bzr revid: chm@openerp.com-20140325140119-jnkisyz6k3s6a34w
chm@openerp.com 2014-03-25 15:01:19 +01:00
commit 1eb0e13c4b
4 changed files with 46 additions and 38 deletions

View File

@@ -287,7 +287,7 @@ class WebRequest(object):
         def checked_call(___dbname, *a, **kw):
             # The decorator can call us more than once if there is a database error. In this
             # case, the request cursor is unusable. Rollback transaction to create a new one.
-            if self._cr:
+            if self._cr and not openerp.tools.config['test_enable']:
                 self._cr.rollback()
             return self.endpoint(*a, **kw)
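
Why the guard matters: the intent here appears to be that under test_enable the request cursor belongs to the running test's transaction, so rolling it back would also discard the test's fixture data. Below is a minimal sketch of the retry-on-database-error pattern this code participates in; the names (retrying, max_tries) and the explicit test_enable parameter are illustrative, not OpenERP's actual decorator.

    import psycopg2

    def retrying(func, cr, max_tries=5, test_enable=False):
        # Sketch only: retry a handler when the database reports an
        # operational error, rolling the broken transaction back first --
        # unless we are in test mode, where the cursor carries fixtures.
        for attempt in range(max_tries):
            try:
                return func()
            except psycopg2.OperationalError:
                if not test_enable:
                    cr.rollback()
        raise RuntimeError("too many database retries")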

View File

@@ -42,6 +42,7 @@ from openerp import SUPERUSER_ID
 from openerp.tools.translate import _
 from openerp.modules.module import initialize_sys_path, \
         load_openerp_module, init_module_models, adapt_version
+from module import runs_post_install
 
 _logger = logging.getLogger(__name__)
 _test_logger = logging.getLogger('openerp.tests')
@@ -425,8 +426,13 @@ def load_modules(db, force_demo=False, status=None, update_module=False):
             for model in registry.models.values():
                 model._register_hook(cr)
 
+            # STEP 9: Run the post-install tests
             cr.commit()
+            if openerp.tools.config['test_enable']:
+                cr.execute("SELECT name FROM ir_module_module WHERE state='installed'")
+                for module_name in cr.fetchall():
+                    report.record_result(openerp.modules.module.run_unit_tests(module_name[0], cr.dbname, position=runs_post_install))
     finally:
         cr.close()
 
 # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
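
The new STEP 9 commits the registry, then runs each installed module's tests a second time, keeping only tests flagged for the post-install phase. Since runs_at() reads the hook flag with getattr(test, hook, default), a test opts in simply by carrying the attribute. A minimal sketch of such a test (class and test names are illustrative):

    import unittest2

    class TestAfterAllModulesLoaded(unittest2.TestCase):
        at_install = False     # excluded from the regular per-module pass
        post_install = True    # selected by the STEP 9 loop above

        def test_registry_complete(self):
            # Runs only once every module is installed and committed.
            self.assertTrue(True)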

View File

@@ -356,30 +356,6 @@ class TestStream(object):
 
 current_test = None
 
-def run_unit_tests(module_name, dbname):
-    """
-    :returns: ``True`` if all of ``module_name``'s tests succeeded, ``False``
-              if any of them failed.
-    :rtype: bool
-    """
-    global current_test
-    current_test = module_name
-    mods = get_test_modules(module_name)
-    r = True
-    for m in mods:
-        tests = unwrap_suite(unittest2.TestLoader().loadTestsFromModule(m))
-        suite = unittest2.TestSuite(itertools.ifilter(runs_at_install, tests))
-
-        _logger.info('running %s tests.', m.__name__)
-        result = unittest2.TextTestRunner(verbosity=2, stream=TestStream(m.__name__)).run(suite)
-
-        if not result.wasSuccessful():
-            r = False
-            _logger.error("Module %s: %d failures, %d errors",
-                          module_name, len(result.failures), len(result.errors))
-    current_test = None
-    return r
-
 def runs_at(test, hook, default):
     # by default, tests do not run post install
     test_runs = getattr(test, hook, default)
@@ -396,6 +372,30 @@ def runs_at(test, hook, default):
 runs_at_install = functools.partial(runs_at, hook='at_install', default=True)
 runs_post_install = functools.partial(runs_at, hook='post_install', default=False)
 
+def run_unit_tests(module_name, dbname, position=runs_at_install):
+    """
+    :returns: ``True`` if all of ``module_name``'s tests succeeded, ``False``
+              if any of them failed.
+    :rtype: bool
+    """
+    global current_test
+    current_test = module_name
+    mods = get_test_modules(module_name)
+    r = True
+    for m in mods:
+        tests = unwrap_suite(unittest2.TestLoader().loadTestsFromModule(m))
+        suite = unittest2.TestSuite(itertools.ifilter(position, tests))
+
+        _logger.info('running %s tests.', m.__name__)
+        result = unittest2.TextTestRunner(verbosity=2, stream=TestStream(m.__name__)).run(suite)
+
+        if not result.wasSuccessful():
+            r = False
+            _logger.error("Module %s: %d failures, %d errors",
+                          module_name, len(result.failures), len(result.errors))
+    current_test = None
+    return r
+
 def unwrap_suite(test):
     """
     Attempts to unpack testsuites (holding suites or cases) in order to
View File

@@ -6,7 +6,8 @@ function waitFor (ready, callback, timeout, timeoutMessageCallback) {
     (function waitLoop() {
         if(new Date - start > timeout) {
-            error(timeoutMessageCallback ? timeoutMessageCallback() : "Timeout after "+timeout+" ms");
+            console.log('error', timeoutMessageCallback ? timeoutMessageCallback() : "Timeout after "+timeout+" ms");
+            phantom.exit(1);
         } else if (ready()) {
             callback();
         } else {
@@ -15,10 +16,6 @@ function waitFor (ready, callback, timeout, timeoutMessageCallback) {
     }());
 }
-function error(message) {
-    console.log('error', message);
-    phantom.exit(1);
-}
 
 function PhantomTest() {
     var self = this;
     this.options = JSON.parse(phantom.args[phantom.args.length-1]);
@@ -49,10 +46,12 @@ function PhantomTest() {
             }));
             msg.push('(leaf frame on top)')
         }
-        error(JSON.stringify(msg.join('\n')));
+        console.log('error', JSON.stringify(msg.join('\n')));
+        phantom.exit(1);
     };
     this.page.onAlert = function(message) {
-        error(message);
+        console.log('error', message);
+        phantom.exit(1);
     };
     this.page.onConsoleMessage = function(message) {
         console.log(message);
@@ -78,7 +77,8 @@ function PhantomTest() {
         if(!found) {
             console.log('Injecting', src, 'needed for', need);
             if(!self.page.injectJs(src)) {
-                error("Cannot inject " + src);
+                console.log('error', "Cannot inject " + src);
+                phantom.exit(1);
             }
         }
     }
@@ -88,8 +88,9 @@ function PhantomTest() {
         self.page.evaluate(function () {
             var message = ("Timeout\nhref: " + window.location.href
                            + "\nreferrer: " + document.referrer
-                           + "\n\n" + document.body.innerHTML).replace(/[^a-z0-9\s~!@#$%^&*()_|+\-=?;:'",.<>\{\}\[\]\\\/]/gi, "*");
-            error(message);
+                           + "\n\n" + (document.body && document.body.innerHTML)).replace(/[^a-z0-9\s~!@#$%^&*()_|+\-=?;:'",.<>\{\}\[\]\\\/]/gi, "*");
+            console.log('error', message);
+            phantom.exit(1);
         });
     }, self.timeout);
@@ -108,7 +109,8 @@ function PhantomTest() {
     var url = self.origin + url_path;
     self.page.open(url, function(status) {
         if (status !== 'success') {
-            error("failed to load " + url)
+            console.log('error', "failed to load " + url);
+            phantom.exit(1);
         } else {
             console.log('loaded', url, status);
             // process ready
@@ -119,7 +121,7 @@ function PhantomTest() {
                 try {
                     console.log("page.evaluate eval expr:", ready);
                     r = !!eval(ready);
-                } catch(ex) { 
+                } catch(ex) {
                 }
                 console.log("page.evaluate eval result:", r);
                 return r;
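
Every call site now emits a console line prefixed with 'error' and exits with a non-zero status instead of going through the removed error() helper; notably, that helper was never in scope inside page.evaluate(), which runs in the page context rather than in PhantomJS's outer script. The (document.body && document.body.innerHTML) guard also avoids a crash when the timeout fires before the body exists. The 'error' prefix is a stdout protocol consumed by the Python side; here is a hedged sketch of such a consumer (run_phantom and the 'ok' success line are assumptions, not OpenERP's exact harness):

    import subprocess

    def run_phantom(script, args):
        # Sketch: watch phantomjs stdout and translate the 'error'/'ok'
        # line protocol used above into a test pass/fail.
        proc = subprocess.Popen(['phantomjs', script] + list(args),
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        try:
            for line in iter(proc.stdout.readline, ''):
                line = line.strip()
                if line.startswith('error'):
                    raise AssertionError(line)
                if line == 'ok':
                    return True
            raise AssertionError('phantomjs exited without reporting ok')
        finally:
            proc.terminate()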