[IMP] base_module_quality: add a minimal score for every test, a message when a test's score falls below that limit, and a variable that controls whether a test is active

bzr revid: mra@tinyerp.com-20090828072457-en2vpm5l1514xtj7
This commit is contained in:
mra (Open ERP) 2009-08-28 12:54:57 +05:30
parent 466cdeb5d3
commit fc5d45c54a
11 changed files with 70 additions and 36 deletions

View File

@ -30,12 +30,12 @@ from osv import osv, fields
class abstract_quality_check(object):
'''
This Class provides...
This class is the abstract base class for all tests
'''
def __init__(self):
'''
this method should initialize the var
this method should initialize the variables
'''
#This float has to store the rating of the module.
#Used to compute the final score (average of all scores).
@ -73,6 +73,15 @@ class abstract_quality_check(object):
#Specifies whether the test raised an error on the module
self.error = False
#Specifies the minimal score for the test, as a percentage (%)
self.min_score = 50
#Specifies whether the test should be considered when quality-checking the module
self.active = True
#This variable holds a message telling whether the test result is good or not
self.message = ''
#The tests have to subscribe themselves in this list, which contains
#all the tests that have to be performed.
self.tests = []
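To make the role of the new attributes concrete, here is a minimal sketch of a test subclass configuring them. It is illustrative only: the class body and values are invented, and the import line assumes the layout the existing test files in this module appear to use.

from base_module_quality import base_module_quality

class quality_test(base_module_quality.abstract_quality_check):
    def __init__(self):
        super(quality_test, self).__init__()
        self.name = "Example Test"        # illustrative only
        self.bool_installed_only = False  # may run on an uninstalled module
        self.min_score = 50               # threshold, expressed in percent
        self.active = True                # set to False to exclude the test from a run

    def run_test(self, cr, uid, module_path):
        # scores live on a 0..1 scale; self.message is filled in here whenever
        # score * 100 ends up below self.min_score (see the test files below)
        self.score = 0.75
        return None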
@ -189,40 +198,41 @@ class module_quality_check(osv.osv):
ad = tools.config['root_path']+'/addons'
module_path = os.path.join(ad, module_name)
val = test.quality_test()
if not val.bool_installed_only or module_state == "installed":
val.run_test(cr, uid, str(module_path))
if not val.error:
data = {
'name': val.name,
'score': val.score * 100,
'ponderation': val.ponderation,
'summary': val.result,
'detail': val.result_details,
'state': 'done',
'note': val.note,
}
if val.bool_count_score:
score_sum += val.score * val.ponderation
ponderation_sum += val.ponderation
if val.active:
if not val.bool_installed_only or module_state == "installed":
val.run_test(cr, uid, str(module_path))
if not val.error:
data = {
'name': val.name,
'score': val.score * 100,
'ponderation': val.ponderation,
'summary': val.result,
'detail': val.result_details,
'state': 'done',
'note': val.note,
'message': val.message
}
if val.bool_count_score:
score_sum += val.score * val.ponderation
ponderation_sum += val.ponderation
else:
data = {
'name': val.name,
'score': 0,
'summary': val.result,
'state': 'skipped',
'note': val.note,
}
else:
data = {
'name': val.name,
'score': 0,
'summary': val.result,
'state': 'skipped',
'note': val.note,
'score': 0,
'state': 'skipped',
'summary': _("The module has to be installed before running this test.")
}
else:
data = {
'name': val.name,
'note': val.note,
'score': 0,
'state': 'skipped',
'summary': _("The module has to be installed before running this test.")
}
create_ids.append((0, 0, data))
final_score = '%.2f' % (score_sum / ponderation_sum * 100)
create_ids.append((0, 0, data))
final_score = ponderation_sum and '%.2f' % (score_sum / ponderation_sum * 100) or 0
data = {
'name': module_name,
'final_score': final_score,
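For clarity, here is a standalone sketch of the scoring behaviour this hunk introduces: inactive tests are skipped entirely, each detail record now carries the test's message, and the final score is a ponderation-weighted average guarded against a zero weight sum (previously a run in which no test counted toward the score would divide by zero). The helper below only illustrates the weighted-average guard; the input pairs are invented.

def compute_final_score(results):
    # results: list of (score, ponderation) pairs, scores on a 0..1 scale
    score_sum = sum(score * weight for score, weight in results)
    ponderation_sum = sum(weight for score, weight in results)
    # same guard as above: no weighted tests -> 0 instead of ZeroDivisionError
    return ponderation_sum and '%.2f' % (score_sum / ponderation_sum * 100) or 0

print(compute_final_score([(0.8, 2), (0.5, 1)]))  # '70.00'
print(compute_final_score([]))                    # 0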
@ -236,12 +246,13 @@ class module_quality_detail(osv.osv):
_name = 'module.quality.detail'
_columns = {
'quality_check_id': fields.many2one('module.quality.check', 'Quality'),
'name': fields.char('Name',size=128,),
'name': fields.char('Name',size=128),
'score': fields.float('Score (%)'),
'ponderation': fields.float('Ponderation', help='Some tests are more critical than others, so they have a bigger weight in the computation of the final rating'),
'note': fields.text('Note',),
'note': fields.text('Note'),
'summary': fields.text('Summary'),
'detail': fields.text('Details'),
'message': fields.char('Message', size=64),
'state': fields.selection([('done','Done'),('skipped','Skipped'),], 'State', size=6, help='The test will be completed only if the module is installed or if the test may be processed on an uninstalled module.'),
}

View File

@ -62,6 +62,7 @@
<field name="state"/>
<field name="score" required="1"/>
<field name="ponderation"/>
<field name="message" />
</tree>
</field>
</record>

View File

@ -34,6 +34,7 @@ class quality_test(base_module_quality.abstract_quality_check):
This test checks if the module classes are raising exceptions when calling basic methods or not.
""")
self.bool_installed_only = True
self.min_score = 60
return None
def run_test(self, cr, uid, module_path):
@ -72,6 +73,8 @@ This test checks if the module classes are raising exception when calling basic
ex_count += 1
result_dict[obj] = temp
self.score = (ok_count + ex_count) and float(ok_count)/float(ok_count + ex_count) or 0.0
if self.score*100 < self.min_score:
self.message = 'Score is below the minimal score (%s%%)' % self.min_score
self.result = self.get_result(result_dict)
return None
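The check added here follows the pattern repeated in the other test files below: compute the score as a ratio on a 0..1 scale, then set the warning message when it falls under min_score. A small self-contained rendering of that pattern, with invented counts and the 60% threshold this file sets:

def rate_methods(ok_count, ex_count, min_score=60):
    # fraction of objects whose basic methods ran without raising an exception
    score = (ok_count + ex_count) and float(ok_count) / float(ok_count + ex_count) or 0.0
    message = ''
    if score * 100 < min_score:
        message = 'Score is below the minimal score (%s%%)' % min_score
    return score, message

print(rate_methods(3, 2))  # (0.6, '') -- exactly at the 60% threshold, no warning
print(rate_methods(1, 3))  # (0.25, 'Score is below the minimal score (60%)')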

View File

@ -36,6 +36,7 @@ class quality_test(base_module_quality.abstract_quality_check):
Test checks for fields, views, security rules and dependency level
""")
self.bool_installed_only = True
self.min_score = 40
return None
def run_test(self, cr, uid, module_path):
@ -166,6 +167,8 @@ Test checks for fields, views, security rules, dependancy level
score_depend = (100 - (bad_depend * 5)) / 100.0 # note: each extra module in the dependency list costs 5 points, e.g. two extra dependencies give 90 out of 100
score_security = good_sec and float(good_sec - bad_sec) / float(good_sec)
self.score = (score_view + score_field + score_security + score_depend) / 4
if self.score*100 < self.min_score:
self.message = 'Score is below the minimal score (%s%%)' % self.min_score
self.result = self.get_result({ module_name: [int(score_field * 100), int(score_view * 100), int(score_security * 100), int(score_depend * 100)]})
self.result_details += self.get_result_details(result_dict)
self.result_details += self.get_result_general(result_view, name="View")
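As a side note on how this test arrives at its score: the four component scores (fields, views, security, dependencies) each live on a 0..1 scale and are averaged with equal weight, with every extra dependency costing 5 points out of 100. A compact sketch with invented component values:

def object_test_score(score_field, score_view, score_security, bad_depend):
    # each extra dependency removes 5 points from a 100-point dependency score
    score_depend = (100 - (bad_depend * 5)) / 100.0
    return (score_field + score_view + score_security + score_depend) / 4

print(object_test_score(0.8, 0.9, 1.0, 2))  # 0.9 -> average of 0.8, 0.9, 1.0 and 0.9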

View File

@ -37,6 +37,7 @@ PEP-8 Test , copyright of py files check, method can not call from loops
self.bad_standard = 0
self.good_standard = 0
self.result_py = {}
self.min_score = 40
return None
def run_test(self, cr, uid, module_path):
@ -85,6 +86,8 @@ PEP-8 Test , copyright of py files check, method can not call from loops
self.check_boolean(open_files)
self.score = self.good_standard and float(self.good_standard) / float(self.good_standard + self.bad_standard)
if self.score*100 < self.min_score:
self.message = 'Score is below the minimal score (%s%%)' % self.min_score
self.result = self.get_result({ module_path: [int(self.score * 100)]})
self.result_details += self.get_result_general(self.result_py)
return None

View File

@ -32,6 +32,7 @@ class quality_test(base_module_quality.abstract_quality_check):
self.name = _("Pylint Test")
self.note = _("""This test uses Pylint and checks if the module satisfies the coding standard of Python. See http://www.logilab.org/project/name/pylint for further info.\n """)
self.bool_installed_only = False
self.min_score = 30
return None
def run_test(self, cr, uid, module_path):
@ -92,6 +93,8 @@ class quality_test(base_module_quality.abstract_quality_check):
self.result_details += '</body></html>'
average_score = count and score / count or score
self.score = (max(average_score, 0)) / 10
if self.score*100 < self.min_score:
self.message = 'Score is below the minimal score (%s%%)' % self.min_score
self.result = self.get_result(dict_py)
return None
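The score computed above starts from pylint's own rating, which is given out of 10 per file and can be negative for very poor code; averaging, clamping at zero and dividing by 10 maps it onto the 0..1 scale shared with the other tests. A small sketch with invented ratings:

def normalise_pylint(ratings):
    # average the per-file ratings (out of 10), clamp at 0, rescale to 0..1
    count = len(ratings)
    average_score = count and sum(ratings) / count or 0
    return max(average_score, 0) / 10.0

print(normalise_pylint([8.5, 6.0]))   # 0.725
print(normalise_pylint([-2.0, 1.0]))  # 0.0 -- negative average clamped to zero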

View File

@ -36,6 +36,7 @@ class quality_test(base_module_quality.abstract_quality_check):
This test checks the speed of the module. Note that at least 5 demo records are needed in order to run it.
""")
self.min_score = 30
return None
def run_test(self, cr, uid, module_path):
@ -104,6 +105,8 @@ This test checks the speed of the module. Note that at least 5 demo data is need
result_dict[obj] = speed_list
result_dict2[obj] = list2
self.score = obj_counter and score / obj_counter or 0.0
if self.score*100 < self.min_score:
self.message = 'Score is below the minimal score (%s%%)' % self.min_score
self.result_details += self.get_result_details(result_dict)
self.result += self.get_result(result_dict2)
return None

View File

@ -37,6 +37,7 @@ This test checks if the module satisfy tiny structure
self.module_score = 0.0
self.count = 0
self.recur = True
self.min_score = 30
return None
def run_test_struct(self, cr, uid, module_path):
@ -139,6 +140,10 @@ This test checks if the module satisfy tiny structure
def run_test(self, cr, uid, module_path):
self.run_test_struct(cr, uid, module_path)
if self.score*100 < self.min_score:
self.message = 'Score is below the minimal score (%s%%)' % self.min_score
else:
self.message = ''
if self.score != 1:
self.result_details = self.get_result_details(self.result_dict)
return None

View File

@ -123,6 +123,8 @@ class quality_test(base_module_quality.abstract_quality_check):
def run_test(self, cr, uid, module_path):
terp_score = self.run_test_terp(cr, uid, module_path)
self.score = terp_score and terp_score[1] or 0.0
if self.score*100 < self.min_score:
self.message = 'Score is below the minimal score (%s%%)' % self.min_score
if terp_score:
self.result = self.get_result({'__terp__.py': terp_score})
return None

View File

@ -25,8 +25,6 @@ import wizard
import pooler
from osv import osv, fields
#TODO: add cheks: do the class quality_check inherits the class abstract_quality_check?
class quality_check(wizard.interface):
def _create_quality_check(self, cr, uid, data, context={}):

View File

@ -33,6 +33,7 @@ class quality_test(base_module_quality.abstract_quality_check):
self.name = _("Workflow Test")
self.note = _("This test checks where object has workflow or not on it if there is a state field and several buttons on it and also checks validity of workflow xml file")
self.bool_installed_only = True
self.min_score = 40
return None
def run_test(self, cr, uid, module_path):
@ -116,7 +117,8 @@ class quality_test(base_module_quality.abstract_quality_check):
good_view += 1
score_avail = good_view and float(good_view) / float(bad_view + good_view)
self.score = (score_general + score_avail) / 2
if self.score*100 < self.min_score:
self.message = 'Score is below the minimal score (%s%%)' % self.min_score
if not wkf_ids and not bad_view:
self.error = True
self.result = _("No Workflow define")