*improvements
bzr revid: qdp@tinyerp.com-20090108084926-n4dxtv1bczx9id1t
parent bc3451b402
commit 5b6bcfb559
@@ -21,6 +21,7 @@
##############################################################################
import pooler
import os
import osv
from tools import config

class abstract_quality_check(object):

@@ -77,7 +78,8 @@ class abstract_quality_check(object):
'''
this method should do the test and fill the score, result and result_details var
'''
raise 'Not Implemented'
raise osv.except_osv(_('Programming Error'), _('Test Is Not Implemented'))

def get_objects(self, cr, uid, module):
# This function returns all object of the given module..

@@ -103,14 +105,12 @@ class abstract_quality_check(object):

def format_table(self, header=[], data_list=[]):
detail = ""
header_list = []
final_list = []
for head in data_list[1]:
header_list.append(head)
detail += (header[0]) % tuple(header_list)
for res in data_list[0]:
data_list[0][res].append(res)
detail += (header[1]) % tuple(data_list[0][res])
detail += (header[0]) % tuple(header[1])
frow = '\n|-'
for i in header[1]:
frow += '\n| %s'
for key, value in data_list.items():
detail += (frow) % tuple(value)
detail = detail + '\n|}'
return detail
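For readers of the hunk above: the reworked format_table helper now takes a (format_string, column_titles) header tuple and a dict of row values, and renders a MediaWiki-style table. A minimal standalone sketch of that calling convention (the sample header and rows below are purely illustrative, not taken from the commit):

def format_table(header, data_list):
    # header[0]: wiki-table prologue with one %s slot per column title
    # header[1]: list of column titles
    detail = header[0] % tuple(header[1])
    # one '\n|-' row separator plus one '\n| %s' cell per column for each data row
    frow = '\n|-' + '\n| %s' * len(header[1])
    for key, value in data_list.items():
        detail += frow % tuple(value)
    return detail + '\n|}'

# purely illustrative usage, mirroring get_result_details(result_dict) in the method test
header = ('{| border="1" cellspacing="0" cellpadding="5" align="left" \n! %-40s \n! %-16s \n! %-20s \n! %-16s ',
          ['Object Name', 'search()', 'fields_view_get()', 'read()'])
rows = {'res.partner': ['res.partner', 'Ok', 'Ok', 'Ok']}
print(format_table(header, rows))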
@@ -21,8 +21,8 @@
<field name="type">form</field>
<field name="arch" type="xml">
<form string="Result">
<field name="name" nolabel="1" readonly="1"/>
<field name="final_score" nolabel="1" readonly="1"/>
<field name="name" readonly="1" search="1"/>
<field name="final_score" readonly="1" search="1"/>
<separator colspan="4" string="Tests"/>
<field name="test_ids" nolabel="1" colspan="4" height="350" width="800" readonly="1"/>
</form>

@@ -38,8 +38,10 @@
<page string="Summary">
<field name="name" readonly="1"/>
<field name="score" readonly="1"/>
<field name="ponderation" readonly="1"/>
<field name="summary" widget="text_wiki" nolabel="1" colspan="4" height="350" width="800" readonly="1"/>
<field name="ponderation" readonly="1"/>
<field name="state" readonly="1"/>
</page>
<page string="Detail">
<field name="detail" widget="text_wiki" nolabel="1" colspan="4" readonly="1"/>

@@ -55,8 +57,9 @@
<field name="type">tree</field>
<field name="arch" type="xml">
<tree string="Result">
<field name="name"/>
<field name="score"/>
<field name="name" required="1"/>
<field name="state"/>
<field name="score" required="1"/>
<field name="ponderation"/>
</tree>
</field>
@@ -34,8 +34,6 @@ class quality_test(base_module_quality.abstract_quality_check):
self.name = _("Method Test")
self.bool_installed_only = True
self.ponderation = 1.0
self.result_det = {}
self.data_list = []
return None

def run_test(self, cr, uid, module_path):

@@ -44,8 +42,9 @@ class quality_test(base_module_quality.abstract_quality_check):
obj_list = self.get_objects(cr, uid, module_name)
ok_count = 0
ex_count = 0
result_dict = {}
for obj in obj_list:
temp = []
temp = [obj]
try:
res = pool.get(obj).search(cr, uid, [])
temp.append('Ok')

@@ -67,11 +66,10 @@ class quality_test(base_module_quality.abstract_quality_check):
except:
temp.append('Exception')
ex_count += 1
self.result_det[obj] = temp
self.data_list.append(self.result_det)
result_dict[obj] = temp
self.score = (ok_count + ex_count) and float(ok_count)/float(ok_count + ex_count) or 0.0
self.result = self.get_result()
self.result_details = self.get_result_details()
self.result_details = self.get_result_details(result_dict)
return None

def get_result(self):

@@ -80,15 +78,11 @@ This test checks if the module classes are raising exception when calling basic
"""
return summary

def get_result_details(self):
header_list = []
header_list.append('{| border="1" cellspacing="0" cellpadding="5" align="left" \n! %-40s \n! %-16s \n! %-20s \n! %-16s ')
header_list.append('\n|-\n| %s \n| %s \n| %s \n| %s ')
header_view = ['Object Name', 'search()', 'fields_view_get', 'read']
self.data_list.append(header_view)
def get_result_details(self, dict):
header = ('{| border="1" cellspacing="0" cellpadding="5" align="left" \n! %-40s \n! %-16s \n! %-20s \n! %-16s ', [_('Object Name'), 'search()', 'fields_view_get()', 'read()'])
detail = ""
if not self.error:
detail += self.format_table(header=header_list, data_list=self.data_list)
detail += self.format_table(header, dict)
return detail

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
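The method test above now collects one status row per model in result_dict and scores the module as the share of models whose basic ORM calls did not raise. A rough standalone sketch of that scoring rule, with the pool.get(obj).search/read calls stubbed out (the aggregation below is an assumed simplification; the real test tallies ok_count and ex_count while iterating):

def method_test_score(result_dict):
    # result_dict: {object_name: [object_name, status_search, status_fvg, status_read]}
    ok_count = sum(1 for row in result_dict.values() if 'Exception' not in row)
    ex_count = len(result_dict) - ok_count
    total = ok_count + ex_count
    return float(ok_count) / total if total else 0.0

# illustrative data: two models survive the basic calls, one raises somewhere
print(method_test_score({
    'res.partner': ['res.partner', 'Ok', 'Ok', 'Ok'],
    'res.company': ['res.company', 'Ok', 'Ok', 'Ok'],
    'broken.model': ['broken.model', 'Exception'],
}))  # ~0.67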
@@ -47,7 +47,8 @@ class quality_test(base_module_quality.abstract_quality_check):

n = 0
score = 0.0
self.result += "\nThis test checks if the module satisfies the current coding standard used by OpenERP. \n\n"
self.result += """\nThis test checks if the module satisfies the current coding standard used by OpenERP. \n
Rating for *.py files goes from -10/10 to 10/10\n\n"""
for file in list_files:
if file.split('.')[-1] == 'py' and not file.endswith('__init__.py') and not file.endswith('__terp__.py'):
file_path = os.path.join(module_path, file)

@@ -72,29 +73,29 @@ class quality_test(base_module_quality.abstract_quality_check):
self.result_details += res

average_score = n and score / n or score
self.score = (average_score + 10) /20
self.score = (average_score + 10) / 20
return None

def get_result(self, cr, uid, module_path):
self.run_test(cr, uid, module_path)
if not self.bool_installed_only or module_state=="installed":
summary ="""
===Pylint Test===:
#~ def get_result(self, cr, uid, module_path):
#~ self.run_test(cr, uid, module_path)
#~ if not self.bool_installed_only or module_state=="installed":
#~ summary ="""
#~ ===Pylint Test===:

This test checks if the module satisfies the current coding standard used by OpenERP.
#~ This test checks if the module satisfies the current coding standard used by OpenERP.

""" + "Score: " + str(self.score) + "/10\n"
else:
summary =""" \n===Pylint Test===:
#~ """ + "Score: " + str(self.score) + "/10\n"
#~ else:
#~ summary =""" \n===Pylint Test===:

The module has to be installed before running this test.\n\n """
header_list = ""
self.error = True
return summary
#~ The module has to be installed before running this test.\n\n """
#~ header_list = ""
#~ self.error = True
#~ return summary

def get_result_details(self):
detail = "\n===Pylint Test===\n" + self.result
return detail
#~ def get_result_details(self):
#~ detail = "\n===Pylint Test===\n" + self.result
#~ return detail

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
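As the hunk above shows, the pylint test keeps pylint's own -10/10 to 10/10 per-file rating, averages it over the checked files, and maps the average onto the 0..1 scale the framework expects via (average + 10) / 20. A small standalone sketch of that normalization:

def normalized_pylint_score(ratings):
    # ratings: pylint rating per checked .py file, each in [-10, 10]
    n = len(ratings)
    average = sum(ratings) / float(n) if n else 0.0
    # shift [-10, 10] onto [0, 20], then scale down to [0, 1]
    return (average + 10) / 20.0

print(normalized_pylint_score([7.5, 2.0, -1.0]))  # ~0.64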
@@ -38,9 +38,8 @@ class quality_test(base_module_quality.abstract_quality_check):
self.name = _("Speed Test")
self.bool_installed_only = True
self.ponderation = 1.0
self.result_det = {}
self.data_list = []
return None

def run_test(self, cr, uid, module_path):
pool = pooler.get_pool(cr.dbname)
module_name = module_path.split('/')[-1]

@@ -48,48 +47,45 @@ class quality_test(base_module_quality.abstract_quality_check):
obj_counter = 0
score = 0
obj_ids = self.get_ids(cr, uid, obj_list)
result_dict = {}
for obj in obj_ids:
obj_counter += 1
ids = obj_ids[obj]
ids = ids[:100]
size = len(ids)
if size:
c1 = cr.count
list = []

#we perform the operation twice, and count the number of queries in the second run. This allows to avoid the cache effect. (like translated terms that asks for more queries)
pool.get(obj).read(cr, uid, [ids[0]])
c = cr.count
pool.get(obj).read(cr, uid, [ids[0]])
code_base_complexity = cr.count - c1
code_base_complexity = cr.count - c

pool.get(obj).read(cr, uid, ids[:size/2])
c = cr.count
pool.get(obj).read(cr, uid, ids[:size/2])
code_half_complexity = cr.count - c1
code_half_complexity = cr.count - c

pool.get(obj).read(cr, uid, ids)
c = cr.count
pool.get(obj).read(cr, uid, ids)
code_size_complexity = cr.count - c1
code_size_complexity = cr.count - c

if size < 5:
self.score += -2
list = [size, code_base_complexity, code_half_complexity, code_size_complexity, "Warning! Not enough demo data"]
self.result_det[obj] = list
list = [obj, size, code_base_complexity, code_half_complexity, code_size_complexity, "Warning! Not enough demo data"]
else:
if code_size_complexity <= (code_base_complexity + size):
complexity = "O(1)"
score = 10
score += 1
else:
complexity = "O(n) or worst"
score = 0
list = [size, code_base_complexity, code_half_complexity, code_size_complexity, complexity]
self.result_det[obj] = list
list = [obj, size, code_base_complexity, code_half_complexity, code_size_complexity, complexity]
else:
score += -5
list = [size, "", "", "", "Warning! Object has no demo data"]
self.result_det[obj] = list
self.data_list.append(self.result_det)
self.score = obj_counter and score/obj_counter or 0.0
self.result = self.get_result()
self.result_details = self.get_result_details()

list = [obj, size, "", "", "", "Warning! Object has no demo data"]
result_dict[obj] = list
self.score = obj_counter and score / obj_counter or 0.0
self.result_details = self.get_result_details(result_dict)
return None

def get_result(self):
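The speed test above relies on the cursor's query counter (cr.count): each read() is executed twice and only the second run is measured, so cache effects such as translated terms do not inflate the count, and the full-set count is compared with the single-record count plus the record count to label the object O(1) or O(n). A standalone sketch of that heuristic, where count_queries and read are hypothetical stand-ins for cr.count and pool.get(obj).read:

def classify_read_complexity(count_queries, read, ids):
    # count_queries(): cumulative number of SQL queries so far (stand-in for cr.count)
    # read(subset): the ORM read under test (stand-in for pool.get(obj).read)
    size = len(ids)

    def measured(subset):
        read(subset)                    # warm-up run fills the caches
        before = count_queries()
        read(subset)                    # only the second run is measured
        return count_queries() - before

    base = measured(ids[:1])            # 1 record
    half = measured(ids[:size // 2])    # S/2 records (reported, not used in the decision)
    full = measured(ids)                # S records
    if full <= base + size:
        return 'O(1)', (base, half, full)
    return 'O(n) or worse', (base, half, full)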
@@ -98,16 +94,11 @@ This test checks the speed of the module.
"""
return summary

def get_result_details(self):
header_list = []
header_list.append('{| border="1" cellspacing="0" cellpadding="5" align="left" \n! %-40s \n! %-10s \n! %-10s \n! %-10s \n! %-10s \n! %-20s')
header_list.append('\n|-\n| %s \n| %s \n| %s \n| %s \n| %s \n| %s ')
header_view = ['Object Name', 'Size-Number of Records (S)', '1', 'S/2', 'S', 'Complexity using query']
detail = ""
self.data_list.append(header_view)
def get_result_details(self, dict):
header = ('{| border="1" cellspacing="0" cellpadding="5" align="left" \n! %-40s \n! %-10s \n! %-10s \n! %-10s \n! %-10s \n! %-20s', [_('Object Name'), _('Size-Number of Records (S)'), _('1'), _('S/2'), _('S'), _('Reading Complexity')])
if not self.error:
detail += self.format_table(header=header_list, data_list=self.data_list)
return detail
return self.format_table(header, data_list=dict)
return ""

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
@@ -58,8 +58,8 @@ class quality_test(base_module_quality.abstract_quality_check):
feel_bad_factor = 0
if '__terp__.py' not in list_files:
self.no_terp = True
if self.no_terp:
self.result += "The module does not contain the __terp__.py file"
return None

terp_file = os.path.join(module_path,'__terp__.py')
res = eval(tools.file_open(terp_file).read())

@@ -105,21 +105,21 @@ class quality_test(base_module_quality.abstract_quality_check):
# self.result += "__terp__.py : "+ str(self.score) + "/10\n"
return None

def get_result(self, cr, uid, module_path, module_state):
# self.run_test(cr, uid, module_path)
# summary = "\n===TERP Test===:\n"
if self.no_terp:
summary += """
The module does not contain the __terp__.py file.\n\n """
# else:
# summary += """
# This test checks if the module satisfies the current coding standard for __terp__.py file used by OpenERP.
# """ + "Score: " + str(self.score) + "/10\n"
return summary
#~ def get_result(self, cr, uid, module_path, module_state):
#~ # self.run_test(cr, uid, module_path)
#~ # summary = "\n===TERP Test===:\n"
#~ if self.no_terp:
#~ summary += """
#~ The module does not contain the __terp__.py file.\n\n """
#~ # else:
#~ # summary += """
#~ # This test checks if the module satisfies the current coding standard for __terp__.py file used by OpenERP.
#~ # """ + "Score: " + str(self.score) + "/10\n"
#~ return summary

def get_result_details(self):
detail = "\n===TERP Test===\n" + self.result
return detail
#~ def get_result_details(self):
#~ detail = "\n===TERP Test===\n" + self.result
#~ return detail
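The __terp__ test above evaluates the module manifest (a plain Python dict literal) and inspects it; only the missing-file branch and the eval() call are visible in this commit. A minimal sketch of that idea, assuming a plain open() instead of tools.file_open and an illustrative set of expected keys:

import os

def check_terp(module_path, expected_keys=('name', 'version', 'author', 'depends')):
    # expected_keys is illustrative; the real test inspects its own list of manifest entries
    terp_file = os.path.join(module_path, '__terp__.py')
    if not os.path.exists(terp_file):
        return 0.0, "The module does not contain the __terp__.py file"
    manifest = eval(open(terp_file).read())     # the manifest is a dict literal
    missing = [key for key in expected_keys if key not in manifest]
    score = 1.0 - float(len(missing)) / len(expected_keys)
    detail = "Missing keys: %s" % ', '.join(missing) if missing else "OK"
    return score, detail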
@@ -35,8 +35,8 @@ class wiz_quality_check(osv.osv):
_name = 'wizard.quality.check'
_columns = {
'name': fields.char('Rated Module', size=64, ),
'final_score': fields.char('Final Score', size=10,),
'test_ids' : fields.one2many('quality.check.detail', 'quality_check_id', 'Test Details',)
'final_score': fields.char('Final Score (%)', size=10,),
'test_ids' : fields.one2many('quality.check.detail', 'quality_check_id', 'Tests',)
}
wiz_quality_check()

@@ -46,10 +46,11 @@ class quality_check_detail(osv.osv):
_columns = {
'quality_check_id': fields.many2one('wizard.quality.check', 'Quality'),
'name': fields.char('Name',size=128,),
'score': fields.float('Score',),
'ponderation': fields.float('Ponderation',),
'score': fields.float('Score (%)',),
'ponderation': fields.float('Ponderation',help='Some tests are more critical than others, so they have a bigger weight in the computation of final rating'),
'summary': fields.text('Summary',),
'detail' : fields.text('Details',),
'state': fields.selection([('done','Done'),('skipped','Skipped'),], 'State', size=6, help='The test will be completed only if the module is installed or if the test may be processed on uninstalled module.'),
}
quality_check_detail()
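The new help text on Ponderation states that heavier tests weigh more in the final rating, and the wizard code below accumulates score_sum += val.score * val.ponderation. A plausible reading of the aggregation (an assumption, since the final division is not part of this commit) is a weighted average scaled to the percentage shown in Final Score (%):

def final_rating(test_results):
    # test_results: list of (score in [0, 1], ponderation) pairs for the executed tests
    weighted = sum(score * weight for score, weight in test_results)
    total_weight = sum(weight for _, weight in test_results)
    return 100.0 * weighted / total_weight if total_weight else 0.0

# a heavily weighted test pulls the final percentage further in its direction
print(final_rating([(0.9, 1.0), (0.4, 2.0)]))  # ~56.7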
@@ -58,8 +59,10 @@ class create_quality_check(wizard.interface):

def _create_quality_check(self, cr, uid, data, context={}):
pool = pooler.get_pool(cr.dbname)
if data['id']:
module_data = pool.get('ir.module.module').browse(cr, uid, [data['id']])[0]
print data, context
objs = []
for id in data['ids']:
module_data = pool.get('ir.module.module').browse(cr, uid, id)
#list_folders = os.listdir(config['addons_path']+'/base_module_quality/')
abstract_obj = base_module_quality.abstract_quality_check()
score_sum = 0.0

@@ -76,10 +79,11 @@ class create_quality_check(wizard.interface):
val.run_test(cr, uid, str(module_path))
data = {
'name': val.name,
'score': val.score,
'score': val.score * 100,
'ponderation': val.ponderation,
'summary': val.result,
'detail': val.result_details,
'state': 'done',
}
create_ids.append((0,0,data))
score_sum += val.score * val.ponderation

@@ -88,6 +92,7 @@ class create_quality_check(wizard.interface):
data = {
'name': val.name,
'score': 0,
'state': 'skipped',
'summary': _("The module has to be installed before running this test.")
}
create_ids.append((0,0,data))

@@ -99,12 +104,13 @@ class create_quality_check(wizard.interface):
'test_ids' : create_ids,
}
obj = pool.get('wizard.quality.check').create(cr, uid, data, context)
return obj
objs.append(obj)
return objs

def _open_quality_check(self, cr, uid, data, context):
obj_id = self._create_quality_check(cr, uid, data, context)
obj_ids = self._create_quality_check(cr, uid, data, context)
return {
'domain': "[('id','=', "+ str(obj_id)+")]",
'domain': "[('id','in', ["+','.join(map(str,obj_ids))+"])]",
'name': _('Quality Check'),
'view_type': 'form',
'view_mode': 'tree,form',
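With the change above, the wizard no longer creates and opens a single wizard.quality.check record: it loops over every selected module id, creates one record per module (the test lines are passed as (0, 0, values) one2many tuples), and opens all of them at once through an ('id', 'in', [...]) domain. A rough sketch of how the returned window action could be assembled from the created ids (res_model and type are assumptions, not shown in the diff):

def quality_check_action(obj_ids):
    # obj_ids: ids of the wizard.quality.check records created, one per selected module
    return {
        'domain': "[('id','in', [" + ','.join(map(str, obj_ids)) + "])]",
        'name': 'Quality Check',
        'view_type': 'form',
        'view_mode': 'tree,form',
        'res_model': 'wizard.quality.check',   # assumed, taken from the model's _name above
        'type': 'ir.actions.act_window',       # assumed standard window-action type
    }

print(quality_check_action([7, 8])['domain'])  # [('id','in', [7,8])]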