[IMP] new, cleaner tools.cache; ~10% speedup and more to come...

bzr revid: al@openerp.com-20110608030330-d0dsv2k6n0w3lyd5
Antony Lesuisse 2011-06-08 05:03:30 +02:00
parent b681b0ec75
commit 970b19b8b4
9 changed files with 142 additions and 187 deletions

View File: ir_model.py

@@ -450,6 +450,7 @@ class ir_model_access(osv.osv):
# pass no groups -> no access
return False
@tools.ormcache()
def check(self, cr, uid, model, mode='read', raise_exception=True, context=None):
if uid==1:
            # The root user has all access rights
@@ -513,8 +514,6 @@ class ir_model_access(osv.osv):
raise except_orm(_('AccessError'), msgs[mode] % (model_name, groups) )
return r
check = tools.cache()(check)
__cache_clearing_methods = []
def register_cache_clearing_method(self, model, method):
@@ -528,7 +527,7 @@ class ir_model_access(osv.osv):
pass
def call_cache_clearing_methods(self, cr):
self.check.clear_cache(cr.dbname) # clear the cache of check function
self.check.clear_cache(self) # clear the cache of check function
for model, method in self.__cache_clearing_methods:
object_ = self.pool.get(model)
if object_:
@@ -593,7 +592,7 @@ class ir_model_data(osv.osv):
if not cr.fetchone():
cr.execute('CREATE INDEX ir_model_data_module_name_index ON ir_model_data (module, name)')
@tools.cache()
@tools.ormcache()
def _get_id(self, cr, uid, module, xml_id):
"""Returns the id of the ir.model.data record corresponding to a given module and xml_id (cached) or raise a ValueError if not found"""
ids = self.search(cr, uid, [('module','=',module), ('name','=', xml_id)])
@@ -602,7 +601,7 @@ class ir_model_data(osv.osv):
        # the SQL constraints guarantee that we get only one result
return ids[0]
@tools.cache()
@tools.ormcache()
def get_object_reference(self, cr, uid, module, xml_id):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached) or raise ValueError if not found"""
data_id = self._get_id(cr, uid, module, xml_id)
@@ -652,8 +651,8 @@ class ir_model_data(osv.osv):
results = cr.fetchall()
for imd_id2,res_id2,real_id2 in results:
if not real_id2:
self._get_id.clear_cache(cr.dbname, uid, module, xml_id)
self.get_object_reference.clear_cache(cr.dbname, uid, module, xml_id)
self._get_id.clear_cache(self, uid, module, xml_id)
self.get_object_reference.clear_cache(self, uid, module, xml_id)
cr.execute('delete from ir_model_data where id=%s', (imd_id2,))
res_id = False
else:
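The pattern above recurs throughout the commit: clear_cache now receives the model instance instead of cr.dbname, because each decorated method keeps its LRU inside the instance's _ormcache dict, one per registry/database. A minimal sketch of the new convention, on a hypothetical model (res_widget and its method are illustrative, not part of this commit):

    class res_widget(osv.osv):
        _name = 'res.widget'

        @tools.ormcache()
        def _kind_code(self, cr, uid, kind):
            cr.execute('SELECT code FROM res_widget_kind WHERE name=%s', (kind,))
            row = cr.fetchone()
            return row and row[0]

        def write(self, cr, uid, ids, vals, context=None):
            res = super(res_widget, self).write(cr, uid, ids, vals, context=context)
            # old style: self._kind_code.clear_cache(cr.dbname)
            self._kind_code.clear_cache(self)
            return res
    res_widget()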

View File: ir_rule.py

@@ -94,7 +94,7 @@ class ir_rule(osv.osv):
return ['&'] * (count-1) + dom
return []
@tools.cache()
@tools.ormcache()
def _compute_domain(self, cr, uid, model_name, mode="read"):
if mode not in self._MODES:
raise ValueError('Invalid mode: %r' % (mode,))
@@ -147,12 +147,12 @@ class ir_rule(osv.osv):
AND u_rel.uid = %s)
""", (uid,))
models = map(itemgetter(0), cr.fetchall())
clear = partial(self._compute_domain.clear_cache, cr.dbname, uid)
clear = partial(self._compute_domain.clear_cache, self, uid)
[clear(model, mode) for model in models for mode in self._MODES]
def domain_get(self, cr, uid, model_name, mode='read', context=None):
dom = self._compute_domain(cr, uid, model_name, mode=mode)
dom = self._compute_domain(cr, uid, model_name, mode)
if dom:
# _where_calc is called as superuser. This means that rules can
            # involve objects on which the real uid has no access rights.
@@ -165,19 +165,19 @@ class ir_rule(osv.osv):
def unlink(self, cr, uid, ids, context=None):
res = super(ir_rule, self).unlink(cr, uid, ids, context=context)
# Restart the cache on the _compute_domain method of ir.rule
self._compute_domain.clear_cache(cr.dbname)
self._compute_domain.clear_cache(self)
return res
def create(self, cr, user, vals, context=None):
res = super(ir_rule, self).create(cr, user, vals, context=context)
# Restart the cache on the _compute_domain method of ir.rule
self._compute_domain.clear_cache(cr.dbname)
self._compute_domain.clear_cache(self)
return res
def write(self, cr, uid, ids, vals, context=None):
res = super(ir_rule, self).write(cr, uid, ids, vals, context=context)
# Restart the cache on the _compute_domain method
self._compute_domain.clear_cache(cr.dbname)
self._compute_domain.clear_cache(self)
return res
ir_rule()
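Note the call-site change in domain_get above: mode is now passed positionally instead of as mode=mode. The lookup wrapper generated by ormcache is defined as lookup(self2, cr, *args), with no **kwargs, so keyword arguments raise a TypeError, and the cache key is built purely from the positional tuple. The same rewrite is applied to every ir.model.access.check() call later in this commit. A sketch of the constraint, on a hypothetical cached method:

    @tools.ormcache()
    def _compute(self, cr, uid, name, mode='read'):
        ...

    # obj._compute(cr, uid, 'res.partner', 'read')       # hit/miss, key = (uid, 'res.partner', 'read')
    # obj._compute(cr, uid, 'res.partner', mode='read')  # TypeError: lookup() got an
    #                                                    # unexpected keyword argument 'mode'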

View File: ir_translation.py

@@ -92,7 +92,7 @@ class ir_translation(osv.osv):
cr.execute('CREATE INDEX ir_translation_ltn ON ir_translation (name, lang, type)')
cr.commit()
@tools.cache(skiparg=3, multi='ids')
@tools.ormcache_multi(skiparg=3, multi=7)
def _get_ids(self, cr, uid, name, tt, lang, ids):
translations = dict.fromkeys(ids, False)
if ids:
@@ -112,9 +112,9 @@ class ir_translation(osv.osv):
tr = self._get_ids(cr, uid, name, tt, lang, ids)
for res_id in tr:
if tr[res_id]:
self._get_source.clear_cache(cr.dbname, uid, name, tt, lang, tr[res_id])
self._get_source.clear_cache(cr.dbname, uid, name, tt, lang)
self._get_ids.clear_cache(cr.dbname, uid, name, tt, lang, ids)
self._get_source.clear_cache(self, uid, name, tt, lang, tr[res_id])
self._get_source.clear_cache(self, uid, name, tt, lang)
self._get_ids.clear_cache(self, uid, name, tt, lang, ids)
cr.execute('delete from ir_translation ' \
'where lang=%s ' \
@@ -133,7 +133,7 @@ class ir_translation(osv.osv):
})
return len(ids)
@tools.cache(skiparg=3)
@tools.ormcache(skiparg=3)
def _get_source(self, cr, uid, name, types, lang, source=None):
"""
Returns the translation for the given combination of name, type, language
@@ -183,8 +183,8 @@ class ir_translation(osv.osv):
context = {}
ids = super(ir_translation, self).create(cursor, user, vals, context=context)
for trans_obj in self.read(cursor, user, [ids], ['name','type','res_id','src','lang'], context=context):
self._get_source.clear_cache(cursor.dbname, user, trans_obj['name'], trans_obj['type'], trans_obj['lang'], source=trans_obj['src'])
self._get_ids.clear_cache(cursor.dbname, user, trans_obj['name'], trans_obj['type'], trans_obj['lang'], [trans_obj['res_id']])
self._get_source.clear_cache(self, user, trans_obj['name'], trans_obj['type'], trans_obj['lang'], source=trans_obj['src'])
self._get_ids.clear_cache(self, user, trans_obj['name'], trans_obj['type'], trans_obj['lang'], [trans_obj['res_id']])
return ids
def write(self, cursor, user, ids, vals, context=None):
@@ -194,8 +194,8 @@ class ir_translation(osv.osv):
ids = [ids]
result = super(ir_translation, self).write(cursor, user, ids, vals, context=context)
for trans_obj in self.read(cursor, user, ids, ['name','type','res_id','src','lang'], context=context):
self._get_source.clear_cache(cursor.dbname, user, trans_obj['name'], trans_obj['type'], trans_obj['lang'], source=trans_obj['src'])
self._get_ids.clear_cache(cursor.dbname, user, trans_obj['name'], trans_obj['type'], trans_obj['lang'], [trans_obj['res_id']])
self._get_source.clear_cache(self, user, trans_obj['name'], trans_obj['type'], trans_obj['lang'], source=trans_obj['src'])
self._get_ids.clear_cache(self, user, trans_obj['name'], trans_obj['type'], trans_obj['lang'], [trans_obj['res_id']])
return result
def unlink(self, cursor, user, ids, context=None):
@@ -204,8 +204,8 @@ class ir_translation(osv.osv):
if isinstance(ids, (int, long)):
ids = [ids]
for trans_obj in self.read(cursor, user, ids, ['name','type','res_id','src','lang'], context=context):
self._get_source.clear_cache(cursor.dbname, user, trans_obj['name'], trans_obj['type'], trans_obj['lang'], source=trans_obj['src'])
self._get_ids.clear_cache(cursor.dbname, user, trans_obj['name'], trans_obj['type'], trans_obj['lang'], [trans_obj['res_id']])
self._get_source.clear_cache(self, user, trans_obj['name'], trans_obj['type'], trans_obj['lang'], source=trans_obj['src'])
self._get_ids.clear_cache(self, user, trans_obj['name'], trans_obj['type'], trans_obj['lang'], [trans_obj['res_id']])
result = super(ir_translation, self).unlink(cursor, user, ids, context=context)
return result
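_get_ids switches from multi='ids' (an argument name) to multi=7 (an argument position, 1-based and counting self). ormcache_multi subtracts 3 from it (self, cr, and the 1-based offset) to locate, inside the *args tuple seen by lookup(), the list of ids it splits into one cache entry per id. A position map on a hypothetical multi method (my_label is an illustrative table):

    # positions: 1=self  2=cr  3=uid  4=lang  5=ids, hence multi=5;
    # inside lookup(), args == (uid, lang, ids) and the ids list is args[5 - 3]
    @tools.ormcache_multi(skiparg=3, multi=5)
    def _labels(self, cr, uid, lang, ids):
        res = dict.fromkeys(ids, False)
        if ids:
            cr.execute('SELECT id, label FROM my_label '
                       'WHERE lang=%s AND id IN %s', (lang, tuple(ids)))
            res.update(cr.fetchall())
        return res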

View File: res_company.py

@@ -132,7 +132,7 @@ class res_company(osv.osv):
ids = self._get_company_children(cr, uid, company)
return ids
@tools.cache()
@tools.ormcache()
def _get_company_children(self, cr, uid=None, company=None):
if not company:
return []
@@ -159,7 +159,7 @@ class res_company(osv.osv):
    # This function restarts the cache on the _get_company_children method
#
def cache_restart(self, cr):
self._get_company_children.clear_cache(cr.dbname)
self._get_company_children.clear_cache(self)
def create(self, cr, uid, vals, context=None):
if not vals.get('name', False) or vals.get('partner_id', False):

View File: res_lang.py

@@ -160,7 +160,7 @@ class lang(osv.osv):
(_check_format, 'Invalid date/time format directive specified. Please refer to the list of allowed directives, displayed when you edit a language.', ['time_format', 'date_format'])
]
@tools.cache(skiparg=3)
@tools.ormcache(skiparg=3)
def _lang_data_get(self, cr, uid, lang_id, monetary=False):
conv = localeconv()
lang_obj = self.browse(cr, uid, lang_id)
@@ -171,7 +171,7 @@ class lang(osv.osv):
def write(self, cr, uid, ids, vals, context=None):
        for lang_id in ids:
self._lang_data_get.clear_cache(cr.dbname,lang_id= lang_id)
            self._lang_data_get.clear_cache(self, lang_id=lang_id)
return super(lang, self).write(cr, uid, ids, vals, context)
def unlink(self, cr, uid, ids, context=None):

View File: res_users.py

@@ -267,7 +267,7 @@ class users(osv.osv):
o['password'] = '********'
return o
result = super(users, self).read(cr, uid, ids, fields, context, load)
canwrite = self.pool.get('ir.model.access').check(cr, uid, 'res.users', 'write', raise_exception=False)
canwrite = self.pool.get('ir.model.access').check(cr, uid, 'res.users', 'write', False)
if not canwrite:
if isinstance(ids, (int, float)):
result = override_password(result)
@@ -351,7 +351,7 @@ class users(osv.osv):
'menu_tips':True
}
@tools.cache()
@tools.ormcache()
def company_get(self, cr, uid, uid2, context=None):
return self._get_company(cr, uid, context=context, uid2=uid2)
@@ -374,7 +374,7 @@ class users(osv.osv):
res = super(users, self).write(cr, uid, ids, values, context=context)
# clear caches linked to the users
self.company_get.clear_cache(cr.dbname)
self.company_get.clear_cache(self)
self.pool.get('ir.model.access').call_cache_clearing_methods(cr)
clear = partial(self.pool.get('ir.rule').clear_cache, cr)
map(clear, ids)

View File: osv/orm.py

@@ -1981,7 +1981,7 @@ class orm_template(object):
def read_string(self, cr, uid, id, langs, fields=None, context=None):
res = {}
res2 = {}
self.pool.get('ir.model.access').check(cr, uid, 'ir.translation', 'read', context=context)
self.pool.get('ir.model.access').check(cr, uid, 'ir.translation', 'read')
if not fields:
fields = self._columns.keys() + self._inherit_fields.keys()
#FIXME: collect all calls to _get_source into one SQL call.
@@ -2005,7 +2005,7 @@ class orm_template(object):
return res
def write_string(self, cr, uid, id, langs, vals, context=None):
self.pool.get('ir.model.access').check(cr, uid, 'ir.translation', 'write', context=context)
self.pool.get('ir.model.access').check(cr, uid, 'ir.translation', 'write')
#FIXME: try to only call the translation in one SQL
for lang in langs:
for field in vals:
@@ -2312,7 +2312,7 @@ class orm(orm_template):
"""
context = context or {}
self.pool.get('ir.model.access').check(cr, uid, self._name, 'read', context=context)
self.pool.get('ir.model.access').check(cr, uid, self._name, 'read')
if not fields:
fields = self._columns.keys()
@@ -3122,14 +3122,14 @@ class orm(orm_template):
"""
ira = self.pool.get('ir.model.access')
write_access = ira.check(cr, user, self._name, 'write', raise_exception=False, context=context) or \
ira.check(cr, user, self._name, 'create', raise_exception=False, context=context)
write_access = ira.check(cr, user, self._name, 'write', False) or \
ira.check(cr, user, self._name, 'create', False)
return super(orm, self).fields_get(cr, user, fields, context, write_access)
def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
if not context:
context = {}
self.pool.get('ir.model.access').check(cr, user, self._name, 'read', context=context)
self.pool.get('ir.model.access').check(cr, user, self._name, 'read')
if not fields:
fields = list(set(self._columns.keys() + self._inherit_fields.keys()))
if isinstance(ids, (int, long)):
@@ -3415,7 +3415,7 @@ class orm(orm_template):
self._check_concurrency(cr, ids, context)
self.pool.get('ir.model.access').check(cr, uid, self._name, 'unlink', context=context)
self.pool.get('ir.model.access').check(cr, uid, self._name, 'unlink')
properties = self.pool.get('ir.property')
domain = [('res_id', '=', False),
@@ -3549,7 +3549,7 @@ class orm(orm_template):
ids = [ids]
self._check_concurrency(cr, ids, context)
self.pool.get('ir.model.access').check(cr, user, self._name, 'write', context=context)
self.pool.get('ir.model.access').check(cr, user, self._name, 'write')
result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
@@ -3758,7 +3758,7 @@ class orm(orm_template):
"""
if not context:
context = {}
self.pool.get('ir.model.access').check(cr, user, self._name, 'create', context=context)
self.pool.get('ir.model.access').check(cr, user, self._name, 'create')
vals = self._add_missing_default_values(cr, user, vals, context)
@@ -4214,7 +4214,7 @@ class orm(orm_template):
"""
if context is None:
context = {}
self.pool.get('ir.model.access').check(cr, access_rights_uid or user, self._name, 'read', context=context)
self.pool.get('ir.model.access').check(cr, access_rights_uid or user, self._name, 'read')
query = self._where_calc(cr, user, args, context=context)
self._apply_ir_rules(cr, user, query, 'read', context=context)

View File: tools/lru.py

@@ -12,7 +12,7 @@ class LRUNode(object):
self.me = me
self.next = None
class LRU:
class LRU(object):
"""
Implementation of a length-limited O(1) LRU queue.
Built for and used by PyPE:
@@ -113,3 +113,8 @@ class LRU:
del self[key]
return v
@synchronized()
def clear(self):
self.d = {}
self.first = None
self.last = None
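LRU also becomes a new-style class, and the synchronized clear() added here is what ormcache.clear relies on to drop a whole per-method cache in one shot. A standalone usage sketch (the import path is assumed from this tree's layout):

    from openerp.tools.lru import LRU

    lru = LRU(2)                  # keep at most the 2 most recently used entries
    lru['a'] = 1
    lru['b'] = 2
    lru['c'] = 3                  # over capacity: least recently used 'a' is evicted
    assert sorted(lru.keys()) == ['b', 'c']
    lru.clear()                   # new in this commit: reset the dict and the links
    assert lru.keys() == []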

View File: tools/misc.py

@@ -639,162 +639,113 @@ class currency(float):
# display_value = int(self*(10**(-self.accuracy))/self.rounding)*self.rounding/(10**(-self.accuracy))
# return str(display_value)
class ormcache(object):
""" LRU cache decorator for orm methods
"""
def is_hashable(h):
try:
hash(h)
return True
except TypeError:
return False
    def __init__(self, skiparg=2, size=8192, multi=None, timeout=None):
        # skiparg counts the leading arguments excluded from the cache key
        # (self and cr, plus uid when skiparg=3); multi and timeout are
        # accepted only for signature compatibility with the old tools.cache
        self.skiparg = skiparg
        self.size = size
        self.method = None
        self.stat_miss = 0
        self.stat_hit = 0
        self.stat_err = 0
    def __call__(self, m):
        self.method = m
        def lookup(self2, cr, *args):
            r = self.lookup(m, m, self2, cr, *args)
            # print "lookup-stats hit miss err",self.stat_hit,self.stat_miss,self.stat_err
            return r
        # expose cache invalidation on the wrapper itself
        lookup.clear_cache = self.clear
        return lookup
    def lookup(self, cacheid, method, self2, cr, *args):
        try:
            ormcache = getattr(self2, '_ormcache')
        except AttributeError:
            ormcache = self2._ormcache = {}
        try:
            d = ormcache[cacheid]
        except KeyError:
            d = ormcache[cacheid] = LRU(self.size)
        key = args[self.skiparg-2:]
        try:
            r = d[key]
            self.stat_hit += 1
            return r
        except KeyError:
            self.stat_miss += 1
            # cache the result under the same key used for the lookup above
            value = d[key] = method(self2, cr, *args)
            return value
        except TypeError:
            # unhashable key (e.g. a list argument): skip caching entirely
            self.stat_err += 1
            return method(self2, cr, *args)
    def clear(self, self2, *args, **kw):
        """ Clear the cache of this method for self2's registry.
        Extra arguments are accepted for call-site compatibility but are
        currently ignored: the whole per-method LRU is dropped.
        """
        ormcache = getattr(self2, '_ormcache', None)
        if ormcache is None:
            self2._ormcache = {}
            return
        d = ormcache.get(self.method)
        if d is not None:
            d.clear()
class ormcache_multi(ormcache):
    def __init__(self, skiparg=2, size=8192, multi=4):
        super(ormcache_multi, self).__init__(skiparg, size)
        # multi is the 1-based position of the ids argument in the method
        # signature, counting self; subtract 3 (self, cr, and the 1-based
        # offset) to index into the *args tuple seen by lookup()
        self.multi = multi - 3
    def lookup(self, cacheid, method, self2, cr, *args):
        superlookup = super(ormcache_multi, self).lookup
        args = list(args)
        multi = self.multi
        ids = args[multi]
        r = {}
        miss = []
        # probe the cache id by id; record the misses instead of running the method
        def add_to_miss(_self2, _cr, *_args):
            miss.append(_args[multi])
        for i in ids:
            args[multi] = i
            r[i] = superlookup(method, add_to_miss, self2, cr, *args)
        if miss:
            # fetch every missing id with a single call to the real method,
            # then backfill the per-id cache entries
            args[multi] = miss
            r.update(method(self2, cr, *args))
            for i in miss:
                args[multi] = i
                key = tuple(args[self.skiparg-2:])
                self2._ormcache[cacheid][key] = r[i]
        return r
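Because every model instance carries its own _ormcache dict, two registries (databases) never share entries, which is what allows the clear_cache calls above to take self instead of a dbname. A rough sketch, assuming model is any osv instance whose _m method is decorated with @ormcache():

    model._m(cr, 1, 'key')         # miss: runs the body, stores under key (1, 'key')
    model._m(cr, 1, 'key')         # hit: served from the LRU in model._ormcache
    model._m.clear_cache(model)    # empties the per-method LRU for this registry only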
class dummy_cache(object):
""" Cache decorator replacement to actually do no caching.
This can be useful to benchmark and/or track memory leak.
"""
def __init__(self, timeout=None, skiparg=2, multi=None, size=8192):
def __init__(self, *l, **kw):
pass
def clear(self, dbname, *args, **kwargs):
def clear(self, *l, **kw):
pass
@classmethod
def clean_caches_for_db(cls, dbname):
pass
def __call__(self, fn):
fn.clear_cache = self.clear
return fn
class real_cache(object):
"""
    Use this class as a decorator on the function you want to cache.
    timeout: 0 = no timeout, otherwise expressed in seconds.
"""
__caches = []
def __init__(self, timeout=None, skiparg=2, multi=None, size=8192):
assert skiparg >= 2 # at least self and cr
if timeout is None:
self.timeout = config['cache_timeout']
else:
self.timeout = timeout
self.skiparg = skiparg
self.multi = multi
self.lasttime = time.time()
self.cache = LRU(size) # TODO take size from config
self.fun = None
        # must reference real_cache: name mangling plus the module-level
        # rebinding of 'cache' below would break 'cache.__caches' here
        real_cache.__caches.append(self)
def _generate_keys(self, dbname, kwargs2):
"""
        Generate keys depending on the arguments and the self.multi value
"""
def to_tuple(d):
pairs = d.items()
pairs.sort(key=lambda (k,v): k)
for i, (k, v) in enumerate(pairs):
if isinstance(v, dict):
pairs[i] = (k, to_tuple(v))
                elif isinstance(v, (list, set)):
pairs[i] = (k, tuple(v))
elif not is_hashable(v):
pairs[i] = (k, repr(v))
return tuple(pairs)
if not self.multi:
key = (('dbname', dbname),) + to_tuple(kwargs2)
yield key, None
else:
multis = kwargs2[self.multi][:]
for id in multis:
kwargs2[self.multi] = (id,)
key = (('dbname', dbname),) + to_tuple(kwargs2)
yield key, id
def _unify_args(self, *args, **kwargs):
# Update named arguments with positional argument values (without self and cr)
kwargs2 = self.fun_default_values.copy()
kwargs2.update(kwargs)
kwargs2.update(dict(zip(self.fun_arg_names, args[self.skiparg-2:])))
return kwargs2
def clear(self, dbname, *args, **kwargs):
"""clear the cache for database dbname
if *args and **kwargs are both empty, clear all the keys related to this database
"""
if not args and not kwargs:
keys_to_del = [key for key in self.cache.keys() if key[0][1] == dbname]
else:
kwargs2 = self._unify_args(*args, **kwargs)
keys_to_del = [key for key, _ in self._generate_keys(dbname, kwargs2) if key in self.cache.keys()]
for key in keys_to_del:
self.cache.pop(key)
@classmethod
def clean_caches_for_db(cls, dbname):
for c in cls.__caches:
c.clear(dbname)
def __call__(self, fn):
if self.fun is not None:
raise Exception("Can not use a cache instance on more than one function")
self.fun = fn
argspec = inspect.getargspec(fn)
self.fun_arg_names = argspec[0][self.skiparg:]
self.fun_default_values = {}
if argspec[3]:
self.fun_default_values = dict(zip(self.fun_arg_names[-len(argspec[3]):], argspec[3]))
def cached_result(self2, cr, *args, **kwargs):
if time.time()-int(self.timeout) > self.lasttime:
self.lasttime = time.time()
t = time.time()-int(self.timeout)
old_keys = [key for key in self.cache.keys() if self.cache[key][1] < t]
for key in old_keys:
self.cache.pop(key)
kwargs2 = self._unify_args(*args, **kwargs)
result = {}
notincache = {}
for key, id in self._generate_keys(cr.dbname, kwargs2):
if key in self.cache:
result[id] = self.cache[key][0]
else:
notincache[id] = key
if notincache:
if self.multi:
kwargs2[self.multi] = notincache.keys()
result2 = fn(self2, cr, *args[:self.skiparg-2], **kwargs2)
if not self.multi:
key = notincache[None]
self.cache[key] = (result2, time.time())
result[None] = result2
else:
for id in result2:
key = notincache[id]
self.cache[key] = (result2[id], time.time())
result.update(result2)
if not self.multi:
return result[None]
return result
cached_result.clear_cache = self.clear
return cached_result
# TODO make it an option
cache = real_cache
#ormcache = dummy_cache
cache = dummy_cache
def to_xml(s):
return s.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')