From c9154e08aab9114ce1e269f91c7c6d3972d4b114 Mon Sep 17 00:00:00 2001
From: Denis Ledoux
Date: Thu, 12 Feb 2015 14:57:31 +0100
Subject: [PATCH] [FIX] api: environment recomputation

In a workflow context (for instance, in the invoice workflow), the
context is not passed along. Therefore, relying on the 'recompute' key
being in the context in order not to recompute the fields does not work
with workflows.

This leads to huge performance issues, as fields are recomputed
recursively (instead of sequentially) when several records are
involved. For instance, when reconciling several invoices with one
payment (e.g. 100 invoices with 1 payment), the records of each invoice
are needlessly recomputed in every workflow call (i.e. for each
"confirm_paid" method run on each invoice). With a significant number
of invoices (100, for instance), this even leads to a "Maximum
recursion depth reached" error.

A transaction-wide recomputation flag is therefore added on the
environment (Environments.recompute, exposed as env.recompute and
env.norecompute()), so that recomputation can be disabled even for code
that does not receive the caller's context, such as workflow methods.

closes #4905
---
 addons/account/account_move_line.py |  3 +-
 openerp/api.py                      | 14 +++++
 openerp/models.py                   | 11 ++--
 openerp/osv/fields.py               | 95 +++++++++++++++--------
 4 files changed, 70 insertions(+), 53 deletions(-)

diff --git a/addons/account/account_move_line.py b/addons/account/account_move_line.py
index b852672ad8f..75f6198c7d2 100644
--- a/addons/account/account_move_line.py
+++ b/addons/account/account_move_line.py
@@ -1391,7 +1391,8 @@ class account_move_line(osv.osv):
                      self.create(cr, uid, data, context)
             del vals['account_tax_id']

-        if check and not context.get('novalidate') and (context.get('recompute', True) or journal.entry_posted):
+        recompute = journal.env.recompute and context.get('recompute', True)
+        if check and not context.get('novalidate') and (recompute or journal.entry_posted):
             tmp = move_obj.validate(cr, uid, [vals['move_id']], context)
             if journal.entry_posted and tmp:
                 move_obj.button_validate(cr,uid, [vals['move_id']], context)
diff --git a/openerp/api.py b/openerp/api.py
index 070dfd4e69b..4ab512be6a9 100644
--- a/openerp/api.py
+++ b/openerp/api.py
@@ -890,6 +890,19 @@
         if invalids:
             raise Warning('Invalid cache for fields\n' + pformat(invalids))

+    @property
+    def recompute(self):
+        return self.all.recompute
+
+    @contextmanager
+    def norecompute(self):
+        tmp = self.all.recompute
+        self.all.recompute = False
+        try:
+            yield
+        finally:
+            self.all.recompute = tmp
+

 class Environments(object):
     """ A common object for all environments in a request. """
@@ -897,6 +910,7 @@ def __init__(self):
         self.envs = WeakSet()               # weak set of environments
         self.todo = {}                      # recomputations {field: [records]}
         self.mode = False                   # flag for draft/onchange
+        self.recompute = True

     def add(self, env):
         """ Add the environment `env`. """
diff --git a/openerp/models.py b/openerp/models.py
index fde2e393751..a9f32c72588 100644
--- a/openerp/models.py
+++ b/openerp/models.py
@@ -3973,7 +3973,7 @@ class BaseModel(object):
             self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)

         # recompute new-style fields
-        if context.get('recompute', True):
+        if recs.env.recompute and context.get('recompute', True):
             recs.recompute()

         self.step_workflow(cr, user, ids, context=context)
@@ -4220,7 +4220,7 @@
         # check Python constraints
         recs._validate_fields(vals)

-        if context.get('recompute', True):
+        if recs.env.recompute and context.get('recompute', True):
             result += self._store_get_values(cr, user, [id_new],
                 list(set(vals.keys() + self._inherits.values())),
                 context)
@@ -4233,7 +4233,7 @@
             # recompute new-style fields
             recs.recompute()

-        if self._log_create and context.get('recompute', True):
+        if self._log_create and recs.env.recompute and context.get('recompute', True):
             message = self._description + \
                 " '" + \
                 self.name_get(cr, user, [id_new], context=context)[0][1] + \
@@ -5629,12 +5629,13 @@
             field, recs = self.env.get_todo()
             # evaluate the fields to recompute, and save them to database
             names = [f.name for f in field.computed_fields if f.store]
-            for rec, rec1 in zip(recs, recs.with_context(recompute=False)):
+            for rec in recs:
                 try:
                     values = rec._convert_to_write({
                         name: rec[name] for name in names
                     })
-                    rec1._write(values)
+                    with rec.env.norecompute():
+                        rec._write(values)
                 except MissingError:
                     pass
             # mark the computed fields as done
diff --git a/openerp/osv/fields.py b/openerp/osv/fields.py
index 25028e16305..d0fc0dfcaea 100644
--- a/openerp/osv/fields.py
+++ b/openerp/osv/fields.py
@@ -736,57 +736,58 @@ class one2many(_column):
         result = []
         context = dict(context or {})
         context.update(self._context)
-        context['recompute'] = False    # recomputation is done by outer create/write
         if not values:
             return
         obj = obj.pool[self._obj]
-        _table = obj._table
-        for act in values:
-            if act[0] == 0:
-                act[2][self._fields_id] = id
-                id_new = obj.create(cr, user, act[2], context=context)
-                result += obj._store_get_values(cr, user, [id_new], act[2].keys(), context)
-            elif act[0] == 1:
-                obj.write(cr, user, [act[1]], act[2], context=context)
-            elif act[0] == 2:
-                obj.unlink(cr, user, [act[1]], context=context)
-            elif act[0] == 3:
-                inverse_field = obj._fields.get(self._fields_id)
-                assert inverse_field, 'Trying to unlink the content of a o2m but the pointed model does not have a m2o'
-                # if the model has on delete cascade, just delete the row
-                if inverse_field.ondelete == "cascade":
+        rec = obj.browse(cr, user, [], context=context)
+        with rec.env.norecompute():
+            _table = obj._table
+            for act in values:
+                if act[0] == 0:
+                    act[2][self._fields_id] = id
+                    id_new = obj.create(cr, user, act[2], context=context)
+                    result += obj._store_get_values(cr, user, [id_new], act[2].keys(), context)
+                elif act[0] == 1:
+                    obj.write(cr, user, [act[1]], act[2], context=context)
+                elif act[0] == 2:
                     obj.unlink(cr, user, [act[1]], context=context)
-                else:
-                    cr.execute('update '+_table+' set '+self._fields_id+'=null where id=%s', (act[1],))
-            elif act[0] == 4:
-                # table of the field (parent_model in case of inherit)
-                field = obj.pool[self._obj]._fields[self._fields_id]
-                field_model = field.base_field.model_name
-                field_table = obj.pool[field_model]._table
-                cr.execute("select 1 from {0} where id=%s and {1}=%s".format(field_table, self._fields_id), (act[1], id))
-                if not cr.fetchone():
-                    # Must use write() to recompute parent_store structure if needed and check access rules
-                    obj.write(cr, user, [act[1]], {self._fields_id:id}, context=context or {})
-            elif act[0] == 5:
-                inverse_field = obj._fields.get(self._fields_id)
-                assert inverse_field, 'Trying to unlink the content of a o2m but the pointed model does not have a m2o'
-                # if the o2m has a static domain we must respect it when unlinking
-                domain = self._domain(obj) if callable(self._domain) else self._domain
-                extra_domain = domain or []
-                ids_to_unlink = obj.search(cr, user, [(self._fields_id,'=',id)] + extra_domain, context=context)
-                # If the model has cascade deletion, we delete the rows because it is the intended behavior,
-                # otherwise we only nullify the reverse foreign key column.
-                if inverse_field.ondelete == "cascade":
-                    obj.unlink(cr, user, ids_to_unlink, context=context)
-                else:
-                    obj.write(cr, user, ids_to_unlink, {self._fields_id: False}, context=context)
-            elif act[0] == 6:
-                # Must use write() to recompute parent_store structure if needed
-                obj.write(cr, user, act[2], {self._fields_id:id}, context=context or {})
-                ids2 = act[2] or [0]
-                cr.execute('select id from '+_table+' where '+self._fields_id+'=%s and id <> ALL (%s)', (id,ids2))
-                ids3 = map(lambda x:x[0], cr.fetchall())
-                obj.write(cr, user, ids3, {self._fields_id:False}, context=context or {})
+                elif act[0] == 3:
+                    inverse_field = obj._fields.get(self._fields_id)
+                    assert inverse_field, 'Trying to unlink the content of a o2m but the pointed model does not have a m2o'
+                    # if the model has on delete cascade, just delete the row
+                    if inverse_field.ondelete == "cascade":
+                        obj.unlink(cr, user, [act[1]], context=context)
+                    else:
+                        cr.execute('update '+_table+' set '+self._fields_id+'=null where id=%s', (act[1],))
+                elif act[0] == 4:
+                    # table of the field (parent_model in case of inherit)
+                    field = obj.pool[self._obj]._fields[self._fields_id]
+                    field_model = field.base_field.model_name
+                    field_table = obj.pool[field_model]._table
+                    cr.execute("select 1 from {0} where id=%s and {1}=%s".format(field_table, self._fields_id), (act[1], id))
+                    if not cr.fetchone():
+                        # Must use write() to recompute parent_store structure if needed and check access rules
+                        obj.write(cr, user, [act[1]], {self._fields_id:id}, context=context or {})
+                elif act[0] == 5:
+                    inverse_field = obj._fields.get(self._fields_id)
+                    assert inverse_field, 'Trying to unlink the content of a o2m but the pointed model does not have a m2o'
+                    # if the o2m has a static domain we must respect it when unlinking
+                    domain = self._domain(obj) if callable(self._domain) else self._domain
+                    extra_domain = domain or []
+                    ids_to_unlink = obj.search(cr, user, [(self._fields_id,'=',id)] + extra_domain, context=context)
+                    # If the model has cascade deletion, we delete the rows because it is the intended behavior,
+                    # otherwise we only nullify the reverse foreign key column.
+                    if inverse_field.ondelete == "cascade":
+                        obj.unlink(cr, user, ids_to_unlink, context=context)
+                    else:
+                        obj.write(cr, user, ids_to_unlink, {self._fields_id: False}, context=context)
+                elif act[0] == 6:
+                    # Must use write() to recompute parent_store structure if needed
+                    obj.write(cr, user, act[2], {self._fields_id:id}, context=context or {})
+                    ids2 = act[2] or [0]
+                    cr.execute('select id from '+_table+' where '+self._fields_id+'=%s and id <> ALL (%s)', (id,ids2))
+                    ids3 = map(lambda x:x[0], cr.fetchall())
+                    obj.write(cr, user, ids3, {self._fields_id:False}, context=context or {})
         return result

     def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, operator='like', context=None):
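
For reference, a hedged sketch of the calling pattern this patch
establishes in openerp/models.py and openerp/osv/fields.py: run batch
writes under env.norecompute() so that no individual write flushes the
recomputations, then flush them once at the end. The model name
'account.invoice', the search domain and the written value are
hypothetical placeholders; the snippet assumes an Odoo 8.0 environment
object `env` is already available.

# Hypothetical batch update, assuming an Odoo 8.0 `env` (openerp.api.Environment).
invoices = env['account.invoice'].search([('state', '=', 'open')])  # placeholder domain

with env.norecompute():
    # While the flag is off, write() only accumulates computed fields to
    # recompute; nothing is flushed here, not even from workflow methods
    # called without the original context.
    for invoice in invoices:
        invoice.write({'comment': 'bulk update'})  # placeholder value

# Flush all pending recomputations once, sequentially.
invoices.recompute()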