2008-07-23 15:01:27 +00:00
# -*- encoding: utf-8 -*-
2006-12-07 13:41:40 +00:00
##############################################################################
#
2008-11-10 11:07:21 +00:00
# OpenERP, Open Source Management Solution
2009-01-04 22:13:29 +00:00
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
2008-11-03 18:27:16 +00:00
# $Id$
2006-12-07 13:41:40 +00:00
#
2008-11-03 18:27:16 +00:00
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
2006-12-07 13:41:40 +00:00
#
2008-11-03 18:27:16 +00:00
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
2006-12-07 13:41:40 +00:00
#
2008-11-03 18:27:16 +00:00
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
2006-12-07 13:41:40 +00:00
#
2008-11-03 18:27:16 +00:00
##############################################################################
2006-12-07 13:41:40 +00:00
#
# Object relational mapping to PostgreSQL module
#    . Hierarchical structure
#    . Constraints consistency, validations
#    . Object metadata depends on its status
#    . Optimised processing by complex query (multiple actions at once)
#    . Default field values
#    . Permissions optimisation
#    . Persistent object: DB postgresql
#    . Data conversions
#    . Multi-level caching system
#    . 2 different inheritance mechanisms
# . Fields:
# - classicals (varchar, integer, boolean, ...)
# - relations (one2many, many2one, many2many)
# - functions
#
#
2008-06-15 14:59:23 +00:00
import time
2009-02-02 14:07:15 +00:00
import calendar
2008-07-01 05:58:30 +00:00
import types
2006-12-07 13:41:40 +00:00
import string
import netsvc
import re
2008-08-19 13:26:12 +00:00
import pickle
2006-12-07 13:41:40 +00:00
import fields
import tools
2008-08-12 23:02:28 +00:00
import sys
2009-06-19 16:04:29 +00:00
2008-08-12 23:02:28 +00:00
try :
from xml import dom , xpath
except ImportError :
sys . stderr . write ( " ERROR: Import xpath module \n " )
sys . stderr . write ( " ERROR: Try to install the old python-xml package \n " )
2009-06-19 16:08:11 +00:00
sys . stderr . write ( ' On Ubuntu Jaunty, try this: sudo cp /usr/lib/python2.6/dist-packages/oldxml/_xmlplus/utils/boolean.so /usr/lib/python2.5/site-packages/oldxml/_xmlplus/utils \n ' )
2009-06-19 16:04:29 +00:00
raise
2008-08-12 23:02:28 +00:00
2008-05-27 05:38:42 +00:00
from tools . config import config
2009-02-27 06:51:18 +00:00
regex_order = re . compile ( ' ^([a-z0-9_]+( *desc| *asc)?( *, *|))+$ ' , re . I )
2008-05-26 18:33:33 +00:00
2009-02-02 14:07:15 +00:00
def last_day_of_current_month():
    """Return the last day of the current month as a 'YYYY-MM-DD' string.

    Uses calendar.monthrange (imported at module level) to find how many
    days the current month has. The redundant local `import calendar` of
    the previous version was dropped; `datetime` stays a local import since
    it is not imported at module level.
    """
    import datetime
    today = datetime.date.today()
    # monthrange returns (weekday_of_first_day, number_of_days)
    last_day = calendar.monthrange(today.year, today.month)[1]
    return today.replace(day=last_day).strftime('%Y-%m-%d')
2008-08-12 14:44:56 +00:00
2006-12-07 13:41:40 +00:00
def intersect(la, lb):
    """Return the elements of `la` that also occur in `lb`, keeping `la`'s order."""
    return [item for item in la if item in lb]
2006-12-07 13:41:40 +00:00
2007-07-26 08:30:41 +00:00
2006-12-07 13:41:40 +00:00
class except_orm(Exception):
    """ORM-level error carrying a (name, value) pair.

    `name` is a short error category and `value` the detailed message;
    both are also exposed through the standard `args` tuple.
    """

    def __init__(self, name, value):
        # Exception.__init__ stores (name, value) as self.args.
        super(except_orm, self).__init__(name, value)
        self.name = name
        self.value = value
2006-12-07 13:41:40 +00:00
2009-06-10 11:15:35 +00:00
class BrowseRecordError(Exception):
    """Raised when a browse_record is built with an invalid (non-integer) id."""
2008-08-12 14:44:56 +00:00
2006-12-07 13:41:40 +00:00
# Readonly python database object browser
class browse_null(object):
    """Read-only null object returned when browsing an empty relation.

    Every attribute or item lookup resolves to None and the object is
    falsy (Python 2 `__nonzero__`), so callers can dereference a missing
    many2one value without explicit checks.
    """

    def __init__(self):
        # mirror browse_record's `.id` attribute for duck-typing
        self.id = False

    def __getattr__(self, name):
        # unknown attributes resolve to None instead of raising
        return None  # XXX: return self ?

    def __getitem__(self, name):
        return None

    def __nonzero__(self):
        return False

    def __int__(self):
        return False

    def __str__(self):
        return ''

    def __unicode__(self):
        return u''
2006-12-07 13:41:40 +00:00
2008-08-12 14:44:56 +00:00
2006-12-07 13:41:40 +00:00
#
# TODO: execute an object method on browse_record_list
#
class browse_record_list(list):
    """A plain list of browse_records that also carries the browsing context."""

    def __init__(self, lst, context=None):
        super(browse_record_list, self).__init__(lst)
        # keep the context the records were browsed with (empty dict if none)
        self.context = context if context else {}
2006-12-07 13:41:40 +00:00
2007-08-09 06:06:22 +00:00
2006-12-07 13:41:40 +00:00
class browse_record(object):
    """Lazy, cached proxy giving attribute-style read access to one record.

    Field values are fetched on first access and memoized in a cache dict
    shared by all browse_records created from the same browse() call, so
    sibling records are prefetched together in one read().
    """

    def __init__(self, cr, uid, id, table, cache, context=None, list_class=None, fields_process={}):
        '''
        table : the object (inherited from orm)
        context : a dictionary with an optional context
        '''
        if not context:
            context = {}
        self._list_class = list_class or browse_record_list
        self._cr = cr
        self._uid = uid
        self._id = id
        self._table = table
        self._table_name = self._table._name
        self._context = context
        # NOTE(review): `fields_process` is a mutable default argument —
        # all records created without an explicit value share the same dict.
        self._fields_process = fields_process
        # cache layout: {model_name: {id: {field_name: value}}}
        cache.setdefault(table._name, {})
        self._data = cache[table._name]

        if not (id and isinstance(id, (int, long,))):
            raise BrowseRecordError(_('Wrong ID for the browse record, got %r, expected an integer.') % (id,))
#        if not table.exists(cr, uid, id, context):
#            raise BrowseRecordError(_('Object %s does not exists') % (self,))

        if id not in self._data:
            self._data[id] = {'id': id}

        self._cache = cache

    def __getitem__(self, name):
        # 'id' is always known without a database fetch
        if name == 'id':
            return self._id
        if name not in self._data[self._id]:
            # build the list of fields we will fetch
            # fetch the definition of the field which was asked for
            if name in self._table._columns:
                col = self._table._columns[name]
            elif name in self._table._inherit_fields:
                col = self._table._inherit_fields[name][2]
            elif hasattr(self._table, str(name)):
                # not a field: expose model methods (re-bound to this record's
                # own cr/uid/id) and plain model attributes directly
                if isinstance(getattr(self._table, name), (types.MethodType, types.LambdaType, types.FunctionType)):
                    return lambda *args, **argv: getattr(self._table, name)(self._cr, self._uid, [self._id], *args, **argv)
                else:
                    return getattr(self._table, name)
            else:
                logger = netsvc.Logger()
                logger.notifyChannel('orm', netsvc.LOG_ERROR, "Programming error: field '%s' does not exist in object '%s'!" % (name, self._table._name))
                return None

            # if the field is a classic one or a many2one, we'll fetch all classic and many2one fields
            if col._prefetch:
                # gen the list of "local" (ie not inherited) fields which are classic or many2one
                ffields = filter(lambda x: x[1]._classic_write, self._table._columns.items())
                # gen the list of inherited fields
                inherits = map(lambda x: (x[0], x[1][2]), self._table._inherit_fields.items())
                # complete the field list with the inherited fields which are classic or many2one
                ffields += filter(lambda x: x[1]._classic_write, inherits)
            # otherwise we fetch only that field
            else:
                ffields = [(name, col)]
            # prefetch for every cached sibling id that is missing this field
            ids = filter(lambda id: name not in self._data[id], self._data.keys())
            # read the data
            fffields = map(lambda x: x[0], ffields)
            datas = self._table.read(self._cr, self._uid, ids, fffields, context=self._context, load="_classic_write")
            if self._fields_process:
                lang = self._context.get('lang', 'en_US') or 'en_US'
                # NOTE(review): browse_record defines no `pool` attribute, so
                # `self.pool` resolves through __getattr__/__getitem__ above;
                # `self._table.pool` looks like the intended object — confirm.
                lang_obj_ids = self.pool.get('res.lang').search(self._cr, self._uid, [('code', '=', lang)])
                if not lang_obj_ids:
                    raise Exception(_('Language with code "%s" is not defined in your system !\nDefine it through the Administration menu.') % (lang,))
                lang_obj = self.pool.get('res.lang').browse(self._cr, self._uid, lang_obj_ids[0])
                for n, f in ffields:
                    if f._type in self._fields_process:
                        for d in datas:
                            d[n] = self._fields_process[f._type](d[n])
                            if (d[n] is not None) and (d[n] is not False):
                                d[n].set_value(self._cr, self._uid, d[n], self, f, lang_obj)

            if not datas:
                # Where did those ids come from? Perhaps old entries in ir_model_data?
                raise except_orm('NoDataError', 'Field %s in %s%s' % (name, self._table_name, str(ids)))
            # create browse records for 'remote' objects
            for data in datas:
                for n, f in ffields:
                    if f._type in ('many2one', 'one2one'):
                        if data[n]:
                            obj = self._table.pool.get(f._obj)
                            compids = False
                            # _classic_write may yield either a bare id or an (id, name) pair
                            if type(data[n]) in (type([]), type((1,))):
                                ids2 = data[n][0]
                            else:
                                ids2 = data[n]
                            if ids2:
                                data[n] = browse_record(self._cr, self._uid, ids2, obj, self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process)
                            else:
                                data[n] = browse_null()
                        else:
                            data[n] = browse_null()
                    elif f._type in ('one2many', 'many2many') and len(data[n]):
                        data[n] = self._list_class([browse_record(self._cr, self._uid, id, self._table.pool.get(f._obj), self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process) for id in data[n]], self._context)
                self._data[data['id']].update(data)
        if not name in self._data[self._id]:
            #how did this happen?
            logger = netsvc.Logger()
            logger.notifyChannel("browse_record", netsvc.LOG_ERROR, "Ffields: %s, datas: %s" % (str(fffields), str(datas)))
            logger.notifyChannel("browse_record", netsvc.LOG_ERROR, "Data: %s, Table: %s" % (str(self._data[self._id]), str(self._table)))
            raise AttributeError(_('Unknown attribute %s in %s') % (str(name), self._table_name))
        return self._data[self._id][name]

    def __getattr__(self, name):
        # item lookup does the real work; missing fields ultimately
        # raise an AttributeError exception.
        return self[name]

    def __contains__(self, name):
        return (name in self._table._columns) or (name in self._table._inherit_fields) or hasattr(self._table, name)

    def __hasattr__(self, name):
        return name in self

    def __int__(self):
        return self._id

    def __str__(self):
        return "browse_record(%s, %d)" % (self._table_name, self._id)

    def __eq__(self, other):
        # NOTE(review): assumes `other` is a browse_record; comparing with
        # any other type raises AttributeError — confirm callers rely on this.
        return (self._table_name, self._id) == (other._table_name, other._id)

    def __ne__(self, other):
        return (self._table_name, self._id) != (other._table_name, other._id)

    # we need to define __unicode__ even though we've already defined __str__
    # because we have overridden __getattr__
    def __unicode__(self):
        return unicode(str(self))

    def __hash__(self):
        return hash((self._table_name, self._id))

    __repr__ = __str__
2006-12-07 13:41:40 +00:00
def get_pg_type(f):
    '''
    Map a fields.* column object to its PostgreSQL column type.

    Returns a tuple
    (type reported by postgres once the column exists, type expression used
    to create the column), or None when the field type is not supported
    (a warning is logged in that case).
    '''
    type_dict = {
            fields.boolean: 'bool',
            fields.integer: 'int4',
            fields.integer_big: 'int8',
            fields.text: 'text',
            fields.date: 'date',
            fields.time: 'time',
            fields.datetime: 'timestamp',
            fields.binary: 'bytea',
            fields.many2one: 'int4',
            }
    if type(f) in type_dict:
        f_type = (type_dict[type(f)], type_dict[type(f)])
    elif isinstance(f, fields.float):
        if f.digits:
            f_type = ('numeric', 'NUMERIC(%d,%d)' % (f.digits[0], f.digits[1]))
        else:
            f_type = ('float8', 'DOUBLE PRECISION')
    elif isinstance(f, (fields.char, fields.reference)):
        f_type = ('varchar', 'VARCHAR(%d)' % (f.size,))
    elif isinstance(f, fields.selection):
        # size the varchar from the longest selection key (string keys),
        # or use an integer column when the keys are ints
        if isinstance(f.selection, list) and isinstance(f.selection[0][0], (str, unicode)):
            f_size = reduce(lambda x, y: max(x, len(y[0])), f.selection, f.size or 16)
        elif isinstance(f.selection, list) and isinstance(f.selection[0][0], int):
            f_size = -1
        else:
            f_size = (hasattr(f, 'size') and f.size) or 16
        if f_size == -1:
            f_type = ('int4', 'INTEGER')
        else:
            f_type = ('varchar', 'VARCHAR(%d)' % f_size)
    elif isinstance(f, fields.function):
        # function fields are stored with the SQL type of the field type
        # they emulate; getattr replaces the original eval('fields.'+f._type)
        # (same attribute lookup, no dynamic code execution)
        field_class = getattr(fields, f._type, None)
        if field_class in type_dict:
            f_type = (type_dict[field_class], type_dict[field_class])
        elif f._type == 'float':
            f_type = ('float8', 'DOUBLE PRECISION')
        elif f._type == 'selection':
            f_type = ('text', 'text')
        elif f._type == 'char':
            f_type = ('varchar', 'VARCHAR(%d)' % (f.size,))
        else:
            logger = netsvc.Logger()
            logger.notifyChannel("init", netsvc.LOG_WARNING, '%s type not supported!' % (type(f)))
            f_type = None
    else:
        logger = netsvc.Logger()
        logger.notifyChannel("init", netsvc.LOG_WARNING, '%s type not supported!' % (type(f)))
        f_type = None
    return f_type
2006-12-07 13:41:40 +00:00
2008-08-12 14:44:56 +00:00
2008-06-14 15:14:19 +00:00
class orm_template(object):
    """Base class holding the declarative metadata shared by all models."""
    _name = None                 # technical model name, e.g. 'res.partner'
    _columns = {}                # field name -> fields.* column definition
    _constraints = []            # model-level constraints (see _columns usage elsewhere)
    _defaults = {}               # field name -> default value provider
    _rec_name = 'name'           # field used as the record's display name
    _parent_name = 'parent_id'   # field pointing to the hierarchical parent
    _parent_store = False        # presumably enables parent left/right storage — confirm
    _parent_order = False
    _date_name = 'date'
    _order = 'id'                # default ORDER BY clause (validated by regex_order)
    _sequence = None             # PostgreSQL sequence used for id generation
    _description = None          # human-readable name; defaults to _name in __init__
    _inherits = {}               # delegation inheritance: model name -> FK field
    _table = None                # SQL table name; derived from _name in __init__
    # NOTE(review): mutable class attribute — shared across all subclasses
    # that do not redefine it; confirm this sharing is intended.
    _invalids = set()

    # pseudo-field name used for write-concurrency checking
    CONCURRENCY_CHECK_FIELD = '__last_update'
2008-07-22 14:24:36 +00:00
    def _field_create(self, cr, context={}):
        """Synchronise this model's definition into ir_model / ir_model_fields.

        Creates the ir_model row if missing, registers an ir_model_data
        entry when loaded from a module (context['module']), then inserts
        or updates one ir_model_fields row per column. Commits on `cr`.
        """
        cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
        if not cr.rowcount:
            cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
            model_id = cr.fetchone()[0]
            cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
        else:
            model_id = cr.fetchone()[0]
        if 'module' in context:
            # register the model's XML id (model_<name>) for the loading module
            name_id = 'model_' + self._name.replace('.', '_')
            cr.execute('select * from ir_model_data where name=%s and res_id=%s', (name_id, model_id))
            if not cr.rowcount:
                cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, now(), now(), %s, %s, %s)", \
                    (name_id, context['module'], 'ir.model', model_id)
                )

        cr.commit()
        cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
        cols = {}
        for rec in cr.dictfetchall():
            cols[rec['name']] = rec
        for (k, f) in self._columns.items():
            vals = {
                'model_id': model_id,
                'model': self._name,
                'name': k,
                'field_description': f.string.replace("'", " "),
                'ttype': f._type,
                'relation': f._obj or 'NULL',
                'view_load': (f.view_load and 1) or 0,
                'select_level': str(f.select or 0),
                'readonly': (f.readonly and 1) or 0,
                'required': (f.required and 1) or 0,
            }
            if k not in cols:
                # new column: insert its ir_model_fields row
                cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
                id = cr.fetchone()[0]
                vals['id'] = id
                cr.execute("""INSERT INTO ir_model_fields (
                    id, model_id, model, name, field_description, ttype,
                    relation, view_load, state, select_level
                ) VALUES (
                    %s, %s, %s, %s, %s, %s, %s, %s, %s, %s
                )""", (
                    id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
                    vals['relation'], bool(vals['view_load']), 'base',
                    vals['select_level']
                ))
                if 'module' in context:
                    # register the field's XML id; suffix with the db id when
                    # the plain name is already taken
                    name1 = 'field_' + self._table + '_' + k
                    cr.execute("select name from ir_model_data where name=%s", (name1,))
                    if cr.fetchone():
                        name1 = name1 + "_" + str(id)
                    cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, now(), now(), %s, %s, %s)", \
                        (name1, context['module'], 'ir.model.fields', id)
                    )
            else:
                # existing column: re-sync when any stored value differs.
                # NOTE(review): the UPDATE rewrites every tracked column and
                # `continue` moves on to the next differing key, so the same
                # UPDATE may run several times for one field — confirm intended.
                for key, val in vals.items():
                    if cols[k][key] != vals[key]:
                        cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
                        cr.commit()
                        cr.execute("""UPDATE ir_model_fields SET
                            model_id=%s, field_description=%s, ttype=%s, relation=%s,
                            view_load=%s, select_level=%s, readonly=%s, required=%s
                        WHERE
                            model=%s AND name=%s""", (
                                vals['model_id'], vals['field_description'], vals['ttype'],
                                vals['relation'], bool(vals['view_load']),
                                vals['select_level'], bool(vals['readonly']), bool(vals['required']), vals['model'], vals['name']
                            ))
                        continue
        cr.commit()
def _auto_init ( self , cr , context = { } ) :
self . _field_create ( cr , context )
def __init__ ( self , cr ) :
2008-10-17 22:41:12 +00:00
if not self . _name and not hasattr ( self , ' _inherit ' ) :
name = type ( self ) . __name__ . split ( ' . ' ) [ 0 ]
msg = " The class %s has to have a _name attribute " % name
logger = netsvc . Logger ( )
logger . notifyChannel ( ' orm ' , netsvc . LOG_ERROR , msg )
raise except_orm ( ' ValueError ' , msg )
2008-07-22 14:24:36 +00:00
if not self . _description :
self . _description = self . _name
if not self . _table :
2008-08-12 14:44:56 +00:00
self . _table = self . _name . replace ( ' . ' , ' _ ' )
2008-07-22 14:24:36 +00:00
def browse ( self , cr , uid , select , context = None , list_class = None , fields_process = { } ) :
if not context :
2008-08-12 14:44:56 +00:00
context = { }
2008-07-22 14:24:36 +00:00
self . _list_class = list_class or browse_record_list
cache = { }
# need to accepts ints and longs because ids coming from a method
# launched by button in the interface have a type long...
if isinstance ( select , ( int , long ) ) :
2008-08-12 14:44:56 +00:00
return browse_record ( cr , uid , select , self , cache , context = context , list_class = self . _list_class , fields_process = fields_process )
elif isinstance ( select , list ) :
return self . _list_class ( [ browse_record ( cr , uid , id , self , cache , context = context , list_class = self . _list_class , fields_process = fields_process ) for id in select ] , context )
2008-07-22 14:24:36 +00:00
else :
return browse_null ( )
    def __export_row(self, cr, uid, row, fields, context=None):
        """Build the export rows for one browse_record.

        `fields` is a list of field paths (each already split on '/').
        Returns a list of rows (lists of cell values); the first row is the
        record's own values, the following ones come from *2many sub-records.
        """

        def check_type(field_type):
            # neutral cell value for an empty field, by column type
            if field_type == 'float':
                return 0.0
            elif field_type == 'integer':
                return 0
            elif field_type == 'boolean':
                return False
            return ''

        lines = []
        data = map(lambda x: '', range(len(fields)))
        done = []
        for fpos in range(len(fields)):
            f = fields[fpos]
            if f:
                r = row
                i = 0
                # walk the field path, dereferencing one component at a time
                while i < len(f):
                    if f[i] == 'db_id':
                        # database id pseudo-column
                        r = r['id']
                    elif f[i] == 'id':
                        # XML id pseudo-column: resolve via ir.model.data
                        model_data = self.pool.get('ir.model.data')
                        data_ids = model_data.search(cr, uid, [('model', '=', r._table_name), ('res_id', '=', r['id'])])
                        if len(data_ids):
                            d = model_data.read(cr, uid, data_ids, ['name', 'module'])[0]
                            if d['module']:
                                r = '%s.%s' % (d['module'], d['name'])
                            else:
                                r = d['name']
                        else:
                            # no XML id registered for this record
                            break
                    else:
                        r = r[f[i]]
                    if not r:
                        # empty value: emit the type-appropriate neutral cell
                        if f[i] in self._columns:
                            r = check_type(self._columns[f[i]]._type)
                        elif f[i] in self._inherit_fields:
                            r = check_type(self._inherit_fields[f[i]][2]._type)
                        data[fpos] = r
                        break
                    if isinstance(r, (browse_record_list, list)):
                        # *2many value: recurse into sub-records with the
                        # remaining path components
                        first = True
                        fields2 = map(lambda x: (x[:i+1] == f[:i+1] and x[i+1:]) \
                                or [], fields)
                        if fields2 in done:
                            break
                        done.append(fields2)
                        for row2 in r:
                            lines2 = self.__export_row(cr, uid, row2, fields2,
                                    context)
                            if first:
                                for fpos2 in range(len(fields)):
                                    if lines2 and lines2[0][fpos2]:
                                        data[fpos2] = lines2[0][fpos2]
                                if not data[fpos]:
                                    # no sub-field requested: export a
                                    # comma-separated list of display names
                                    dt = ''
                                    for rr in r:
                                        if isinstance(rr.name, browse_record):
                                            rr = rr.name
                                        dt += rr.name + ','
                                    data[fpos] = dt[:-1]
                                    break
                                lines += lines2[1:]
                                first = False
                            else:
                                lines += lines2
                        break
                    i += 1
                if i == len(f):
                    # full path consumed: final scalar (or record display name)
                    if isinstance(r, browse_record):
                        r = r.name
                    data[fpos] = tools.ustr(r or '')
        return [data] + lines
2009-07-14 05:42:37 +00:00
    def export_data(self, cr, uid, ids, fields_to_export, context=None):
        """Export `fields_to_export` ('/'-separated paths) for `ids`.

        Returns {'datas': rows} on success. When context['import_comp'] is
        set, paths that cannot round-trip through import (sub-fields,
        non-stored function fields, related and one2many columns) abort the
        export and {'warning': message} is returned instead.
        """
        if not context:
            context = {}
        imp_comp = context.get('import_comp', False)
        # merge own and inherited column definitions for label lookup
        cols = self._columns.copy()
        for f in self._inherit_fields:
            cols.update({f: self._inherit_fields[f][2]})
        fields_to_export = map(lambda x: x.split('/'), fields_to_export)
        fields_export = fields_to_export + []
        warning = ''
        warning_fields = []
        for field in fields_export:
            if imp_comp and len(field) > 1:
                # sub-field paths are not import-compatible
                warning_fields.append('/'.join(map(lambda x: x in cols and cols[x].string or x, field)))
            elif len(field) <= 1:
                if imp_comp and cols.get(field and field[0], False):
                    if ((isinstance(cols[field[0]], fields.function) and not cols[field[0]].store) \
                        or isinstance(cols[field[0]], fields.related) \
                        or isinstance(cols[field[0]], fields.one2many)):
                        warning_fields.append('/'.join(map(lambda x: x in cols and cols[x].string or x, field)))
        datas = []
        if imp_comp and len(warning_fields):
            warning = 'Following columns cannot be exported since you select to be import compatible.\n%s' % ('\n'.join(warning_fields))
            cr.rollback()
            return {'warning': warning}
        for row in self.browse(cr, uid, ids, context):
            datas += self.__export_row(cr, uid, row, fields_to_export, context)
        return {'datas': datas}
2008-07-22 14:24:36 +00:00
2009-02-27 16:37:20 +00:00
def import_data ( self , cr , uid , fields , datas , mode = ' init ' , current_module = ' ' , noupdate = False , context = None , filename = None ) :
2008-07-22 14:24:36 +00:00
if not context :
2008-08-12 14:44:56 +00:00
context = { }
2008-07-22 14:24:36 +00:00
fields = map ( lambda x : x . split ( ' / ' ) , fields )
logger = netsvc . Logger ( )
2009-07-14 04:52:22 +00:00
ir_model_data_obj = self . pool . get ( ' ir.model.data ' )
def process_liness ( self , datas , prefix , current_module , model_name , fields_def , position = 0 ) :
2008-07-22 14:24:36 +00:00
line = datas [ position ]
row = { }
translate = { }
todo = [ ]
warning = ' '
2008-08-12 14:44:56 +00:00
data_id = False
2009-07-14 04:52:22 +00:00
data_res_id = False
is_xml_id = False
is_db_id = False
ir_model_data_obj = self . pool . get ( ' ir.model.data ' )
2008-07-22 14:24:36 +00:00
#
# Import normal fields
#
for i in range ( len ( fields ) ) :
2008-08-12 14:44:56 +00:00
if i > = len ( line ) :
raise Exception ( _ ( ' Please check that all your lines have %d columns. ' ) % ( len ( fields ) , ) )
2009-07-14 04:52:22 +00:00
if not line [ i ] :
2008-07-22 14:24:36 +00:00
continue
2009-07-14 04:52:22 +00:00
field = fields [ i ]
2008-07-22 14:24:36 +00:00
if ( len ( field ) == len ( prefix ) + 1 ) and field [ len ( prefix ) ] . endswith ( ' :id ' ) :
res_id = False
if line [ i ] :
if fields_def [ field [ len ( prefix ) ] [ : - 3 ] ] [ ' type ' ] == ' many2many ' :
res_id = [ ]
2009-01-15 10:12:10 +00:00
for word in line [ i ] . split ( config . get ( ' csv_internal_sep ' ) ) :
2008-07-22 14:24:36 +00:00
if ' . ' in word :
module , xml_id = word . rsplit ( ' . ' , 1 )
else :
2009-07-14 04:52:22 +00:00
module , xml_id = current_module , word
2008-07-22 14:24:36 +00:00
id = ir_model_data_obj . _get_id ( cr , uid , module ,
xml_id )
res_id2 = ir_model_data_obj . read ( cr , uid , [ id ] ,
[ ' res_id ' ] ) [ 0 ] [ ' res_id ' ]
if res_id2 :
res_id . append ( res_id2 )
if len ( res_id ) :
2008-08-12 14:44:56 +00:00
res_id = [ ( 6 , 0 , res_id ) ]
2008-07-22 14:24:36 +00:00
else :
if ' . ' in line [ i ] :
module , xml_id = line [ i ] . rsplit ( ' . ' , 1 )
else :
2009-07-14 04:52:22 +00:00
module , xml_id = current_module , line [ i ]
2008-07-22 14:24:36 +00:00
id = ir_model_data_obj . _get_id ( cr , uid , module , xml_id )
2008-12-11 18:00:10 +00:00
res_res_id = ir_model_data_obj . read ( cr , uid , [ id ] ,
[ ' res_id ' ] )
if res_res_id :
res_id = res_res_id [ 0 ] [ ' res_id ' ]
2008-07-22 14:24:36 +00:00
row [ field [ 0 ] [ : - 3 ] ] = res_id or False
continue
if ( len ( field ) == len ( prefix ) + 1 ) and \
len ( field [ len ( prefix ) ] . split ( ' :lang= ' ) ) == 2 :
f , lang = field [ len ( prefix ) ] . split ( ' :lang= ' )
translate . setdefault ( lang , { } ) [ f ] = line [ i ] or False
continue
if ( len ( field ) == len ( prefix ) + 1 ) and \
( prefix == field [ 0 : len ( prefix ) ] ) :
2009-07-14 04:52:22 +00:00
if field [ len ( prefix ) ] == " id " :
# XML ID
db_id = False
is_xml_id = data_id = line [ i ]
d = data_id . split ( ' . ' )
module = len ( d ) > 1 and d [ 0 ] or ' '
name = len ( d ) > 1 and d [ 1 ] or d [ 0 ]
data_ids = ir_model_data_obj . search ( cr , uid , [ ( ' module ' , ' = ' , module ) , ( ' model ' , ' = ' , model_name ) , ( ' name ' , ' = ' , name ) ] )
if len ( data_ids ) :
d = ir_model_data_obj . read ( cr , uid , data_ids , [ ' res_id ' ] ) [ 0 ]
db_id = d [ ' res_id ' ]
if is_db_id and not db_id :
data_ids = ir_model_data_obj . search ( cr , uid , [ ( ' module ' , ' = ' , module ) , ( ' model ' , ' = ' , model_name ) , ( ' res_id ' , ' = ' , is_db_id ) ] )
if not len ( data_ids ) :
ir_model_data_obj . create ( cr , uid , { ' module ' : module , ' model ' : model_name , ' name ' : name , ' res_id ' : is_db_id } )
db_id = is_db_id
if is_db_id and int ( db_id ) != int ( is_db_id ) :
warning + = ( " Id is not the same than existing one: " + str ( is_db_id ) + " ! \n " )
logger . notifyChannel ( " import " , netsvc . LOG_ERROR ,
" Id is not the same than existing one: " + str ( is_db_id ) + ' ! \n ' )
continue
if field [ len ( prefix ) ] == " db_id " :
# Database ID
try :
line [ i ] = int ( line [ i ] )
except Exception , e :
warning + = ( str ( e ) + " ! \n " )
logger . notifyChannel ( " import " , netsvc . LOG_ERROR ,
str ( e ) + ' ! \n ' )
continue
is_db_id = line [ i ]
obj_model = self . pool . get ( model_name )
ids = obj_model . search ( cr , uid , [ ( ' id ' , ' = ' , line [ i ] ) ] )
if not len ( ids ) :
warning + = ( " Database ID doesn ' t exist: " + model_name + " : " + str ( line [ i ] ) + " ! \n " )
logger . notifyChannel ( " import " , netsvc . LOG_ERROR ,
" Database ID doesn ' t exist: " + model_name + " : " + str ( line [ i ] ) + ' ! \n ' )
continue
else :
data_res_id = ids [ 0 ]
data_ids = ir_model_data_obj . search ( cr , uid , [ ( ' model ' , ' = ' , model_name ) , ( ' res_id ' , ' = ' , line [ i ] ) ] )
if len ( data_ids ) :
d = ir_model_data_obj . read ( cr , uid , data_ids , [ ' name ' , ' module ' ] ) [ 0 ]
data_id = d [ ' name ' ]
if d [ ' module ' ] :
data_id = ' %s . %s ' % ( d [ ' module ' ] , d [ ' name ' ] )
else :
data_id = d [ ' name ' ]
if is_xml_id and not data_id :
data_id = is_xml_id
if is_xml_id and is_xml_id != data_id :
warning + = ( " Id is not the same than existing one: " + str ( line [ i ] ) + " ! \n " )
logger . notifyChannel ( " import " , netsvc . LOG_ERROR ,
" Id is not the same than existing one: " + str ( line [ i ] ) + ' ! \n ' )
continue
2008-07-22 14:24:36 +00:00
if fields_def [ field [ len ( prefix ) ] ] [ ' type ' ] == ' integer ' :
2008-08-12 14:44:56 +00:00
res = line [ i ] and int ( line [ i ] )
2008-09-04 00:22:48 +00:00
elif fields_def [ field [ len ( prefix ) ] ] [ ' type ' ] == ' boolean ' :
2009-08-18 15:43:34 +00:00
res = line [ i ] . lower ( ) not in ( ' 0 ' , ' false ' , ' off ' )
2008-07-22 14:24:36 +00:00
elif fields_def [ field [ len ( prefix ) ] ] [ ' type ' ] == ' float ' :
2008-08-12 14:44:56 +00:00
res = line [ i ] and float ( line [ i ] )
2008-07-22 14:24:36 +00:00
elif fields_def [ field [ len ( prefix ) ] ] [ ' type ' ] == ' selection ' :
res = False
if isinstance ( fields_def [ field [ len ( prefix ) ] ] [ ' selection ' ] ,
( tuple , list ) ) :
sel = fields_def [ field [ len ( prefix ) ] ] [ ' selection ' ]
else :
sel = fields_def [ field [ len ( prefix ) ] ] [ ' selection ' ] ( self ,
cr , uid , context )
for key , val in sel :
2009-07-23 14:38:03 +00:00
if line [ i ] in [ tools . ustr ( key ) , tools . ustr ( val ) ] : #Acepting key or value for selection field
2008-07-22 14:24:36 +00:00
res = key
2009-07-08 09:50:39 +00:00
break
2008-07-22 14:24:36 +00:00
if line [ i ] and not res :
logger . notifyChannel ( " import " , netsvc . LOG_WARNING ,
" key ' %s ' not found in selection field ' %s ' " % \
( line [ i ] , field [ len ( prefix ) ] ) )
2009-07-08 09:50:39 +00:00
warning + = " Key/value ' " + str ( line [ i ] ) + " ' not found in selection field ' " + str ( field [ len ( prefix ) ] ) + " ' "
2008-07-22 14:24:36 +00:00
elif fields_def [ field [ len ( prefix ) ] ] [ ' type ' ] == ' many2one ' :
res = False
if line [ i ] :
relation = fields_def [ field [ len ( prefix ) ] ] [ ' relation ' ]
res2 = self . pool . get ( relation ) . name_search ( cr , uid ,
2009-07-16 12:50:41 +00:00
line [ i ] , [ ] , operator = ' = ' , context = context )
2008-07-22 14:24:36 +00:00
res = ( res2 and res2 [ 0 ] [ 0 ] ) or False
if not res :
warning + = ( ' Relation not found: ' + line [ i ] + \
' on ' + relation + ' ! \n ' )
logger . notifyChannel ( " import " , netsvc . LOG_WARNING ,
' Relation not found: ' + line [ i ] + \
' on ' + relation + ' ! \n ' )
elif fields_def [ field [ len ( prefix ) ] ] [ ' type ' ] == ' many2many ' :
res = [ ]
if line [ i ] :
relation = fields_def [ field [ len ( prefix ) ] ] [ ' relation ' ]
2009-01-15 10:12:10 +00:00
for word in line [ i ] . split ( config . get ( ' csv_internal_sep ' ) ) :
2008-07-22 14:24:36 +00:00
res2 = self . pool . get ( relation ) . name_search ( cr ,
2009-07-16 12:50:41 +00:00
uid , word , [ ] , operator = ' = ' , context = context )
2008-07-22 14:24:36 +00:00
res3 = ( res2 and res2 [ 0 ] [ 0 ] ) or False
if not res3 :
warning + = ( ' Relation not found: ' + \
line [ i ] + ' on ' + relation + ' ! \n ' )
logger . notifyChannel ( " import " ,
netsvc . LOG_WARNING ,
' Relation not found: ' + line [ i ] + \
' on ' + relation + ' ! \n ' )
else :
res . append ( res3 )
if len ( res ) :
2008-08-12 14:44:56 +00:00
res = [ ( 6 , 0 , res ) ]
2008-07-22 14:24:36 +00:00
else :
res = line [ i ] or False
row [ field [ len ( prefix ) ] ] = res
elif ( prefix == field [ 0 : len ( prefix ) ] ) :
if field [ 0 ] not in todo :
todo . append ( field [ len ( prefix ) ] )
#
2009-07-14 04:52:22 +00:00
# Import one2many, many2many fields
2008-07-22 14:24:36 +00:00
#
nbrmax = 1
for field in todo :
2009-07-14 04:52:22 +00:00
relation_obj = self . pool . get ( fields_def [ field ] [ ' relation ' ] )
newfd = relation_obj . fields_get (
2008-07-22 14:24:36 +00:00
cr , uid , context = context )
2009-07-14 04:52:22 +00:00
res = process_liness ( self , datas , prefix + [ field ] , current_module , relation_obj . _name , newfd , position )
( newrow , max2 , w2 , translate2 , data_id2 , data_res_id2 ) = res
2008-07-22 14:24:36 +00:00
nbrmax = max ( nbrmax , max2 )
2009-07-14 04:52:22 +00:00
warning = warning + w2
reduce ( lambda x , y : x and y , newrow )
2008-07-22 14:24:36 +00:00
row [ field ] = ( reduce ( lambda x , y : x or y , newrow . values ( ) ) and \
2009-07-14 04:52:22 +00:00
[ ( 0 , 0 , newrow ) ] ) or [ ]
2008-07-22 14:24:36 +00:00
i = max2
while ( position + i ) < len ( datas ) :
ok = True
for j in range ( len ( fields ) ) :
field2 = fields [ j ]
if ( len ( field2 ) < = ( len ( prefix ) + 1 ) ) and datas [ position + i ] [ j ] :
ok = False
if not ok :
break
2009-07-14 04:52:22 +00:00
( newrow , max2 , w2 , translate2 , data_id2 , data_res_id2 ) = process_liness (
self , datas , prefix + [ field ] , current_module , relation_obj . _name , newfd , position + i )
2008-07-22 14:24:36 +00:00
warning = warning + w2
2008-08-12 14:44:56 +00:00
if reduce ( lambda x , y : x or y , newrow . values ( ) ) :
2009-07-14 04:52:22 +00:00
row [ field ] . append ( ( 0 , 0 , newrow ) )
2008-08-12 14:44:56 +00:00
i + = max2
2008-07-22 14:24:36 +00:00
nbrmax = max ( nbrmax , i )
if len ( prefix ) == 0 :
2008-08-12 14:44:56 +00:00
for i in range ( max ( nbrmax , 1 ) ) :
2008-07-22 14:24:36 +00:00
#if datas:
datas . pop ( 0 )
2009-07-14 04:52:22 +00:00
result = ( row , nbrmax , warning , translate , data_id , data_res_id )
2008-07-22 14:24:36 +00:00
return result
fields_def = self . fields_get ( cr , uid , context = context )
done = 0
2008-08-19 13:26:12 +00:00
initial_size = len ( datas )
2008-08-20 12:47:57 +00:00
if config . get ( ' import_partial ' , False ) and filename :
data = pickle . load ( file ( config . get ( ' import_partial ' ) ) )
2008-08-20 12:51:19 +00:00
original_value = data . get ( filename , 0 )
2008-08-19 13:26:12 +00:00
counter = 0
2008-07-22 14:24:36 +00:00
while len ( datas ) :
2008-08-19 13:26:12 +00:00
counter + = 1
2008-07-22 14:24:36 +00:00
res = { }
2008-08-19 13:03:11 +00:00
#try:
2009-07-14 04:52:22 +00:00
( res , other , warning , translate , data_id , res_id ) = \
process_liness ( self , datas , [ ] , current_module , self . _name , fields_def )
if warning :
2008-07-22 14:24:36 +00:00
cr . rollback ( )
2009-07-08 09:50:39 +00:00
return ( - 1 , res , ' Line ' + str ( counter ) + ' : ' + warning , ' ' )
2009-06-26 14:27:34 +00:00
try :
2009-07-14 04:52:22 +00:00
id = ir_model_data_obj . _update ( cr , uid , self . _name ,
2009-06-26 14:27:34 +00:00
current_module , res , xml_id = data_id , mode = mode ,
2009-07-14 04:52:22 +00:00
noupdate = noupdate , res_id = res_id )
2009-06-26 14:27:34 +00:00
except Exception , e :
import psycopg2
if isinstance ( e , psycopg2 . IntegrityError ) :
msg = ' Insertion Failed! '
for key in self . pool . _sql_error . keys ( ) :
if key in e [ 0 ] :
msg = self . pool . _sql_error [ key ]
break
2009-07-08 09:50:39 +00:00
return ( - 1 , res , ' Line ' + str ( counter ) + ' : ' + msg , ' ' )
2009-06-26 14:27:34 +00:00
2008-08-19 13:03:11 +00:00
for lang in translate :
context2 = context . copy ( )
context2 [ ' lang ' ] = lang
self . write ( cr , uid , [ id ] , translate [ lang ] , context2 )
2008-08-19 13:34:44 +00:00
if config . get ( ' import_partial ' , False ) and filename and ( not ( counter % 100 ) ) :
2008-08-19 13:26:12 +00:00
data = pickle . load ( file ( config . get ( ' import_partial ' ) ) )
2008-08-20 12:51:19 +00:00
data [ filename ] = initial_size - len ( datas ) + original_value
2008-08-19 13:26:12 +00:00
pickle . dump ( data , file ( config . get ( ' import_partial ' ) , ' wb ' ) )
2008-08-19 13:03:11 +00:00
cr . commit ( )
2008-08-19 13:26:12 +00:00
2008-08-19 13:03:11 +00:00
#except Exception, e:
# logger.notifyChannel("import", netsvc.LOG_ERROR, e)
# cr.rollback()
# try:
# return (-1, res, e[0], warning)
# except:
# return (-1, res, e[0], '')
2008-07-22 14:24:36 +00:00
done + = 1
#
# TODO: Send a request with the result and multi-thread !
#
return ( done , 0 , 0 , 0 )
def read ( self , cr , user , ids , fields = None , context = None , load = ' _classic_read ' ) :
raise _ ( ' The read method is not implemented on this object ! ' )
2008-11-24 12:48:39 +00:00
def get_invalid_fields ( self , cr , uid ) :
2008-11-24 15:42:29 +00:00
return list ( self . _invalids )
2008-11-24 12:48:39 +00:00
2008-07-22 14:24:36 +00:00
def _validate ( self , cr , uid , ids , context = None ) :
context = context or { }
lng = context . get ( ' lang ' , False ) or ' en_US '
trans = self . pool . get ( ' ir.translation ' )
2008-08-26 14:26:42 +00:00
error_msgs = [ ]
2008-07-22 14:24:36 +00:00
for constraint in self . _constraints :
fun , msg , fields = constraint
if not fun ( self , cr , uid , ids ) :
2008-08-26 14:26:42 +00:00
translated_msg = trans . _get_source ( cr , uid , self . _name , ' constraint ' , lng , source = msg ) or msg
error_msgs . append (
2009-01-28 12:52:32 +00:00
_ ( " Error occurred while validating the field(s) %s : %s " ) % ( ' , ' . join ( fields ) , translated_msg )
2008-09-03 11:14:29 +00:00
)
2008-11-24 15:42:29 +00:00
self . _invalids . update ( fields )
2008-08-26 14:26:42 +00:00
if error_msgs :
2008-07-22 14:24:36 +00:00
cr . rollback ( )
2008-08-26 14:26:42 +00:00
raise except_orm ( ' ValidateError ' , ' \n ' . join ( error_msgs ) )
2008-11-24 13:27:43 +00:00
else :
2008-11-24 15:42:29 +00:00
self . _invalids . clear ( )
2008-07-22 14:24:36 +00:00
def default_get ( self , cr , uid , fields_list , context = None ) :
return { }
def perm_read ( self , cr , user , ids , context = None , details = True ) :
raise _ ( ' The perm_read method is not implemented on this object ! ' )
def unlink ( self , cr , uid , ids , context = None ) :
raise _ ( ' The unlink method is not implemented on this object ! ' )
def write ( self , cr , user , ids , vals , context = None ) :
raise _ ( ' The write method is not implemented on this object ! ' )
def create ( self , cr , user , vals , context = None ) :
raise _ ( ' The create method is not implemented on this object ! ' )
# returns the definition of each field in the object
# the optional fields parameter can limit the result to some fields
2008-12-02 08:16:29 +00:00
def fields_get_keys ( self , cr , user , context = None , read_access = True ) :
if context is None :
context = { }
res = self . _columns . keys ( )
for parent in self . _inherits :
res . extend ( self . pool . get ( parent ) . fields_get_keys ( cr , user , fields , context ) )
return res
2008-07-22 14:24:36 +00:00
    def fields_get(self, cr, user, fields=None, context=None, read_access=True):
        """Return the definition of each field of the model.

        The result maps field name -> dict of attributes ('type', 'string',
        'required', 'selection', 'relation', ...).  Definitions of
        ``_inherits`` parents are merged in first, then overridden/extended
        by this model's own ``_columns``.  'string'/'help'/'selection'
        labels are translated through ir.translation using
        ``context['lang']`` (falling back to 'en_US').

        :param fields: optional list of field names to restrict the result to
        :param read_access: when False, every field is forced readonly
        :return: {field_name: {attribute: value}}
        """
        if context is None:
            context = {}
        res = {}
        translation_obj = self.pool.get('ir.translation')
        # NOTE(review): model_access_obj is never used in this method.
        model_access_obj = self.pool.get('ir.model.access')
        # Merge parent definitions first so local columns can override them.
        for parent in self._inherits:
            res.update(self.pool.get(parent).fields_get(cr, user, fields, context))

        if self._columns.keys():
            for f in self._columns.keys():
                # Honour the optional restriction list.
                if fields and f not in fields:
                    continue
                res[f] = {'type': self._columns[f]._type}
                # Copy the simple declarative attributes that are set (truthy)
                # on the column object.
                for arg in ('string', 'readonly', 'states', 'size', 'required',
                        'change_default', 'translate', 'help', 'select'):
                    if getattr(self._columns[f], arg):
                        res[f][arg] = getattr(self._columns[f], arg)
                # Without read access everything becomes readonly and any
                # state-dependent attribute overrides are dropped.
                if not read_access:
                    res[f]['readonly'] = True
                    res[f]['states'] = {}
                # Optional attributes that may not exist on every column type.
                for arg in ('digits', 'invisible', 'filters'):
                    if hasattr(self._columns[f], arg) \
                            and getattr(self._columns[f], arg):
                        res[f][arg] = getattr(self._columns[f], arg)

                # Translated label and tooltip, keyed as "<model>,<field>".
                res_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'field', context.get('lang', False) or 'en_US')
                if res_trans:
                    res[f]['string'] = res_trans
                help_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'help', context.get('lang', False) or 'en_US')
                if help_trans:
                    res[f]['help'] = help_trans

                if hasattr(self._columns[f], 'selection'):
                    if isinstance(self._columns[f].selection, (tuple, list)):
                        sel = self._columns[f].selection
                        # translate each selection option
                        sel2 = []
                        for (key, val) in sel:
                            val2 = None
                            if val:
                                val2 = translation_obj._get_source(cr, user, self._name + ',' + f, 'selection', context.get('lang', False) or 'en_US', val)
                            sel2.append((key, val2 or val))
                        sel = sel2
                        res[f]['selection'] = sel
                    else:
                        # call the 'dynamic selection' function
                        res[f]['selection'] = self._columns[f].selection(self, cr,
                                user, context)
                # Relational fields also expose their co-model, domain and
                # context.
                if res[f]['type'] in ('one2many', 'many2many', 'many2one', 'one2one'):
                    res[f]['relation'] = self._columns[f]._obj
                    res[f]['domain'] = self._columns[f]._domain
                    res[f]['context'] = self._columns[f]._context
        else:
            #TODO : read the fields from the database
            pass

        if fields:
            # filter out fields which aren't in the fields list
            for r in res.keys():
                if r not in fields:
                    del res[r]
        return res
#
# Overload this method if you need a window title which depends on the context
#
def view_header_get ( self , cr , user , view_id = None , view_type = ' form ' , context = None ) :
return False
2008-12-02 17:41:37 +00:00
    def __view_look_dom(self, cr, user, node, view_id, context=None):
        """Post-process one node of a view architecture DOM (in place).

        Recursively walks the minidom tree, and per node:
        - <field>: registers the field name (return value), recursively
          processes any embedded <form>/<tree>/<graph> sub-view of the
          related model, and for widget="selection" precomputes the list of
          (id, name) choices;
        - <form>/<tree>: injects the string from view_header_get();
        - <calendar>: registers the date/color fields it references;
        - groups="...": hides the node if the user belongs to none of the
          listed groups, then strips the attribute;
        - translates 'string' and 'sum' attributes via ir.translation.

        :return: dict {field_name: attrs} of every field used by this subtree
        """
        if not context:
            context = {}
        result = False
        fields = {}
        childs = True
        if node.nodeType == node.ELEMENT_NODE and node.localName == 'field':
            if node.hasAttribute('name'):
                attrs = {}
                try:
                    # Resolve the column object, own columns first then
                    # _inherits fields ([2] is the column in the inherit tuple).
                    if node.getAttribute('name') in self._columns:
                        column = self._columns[node.getAttribute('name')]
                    else:
                        column = self._inherit_fields[node.getAttribute('name')][2]
                # NOTE(review): bare except — any lookup failure silently
                # degrades to "no column".
                except:
                    column = False

                if column:
                    relation = column._obj
                    childs = False
                    views = {}
                    # NOTE(review): removeChild() while iterating childNodes —
                    # presumably relies on at most one sub-view per type;
                    # verify against callers.
                    for f in node.childNodes:
                        if f.nodeType == f.ELEMENT_NODE and f.localName in ('form', 'tree', 'graph'):
                            node.removeChild(f)
                            ctx = context.copy()
                            # Remember the outer model so view translations can
                            # fall back to it (see the translate step below).
                            ctx['base_model_name'] = self._name
                            xarch, xfields = self.pool.get(relation).__view_look_dom_arch(cr, user, f, view_id, ctx)
                            views[str(f.localName)] = {
                                'arch': xarch,
                                'fields': xfields
                            }
                    attrs = {'views': views}
                    if node.hasAttribute('widget') and node.getAttribute('widget') == 'selection':
                        # We can not use the 'string' domain as it is defined
                        # according to the record!
                        # NOTE: local name 'dom' shadows the xml.dom module
                        # inside this branch.
                        dom = None
                        if column._domain and not isinstance(column._domain, (str, unicode)):
                            dom = column._domain
                        attrs['selection'] = self.pool.get(relation).name_search(cr, user, '', dom, context=context)
                        # Non-required selections get an explicit empty choice.
                        if (node.hasAttribute('required') and not int(node.getAttribute('required'))) or not column.required:
                            attrs['selection'].append((False, ''))
                fields[node.getAttribute('name')] = attrs
        elif node.nodeType == node.ELEMENT_NODE and node.localName in ('form', 'tree'):
            result = self.view_header_get(cr, user, False, node.localName, context)
            if result:
                node.setAttribute('string', result)
        elif node.nodeType == node.ELEMENT_NODE and node.localName == 'calendar':
            # The calendar view references fields through attributes rather
            # than <field> children; register them so they get read too.
            for additional_field in ('date_start', 'date_delay', 'date_stop', 'color'):
                if node.hasAttribute(additional_field) and node.getAttribute(additional_field):
                    fields[node.getAttribute(additional_field)] = {}
        if node.nodeType == node.ELEMENT_NODE and node.hasAttribute('groups'):
            if node.getAttribute('groups'):
                groups = node.getAttribute('groups').split(',')
                readonly = False
                access_pool = self.pool.get('ir.model.access')
                # Visible if the user belongs to at least one listed group.
                for group in groups:
                    readonly = readonly or access_pool.check_groups(cr, user, group)
                if not readonly:
                    node.setAttribute('invisible', '1')
            # The groups attribute is server-side only; never send it to the
            # client.
            node.removeAttribute('groups')
        if node.nodeType == node.ELEMENT_NODE:
            # translate view
            if ('lang' in context) and not result:
                if node.hasAttribute('string') and node.getAttribute('string'):
                    trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.getAttribute('string').encode('utf8'))
                    # Embedded sub-views fall back to the enclosing model's
                    # translations.
                    if not trans and ('base_model_name' in context):
                        trans = self.pool.get('ir.translation')._get_source(cr, user, context['base_model_name'], 'view', context['lang'], node.getAttribute('string').encode('utf8'))
                    if trans:
                        node.setAttribute('string', trans)
                if node.hasAttribute('sum') and node.getAttribute('sum'):
                    trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.getAttribute('sum').encode('utf8'))
                    if trans:
                        node.setAttribute('sum', trans)
        if childs:
            # Recurse; skipped for <field> nodes with a resolved column, whose
            # children were handled (and removed) above.
            for f in node.childNodes:
                fields.update(self.__view_look_dom(cr, user, f, view_id, context))

        return fields
2008-12-02 17:41:37 +00:00
    def __view_look_dom_arch(self, cr, user, node, view_id, context=None):
        """Finalize a view DOM: return its XML arch and field definitions.

        Runs __view_look_dom() to collect/transform the fields used by the
        view, applies role-based readonly flags to workflow buttons, and
        merges the view-level field attributes into the model-level ones
        from fields_get().

        :return: (arch, fields) — the serialized XML string and a dict
                 {field_name: definition}
        :raises except_orm: when the view references a field the model does
                 not have
        """
        fields_def = self.__view_look_dom(cr, user, node, view_id, context=context)

        rolesobj = self.pool.get('res.roles')
        usersobj = self.pool.get('res.users')

        # Workflow buttons only; type="object" buttons are not role-checked.
        buttons = (n for n in node.getElementsByTagName('button') if n.getAttribute('type') != 'object')
        for button in buttons:
            ok = True
            if user != 1:   # admin user has all roles
                user_roles = usersobj.read(cr, user, [user], ['roles_id'])[0]['roles_id']
                # The button name is the workflow signal it fires; fetch the
                # roles required by the matching transitions.
                cr.execute("select role_id from wkf_transition where signal=%s", (button.getAttribute('name'),))
                roles = cr.fetchall()
                for role in roles:
                    if role[0]:
                        ok = ok and rolesobj.check(cr, user, user_roles, role[0])
            if not ok:
                button.setAttribute('readonly', '1')
            else:
                button.setAttribute('readonly', '0')

        arch = node.toxml(encoding="utf-8").replace('\t', '')
        fields = self.fields_get(cr, user, fields_def.keys(), context)
        for field in fields_def:
            if field == 'id':
                # sometimes the view may contain the (invisible) field 'id'
                # needed for a domain (when 2 objects have cross references)
                fields['id'] = {'readonly': True, 'type': 'integer', 'string': 'ID'}
            elif field in fields:
                # View-level attributes (sub-views, selections, ...) override
                # the bare model definition.
                fields[field].update(fields_def[field])
            else:
                # Unknown field: build a helpful error listing the view parts
                # that mention it.
                cr.execute('select name, model from ir_ui_view where (id=%s or inherit_id=%s) and arch like %s', (view_id, view_id, '%%%s%%' % field))
                res = cr.fetchall()[:]
                model = res[0][1]
                res.insert(0, ("Can't find field '%s' in the following view parts composing the view of object model '%s':" % (field, model), None))
                msg = "\n * ".join([r[0] for r in res])
                msg += "\n\nEither you wrongly customised this view, or some modules bringing those views are not compatible with your current data model"
                netsvc.Logger().notifyChannel('orm', netsvc.LOG_ERROR, msg)
                raise except_orm('View error', msg)

        return arch, fields
    def __get_default_calendar_view(self):
        """Generate a default calendar view (For internal use only).

        Builds a minimal <calendar> arch from the model's columns:
        date_start comes from self._date_name; the entry color uses
        user_id or partner_id when present; the end/duration attribute is
        picked from date_stop/date_end/date_delay/planned_hours in that
        order of preference; the displayed field is self._rec_name.
        """
        arch = ('<?xml version="1.0" encoding="utf-8"?>\n'
                '<calendar string="%s" date_start="%s"') % (self._description, self._date_name)
        # Color entries by owner or partner when such a column exists.
        if 'user_id' in self._columns:
            arch += ' color="user_id"'
        elif 'partner_id' in self._columns:
            arch += ' color="partner_id"'
        # End date takes precedence over a duration column.
        if 'date_stop' in self._columns:
            arch += ' date_stop="date_stop"'
        elif 'date_end' in self._columns:
            arch += ' date_stop="date_end"'
        elif 'date_delay' in self._columns:
            arch += ' date_delay="date_delay"'
        elif 'planned_hours' in self._columns:
            arch += ' date_delay="planned_hours"'
        arch += ('>\n'
                 '  <field name="%s"/>\n'
                 '</calendar>') % (self._rec_name)
        return arch
#
# if view_id, view_type is not required
#
def fields_view_get ( self , cr , user , view_id = None , view_type = ' form ' , context = None , toolbar = False ) :
if not context :
2008-08-12 14:44:56 +00:00
context = { }
2009-02-14 05:35:17 +00:00
2008-12-16 10:58:58 +00:00
def encode ( s ) :
if isinstance ( s , unicode ) :
return s . encode ( ' utf8 ' )
2009-02-14 05:35:17 +00:00
return s
2008-12-16 10:58:58 +00:00
2008-07-22 14:24:36 +00:00
def _inherit_apply ( src , inherit ) :
def _find ( node , node2 ) :
2008-08-12 14:44:56 +00:00
if node2 . nodeType == node2 . ELEMENT_NODE and node2 . localName == ' xpath ' :
2008-07-22 14:24:36 +00:00
res = xpath . Evaluate ( node2 . getAttribute ( ' expr ' ) , node )
return res and res [ 0 ]
else :
2008-08-12 14:44:56 +00:00
if node . nodeType == node . ELEMENT_NODE and node . localName == node2 . localName :
2008-07-22 14:24:36 +00:00
res = True
for attr in node2 . attributes . keys ( ) :
2008-08-12 14:44:56 +00:00
if attr == ' position ' :
2008-07-22 14:24:36 +00:00
continue
if node . hasAttribute ( attr ) :
if node . getAttribute ( attr ) == node2 . getAttribute ( attr ) :
continue
res = False
if res :
return node
for child in node . childNodes :
res = _find ( child , node2 )
2008-08-12 14:44:56 +00:00
if res :
return res
2008-07-22 14:24:36 +00:00
return None
2009-02-14 05:35:17 +00:00
2008-07-22 14:24:36 +00:00
2008-12-16 10:58:58 +00:00
doc_src = dom . minidom . parseString ( encode ( src ) )
doc_dest = dom . minidom . parseString ( encode ( inherit ) )
2008-07-22 14:24:36 +00:00
toparse = doc_dest . childNodes
while len ( toparse ) :
node2 = toparse . pop ( 0 )
2008-08-12 14:44:56 +00:00
if not node2 . nodeType == node2 . ELEMENT_NODE :
2008-07-22 14:24:36 +00:00
continue
2008-08-12 14:44:56 +00:00
if node2 . localName == ' data ' :
2008-07-22 14:24:36 +00:00
toparse + = node2 . childNodes
continue
node = _find ( doc_src , node2 )
if node :
pos = ' inside '
if node2 . hasAttribute ( ' position ' ) :
pos = node2 . getAttribute ( ' position ' )
2008-08-12 14:44:56 +00:00
if pos == ' replace ' :
2008-07-22 14:24:36 +00:00
parent = node . parentNode
for child in node2 . childNodes :
2008-08-12 14:44:56 +00:00
if child . nodeType == child . ELEMENT_NODE :
2008-07-22 14:24:36 +00:00
parent . insertBefore ( child , node )
parent . removeChild ( node )
else :
2008-10-28 00:18:25 +00:00
sib = node . nextSibling
2008-07-22 14:24:36 +00:00
for child in node2 . childNodes :
2008-08-12 14:44:56 +00:00
if child . nodeType == child . ELEMENT_NODE :
if pos == ' inside ' :
2008-07-22 14:24:36 +00:00
node . appendChild ( child )
2008-08-12 14:44:56 +00:00
elif pos == ' after ' :
2008-10-28 00:18:25 +00:00
node . parentNode . insertBefore ( child , sib )
2008-07-22 14:24:36 +00:00
elif pos == ' before ' :
node . parentNode . insertBefore ( child , node )
else :
2008-08-12 14:44:56 +00:00
raise AttributeError ( _ ( ' Unknown position in inherited view %s ! ' ) % pos )
2008-07-22 14:24:36 +00:00
else :
attrs = ' ' . join ( [
' %s = " %s " ' % ( attr , node2 . getAttribute ( attr ) )
for attr in node2 . attributes . keys ( )
if attr != ' position '
] )
tag = " < %s %s > " % ( node2 . localName , attrs )
2008-11-24 15:18:09 +00:00
raise AttributeError ( _ ( " Couldn ' t find tag ' %s ' in parent view ! \n %s " ) % ( tag , src ) )
2008-07-22 14:24:36 +00:00
return doc_src . toxml ( encoding = " utf-8 " ) . replace ( ' \t ' , ' ' )
2008-08-12 14:44:56 +00:00
result = { ' type ' : view_type , ' model ' : self . _name }
2008-07-22 14:24:36 +00:00
ok = True
model = True
sql_res = False
while ok :
if view_id :
where = ( model and ( " and model= ' %s ' " % ( self . _name , ) ) ) or ' '
2008-12-09 13:35:40 +00:00
cr . execute ( ' SELECT arch,name,field_parent,id,type,inherit_id FROM ir_ui_view WHERE id= %s ' + where , ( view_id , ) )
2008-07-22 14:24:36 +00:00
else :
2008-08-18 22:19:04 +00:00
cr . execute ( ''' SELECT
arch , name , field_parent , id , type , inherit_id
FROM
ir_ui_view
2008-08-22 10:18:41 +00:00
WHERE
2008-08-18 22:19:04 +00:00
model = % s AND
2008-08-22 10:18:41 +00:00
type = % s AND
2008-08-18 22:19:04 +00:00
inherit_id IS NULL
ORDER BY priority ''' , (self._name, view_type))
2008-07-22 14:24:36 +00:00
sql_res = cr . fetchone ( )
if not sql_res :
break
ok = sql_res [ 5 ]
view_id = ok or sql_res [ 3 ]
model = False
# if a view was found
if sql_res :
result [ ' type ' ] = sql_res [ 4 ]
result [ ' view_id ' ] = sql_res [ 3 ]
result [ ' arch ' ] = sql_res [ 0 ]
def _inherit_apply_rec ( result , inherit_id ) :
# get all views which inherit from (ie modify) this view
2008-12-09 13:35:40 +00:00
cr . execute ( ' select arch,id from ir_ui_view where inherit_id= %s and model= %s order by priority ' , ( inherit_id , self . _name ) )
2008-07-22 14:24:36 +00:00
sql_inherit = cr . fetchall ( )
2008-08-12 14:44:56 +00:00
for ( inherit , id ) in sql_inherit :
2008-07-22 14:24:36 +00:00
result = _inherit_apply ( result , inherit )
result = _inherit_apply_rec ( result , id )
return result
result [ ' arch ' ] = _inherit_apply_rec ( result [ ' arch ' ] , sql_res [ 3 ] )
result [ ' name ' ] = sql_res [ 1 ]
result [ ' field_parent ' ] = sql_res [ 2 ] or False
else :
# otherwise, build some kind of default view
if view_type == ' form ' :
res = self . fields_get ( cr , user , context = context )
2009-01-26 20:28:57 +00:00
xml = ' <?xml version= " 1.0 " encoding= " utf-8 " ?> ' \
' <form string= " %s " > ' % ( self . _description , )
2008-07-22 14:24:36 +00:00
for x in res :
if res [ x ] [ ' type ' ] not in ( ' one2many ' , ' many2many ' ) :
xml + = ' <field name= " %s " /> ' % ( x , )
if res [ x ] [ ' type ' ] == ' text ' :
xml + = " <newline/> "
xml + = " </form> "
elif view_type == ' tree ' :
2008-11-25 23:33:17 +00:00
_rec_name = self . _rec_name
if _rec_name not in self . _columns :
_rec_name = self . _columns . keys ( ) [ 0 ]
2009-01-26 20:28:57 +00:00
xml = ' <?xml version= " 1.0 " encoding= " utf-8 " ?> ' \
' <tree string= " %s " ><field name= " %s " /></tree> ' \
% ( self . _description , self . _rec_name )
2008-07-22 14:24:36 +00:00
elif view_type == ' calendar ' :
xml = self . __get_default_calendar_view ( )
else :
2009-01-26 20:28:57 +00:00
xml = ' <?xml version= " 1.0 " ?> ' # what happens here, graph case?
2008-07-22 14:24:36 +00:00
result [ ' arch ' ] = xml
result [ ' name ' ] = ' default '
result [ ' field_parent ' ] = False
result [ ' view_id ' ] = 0
2009-01-26 20:28:57 +00:00
try :
doc = dom . minidom . parseString ( encode ( result [ ' arch ' ] ) )
except Exception , ex :
logger = netsvc . Logger ( )
logger . notifyChannel ( ' init ' , netsvc . LOG_DEBUG , ' Wrong arch in %s ( %s ): \n %s ' % ( result [ ' name ' ] , view_type , result [ ' arch ' ] ) )
raise except_orm ( ' Error ' ,
( ' Invalid xml in view %s ( %d ) of %s : %s ' % ( result [ ' name ' ] , result [ ' view_id ' ] , self . _name , str ( ex ) ) ) )
2008-12-02 17:41:37 +00:00
xarch , xfields = self . __view_look_dom_arch ( cr , user , doc , view_id , context = context )
2008-07-22 14:24:36 +00:00
result [ ' arch ' ] = xarch
result [ ' fields ' ] = xfields
if toolbar :
def clean ( x ) :
x = x [ 2 ]
for key in ( ' report_sxw_content ' , ' report_rml_content ' ,
' report_sxw ' , ' report_rml ' ,
' report_sxw_content_data ' , ' report_rml_content_data ' ) :
if key in x :
del x [ key ]
return x
ir_values_obj = self . pool . get ( ' ir.values ' )
resprint = ir_values_obj . get ( cr , user , ' action ' ,
' client_print_multi ' , [ ( self . _name , False ) ] , False ,
context )
resaction = ir_values_obj . get ( cr , user , ' action ' ,
' client_action_multi ' , [ ( self . _name , False ) ] , False ,
context )
resrelate = ir_values_obj . get ( cr , user , ' action ' ,
' client_action_relate ' , [ ( self . _name , False ) ] , False ,
context )
resprint = map ( clean , resprint )
resaction = map ( clean , resaction )
2008-08-12 14:44:56 +00:00
resaction = filter ( lambda x : not x . get ( ' multi ' , False ) , resaction )
resprint = filter ( lambda x : not x . get ( ' multi ' , False ) , resprint )
resrelate = map ( lambda x : x [ 2 ] , resrelate )
2008-07-22 14:24:36 +00:00
for x in resprint + resaction + resrelate :
x [ ' string ' ] = x [ ' name ' ]
result [ ' toolbar ' ] = {
' print ' : resprint ,
' action ' : resaction ,
' relate ' : resrelate
}
return result
2008-08-12 14:44:56 +00:00
_view_look_dom_arch = __view_look_dom_arch
2008-07-22 14:24:36 +00:00
def search_count(self, cr, user, args, context=None):
    """Count the records matching the domain ``args``.

    Delegates to ``search(..., count=True)``; some implementations return
    the count directly while others return a list of ids, so both shapes
    are normalised to an integer here.
    """
    if not context:
        context = {}
    matched = self.search(cr, user, args, context=context, count=True)
    return len(matched) if isinstance(matched, list) else matched
def search(self, cr, user, args, offset=0, limit=None, order=None,
        context=None, count=False):
    """Abstract search; concrete ORM flavours (orm, orm_memory) override it.

    :raises NotImplementedError: always, on this base class.
    """
    # BUG FIX: the original ``raise _('...')`` raised a bare string, which
    # is a TypeError on Python >= 2.6 instead of a catchable error; keep the
    # translated message but wrap it in a real exception type.
    raise NotImplementedError(_('The search method is not implemented on this object !'))
def name_get(self, cr, user, ids, context=None):
    """Abstract name_get; concrete ORM flavours override it.

    :raises NotImplementedError: always, on this base class.
    """
    # BUG FIX: raising a bare string is a TypeError on Python >= 2.6;
    # wrap the translated message in a real exception type.
    raise NotImplementedError(_('The name_get method is not implemented on this object !'))
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=None):
    """Abstract name_search; concrete ORM flavours override it.

    :raises NotImplementedError: always, on this base class.
    """
    # BUG FIX: raising a bare string is a TypeError on Python >= 2.6;
    # wrap the translated message in a real exception type.
    raise NotImplementedError(_('The name_search method is not implemented on this object !'))
def copy(self, cr, uid, id, default=None, context=None):
    """Abstract copy; concrete ORM flavours override it.

    :raises NotImplementedError: always, on this base class.
    """
    # BUG FIX: raising a bare string is a TypeError on Python >= 2.6;
    # wrap the translated message in a real exception type.
    raise NotImplementedError(_('The copy method is not implemented on this object !'))
2008-06-14 15:14:19 +00:00
2009-06-10 11:15:35 +00:00
def exists(self, cr, uid, id, context=None):
    """Abstract existence test; concrete ORM flavours override it.

    :raises NotImplementedError: always, on this base class.
    """
    # BUG FIX: raising a bare string is a TypeError on Python >= 2.6;
    # wrap the translated message in a real exception type.
    raise NotImplementedError(_('The exists method is not implemented on this object !'))
2008-11-27 08:24:44 +00:00
def read_string(self, cr, uid, id, langs, fields=None, context=None):
    """Read the translated labels of this model's fields.

    Returns a dict keyed by language code; each value maps field names to
    their translated label (falling back to the untranslated ``string``
    of the column), plus a 'code' entry holding the language itself.
    """
    res = {}
    res2 = {}
    # access control on the translation table, not on self
    self.pool.get('ir.model.access').check(cr, uid, 'ir.translation', 'read', context=context)
    if not fields:
        fields = self._columns.keys() + self._inherit_fields.keys()
    for lang in langs:
        res[lang] = {'code': lang}
        for f in fields:
            if f in self._columns:
                res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
                if res_trans:
                    res[lang][f] = res_trans
                else:
                    # no translation recorded: fall back to the raw label
                    res[lang][f] = self._columns[f].string
    # recurse into _inherits parents for the inherited fields
    # NOTE(review): res2 is overwritten on each iteration, so with several
    # _inherits parents only the last one's labels are merged below —
    # confirm whether multiple-parent models are expected here.
    for table in self._inherits:
        cols = intersect(self._inherit_fields.keys(), fields)
        res2 = self.pool.get(table).read_string(cr, uid, id, langs, cols, context)
    for lang in res2:
        if lang in res:
            res[lang]['code'] = lang
        for f in res2[lang]:
            res[lang][f] = res2[lang][f]
    return res
def write_string(self, cr, uid, id, langs, vals, context=None):
    """Store translated labels for this model's fields.

    ``vals`` maps field names to the new label; the translation is written
    for every language in ``langs`` and propagated to _inherits parents.
    Always returns True.
    """
    # access control on the translation table, not on self
    self.pool.get('ir.model.access').check(cr, uid, 'ir.translation', 'write', context=context)
    for lang in langs:
        for field in vals:
            if field in self._columns:
                # res_id [0] is the convention for field-label translations
                self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field])
    # propagate to the parent models for inherited fields
    for table in self._inherits:
        cols = intersect(self._inherit_fields.keys(), vals)
        if cols:
            self.pool.get(table).write_string(cr, uid, id, langs, vals, context)
    return True
2009-01-29 09:59:37 +00:00
def _check_removed_columns ( self , cr , log = False ) :
raise NotImplementedError ( )
2008-08-12 14:44:56 +00:00
2008-06-14 15:14:19 +00:00
class orm_memory(orm_template):
    """In-memory ORM flavour: records live in ``self.datas`` (a dict of
    id -> field-values dict) instead of a PostgreSQL table.  Every record
    carries an 'internal.date_access' timestamp used by vaccum() to expire
    old entries.  Only workflow instances touch the database.
    """

    # methods that must not be exposed directly over RPC
    _protected = ['read', 'write', 'create', 'default_get', 'perm_read', 'unlink', 'fields_get', 'fields_view_get', 'search', 'name_get', 'distinct_field_get', 'name_search', 'copy', 'import_data', 'search_count', 'exists']
    _inherit_fields = {}
    _max_count = 200      # soft cap on the number of cached records
    _max_hours = 1        # records unused for this long are expired
    _check_time = 20      # vaccum does real work only every N-th call

    def __init__(self, cr):
        super(orm_memory, self).__init__(cr)
        self.datas = {}     # id -> dict of stored field values
        self.next_id = 0    # last id handed out by create()
        self.check_id = 0   # call counter for vaccum throttling
        # drop any workflow instances left over from a previous run
        cr.execute('delete from wkf_instance where res_type=%s', (self._name,))

    def vaccum(self, cr, uid):
        """Garbage-collect expired / excess in-memory records.

        (sic: historical spelling of "vacuum" — part of the public API.)
        """
        self.check_id += 1
        # only do the expensive scan every _check_time-th call
        if self.check_id % self._check_time:
            return True
        tounlink = []
        # NOTE: shadows the builtin max(); kept as-is
        max = time.time() - self._max_hours * 60 * 60
        for id in self.datas:
            if self.datas[id]['internal.date_access'] < max:
                tounlink.append(id)
        self.unlink(cr, uid, tounlink)
        # still over the cap: drop the least recently accessed records
        if len(self.datas) > self._max_count:
            sorted = map(lambda x: (x[1]['internal.date_access'], x[0]), self.datas.items())
            sorted.sort()
            ids = map(lambda x: x[1], sorted[:len(self.datas)-self._max_count])
            self.unlink(cr, uid, ids)
        return True

    def read(self, cr, user, ids, fields_to_read=None, context=None, load='_classic_read'):
        """Read ``fields_to_read`` for the given ids out of self.datas.

        Returns a list of {'id': ..., field: value} dicts; binary fields
        are replaced by their length when context['bin_size'] is set.
        """
        if not context:
            context = {}
        if not fields_to_read:
            fields_to_read = self._columns.keys()
        result = []
        if self.datas:
            if isinstance(ids, (int, long)):
                ids = [ids]
            for id in ids:
                r = {'id': id}
                for f in fields_to_read:
                    if id in self.datas:
                        r[f] = self.datas[id].get(f, False)
                        # return only the size of binary fields on demand
                        if r[f] and isinstance(self._columns[f], fields.binary) and context.get('bin_size', False):
                            r[f] = len(r[f])
                result.append(r)
                if id in self.datas:
                    # touch the LRU timestamp used by vaccum()
                    self.datas[id]['internal.date_access'] = time.time()
            # non-classic (function) fields are fetched through get_memory
            fields_post = filter(lambda x: x in self._columns and not getattr(self._columns[x], load), fields_to_read)
            for f in fields_post:
                res2 = self._columns[f].get_memory(cr, self, ids, f, user, context=context, values=result)
                for record in result:
                    record[f] = res2[record['id']]
        # NOTE(review): when self.datas is non-empty a scalar id was rebound
        # to a list above, so this branch only triggers for a scalar id on
        # an empty store (where result[0] would raise IndexError) — confirm
        # intended behaviour for scalar ids.
        if isinstance(ids, (int, long)):
            return result[0]
        return result

    def write(self, cr, user, ids, vals, context=None):
        """Update ``vals`` on each record in ``ids``; triggers validation
        and the workflow write signal.  Returns the last id written."""
        vals2 = {}
        upd_todo = []
        # split classic-storage fields from function-style fields
        for field in vals:
            if self._columns[field]._classic_write:
                vals2[field] = vals[field]
            else:
                upd_todo.append(field)
        for id_new in ids:
            self.datas[id_new].update(vals2)
            self.datas[id_new]['internal.date_access'] = time.time()
            for field in upd_todo:
                self._columns[field].set_memory(cr, self, id_new, field, vals[field], user, context)
            self._validate(cr, user, [id_new], context)
            wf_service = netsvc.LocalService("workflow")
            wf_service.trg_write(user, self._name, id_new, cr)
        return id_new

    def create(self, cr, user, vals, context=None):
        """Create one record from ``vals`` (missing fields get defaults)
        and return its newly allocated integer id."""
        # opportunistic garbage collection on every create
        self.vaccum(cr, user)
        self.next_id += 1
        id_new = self.next_id
        # fill in defaults for fields not supplied by the caller
        default = []
        for f in self._columns.keys():
            if not f in vals:
                default.append(f)
        if len(default):
            vals.update(self.default_get(cr, user, default, context))
        vals2 = {}
        upd_todo = []
        for field in vals:
            if self._columns[field]._classic_write:
                vals2[field] = vals[field]
            else:
                upd_todo.append(field)
        self.datas[id_new] = vals2
        self.datas[id_new]['internal.date_access'] = time.time()
        for field in upd_todo:
            self._columns[field].set_memory(cr, self, id_new, field, vals[field], user, context)
        self._validate(cr, user, [id_new], context)
        wf_service = netsvc.LocalService("workflow")
        wf_service.trg_create(user, self._name, id_new, cr)
        return id_new

    def default_get(self, cr, uid, fields_list, context=None):
        """Return default values for ``fields_list``, combining the model's
        _defaults, user-defined ir.values defaults (with dangling relation
        ids filtered out) and context keys of the form 'default_<field>'."""
        if not context:
            context = {}
        value = {}
        # get the default values for the inherited fields
        for f in fields_list:
            if f in self._defaults:
                value[f] = self._defaults[f](self, cr, uid, context)
            # NOTE(review): fld_def is computed but never used in this loop
            fld_def = ((f in self._columns) and self._columns[f]) \
                    or ((f in self._inherit_fields) and self._inherit_fields[f][2]) \
                    or False
        # get the default values set by the user and override the default
        # values defined in the object
        ir_values_obj = self.pool.get('ir.values')
        res = ir_values_obj.get(cr, uid, 'default', False, [self._name])
        for id, field, field_value in res:
            if field in fields_list:
                fld_def = (field in self._columns) and self._columns[field] or self._inherit_fields[field][2]
                # skip defaults that point at records which no longer exist
                if fld_def._type in ('many2one', 'one2one'):
                    obj = self.pool.get(fld_def._obj)
                    if not obj.search(cr, uid, [('id', '=', field_value)]):
                        continue
                if fld_def._type in ('many2many'):
                    obj = self.pool.get(fld_def._obj)
                    field_value2 = []
                    for i in range(len(field_value)):
                        if not obj.search(cr, uid, [('id', '=',
                            field_value[i])]):
                            continue
                        field_value2.append(field_value[i])
                    field_value = field_value2
                if fld_def._type in ('one2many'):
                    obj = self.pool.get(fld_def._obj)
                    field_value2 = []
                    for i in range(len(field_value)):
                        field_value2.append({})
                        for field2 in field_value[i]:
                            if obj._columns[field2]._type in ('many2one', 'one2one'):
                                obj2 = self.pool.get(obj._columns[field2]._obj)
                                if not obj2.search(cr, uid,
                                        [('id', '=', field_value[i][field2])]):
                                    continue
                            # TODO add test for many2many and one2many
                            field_value2[i][field2] = field_value[i][field2]
                    field_value = field_value2
                value[field] = field_value
        # get the default values from the context
        for key in context or {}:
            if key.startswith('default_'):
                value[key[8:]] = context[key]
        return value

    def search(self, cr, user, args, offset=0, limit=None, order=None,
            context=None, count=False):
        # NOTE(review): the domain/offset/limit/count arguments are ignored;
        # every live record id is returned.
        return self.datas.keys()

    def unlink(self, cr, uid, ids, context=None):
        """Delete the given records and their workflow instances."""
        for id in ids:
            if id in self.datas:
                del self.datas[id]
        if len(ids):
            # ids are ints generated by create(), so string-joining them
            # into the SQL is injection-safe here
            cr.execute('delete from wkf_instance where res_type=%s and res_id in ('+','.join(map(str, ids))+')', (self._name, ))
        return True

    def perm_read(self, cr, user, ids, context=None, details=True):
        """Synthesize permission metadata: in-memory records have no real
        audit trail, so creation is attributed to the calling user, now."""
        result = []
        for id in ids:
            result.append({
                'create_uid': (user, 'Root'),
                'create_date': time.strftime('%Y-%m-%d %H:%M:%S'),
                'write_uid': False,
                'write_date': False,
                'id': id
            })
        return result

    def _check_removed_columns(self, cr, log=False):
        # nothing to check in memory...
        pass

    def exists(self, cr, uid, id, context=None):
        # a record exists iff it is currently held in the memory store
        return id in self.datas
2008-06-14 15:14:19 +00:00
class orm ( orm_template ) :
2008-07-22 14:24:36 +00:00
_sql_constraints = [ ]
_table = None
2009-06-10 11:15:35 +00:00
_protected = [ ' read ' , ' write ' , ' create ' , ' default_get ' , ' perm_read ' , ' unlink ' , ' fields_get ' , ' fields_view_get ' , ' search ' , ' name_get ' , ' distinct_field_get ' , ' name_search ' , ' copy ' , ' import_data ' , ' search_count ' , ' exists ' ]
2008-12-24 10:48:11 +00:00
2008-08-17 18:28:29 +00:00
def _parent_store_compute(self, cr):
    """Recompute the parent_left/parent_right nested-set columns for the
    whole table by a depth-first walk from the root records (those whose
    ``self._parent_name`` column is NULL).  Used after the columns are
    first added by _auto_init.  Recursive: depth is bounded by the
    hierarchy depth of the data."""
    logger = netsvc.Logger()
    logger.notifyChannel('orm', netsvc.LOG_INFO, 'Computing parent left and right for table %s...' % (self._table, ))
    def browse_rec(root, pos=0):
        # TODO: set order
        where = self._parent_name+'='+str(root)
        if not root:
            where = self._parent_name+' IS NULL'
        if self._parent_order:
            where += ' order by ' + self._parent_order
        cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
        pos2 = pos + 1
        childs = cr.fetchall()
        # number the children first, then close our own interval
        for id in childs:
            pos2 = browse_rec(id[0], pos2)
        cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
        return pos2 + 1
    # walk every tree root in (optional) _parent_order
    query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
    if self._parent_order:
        query += ' order by ' + self._parent_order
    pos = 0
    cr.execute(query)
    for (root,) in cr.fetchall():
        pos = browse_rec(root, pos)
    return True
2008-12-14 16:46:47 +00:00
def _update_store(self, cr, f, k):
    """Materialize the stored fields.function ``k`` (column object ``f``)
    for every row of the table, in batches of 40 ids."""
    logger = netsvc.Logger()
    logger.notifyChannel('orm', netsvc.LOG_INFO, "storing computed values of fields.function '%s'" % (k,))
    ss = self._columns[k]._symbol_set
    update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
    cr.execute('select id from '+self._table)
    ids_lst = map(lambda x: x[0], cr.fetchall())
    while ids_lst:
        # process 40 records at a time to bound memory usage
        iids = ids_lst[:40]
        ids_lst = ids_lst[40:]
        res = f.get(cr, self, iids, k, 1, {})
        for key, val in res.items():
            if f._multi:
                val = val[k]
            # if val is a many2one, just write the ID
            if type(val) == tuple:
                val = val[0]
            # skip plain False (no value); any non-bool value is written
            if (val<>False) or (type(val)<>bool):
                cr.execute(update_query, (ss[1](val), key))
2009-01-29 09:59:37 +00:00
def _check_removed_columns(self, cr, log=False):
    """Drop the NOT NULL constraint from database columns that no longer
    correspond to a declared (stored) field of this model, so removed or
    not-yet-loaded module fields do not block inserts."""
    logger = netsvc.Logger()
    # iterate on the database columns to drop the NOT NULL constraints
    # of fields which were required but have been removed (or will be added by another module)
    columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
    columns += ('id', 'write_uid', 'write_date', 'create_uid', 'create_date') # openerp access columns
    # %%s placeholders survive the %-formatting below and become psycopg
    # parameters; only the NOT IN list is expanded by string formatting
    cr.execute("SELECT a.attname, a.attnotnull"
               " FROM pg_class c, pg_attribute a"
               " WHERE c.relname=%%s"
               " AND c.oid=a.attrelid"
               " AND a.attisdropped=%%s"
               " AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
               " AND a.attname NOT IN (%s)" % ",".join(['%s']*len(columns)),
               [self._table, False] + columns)
    for column in cr.dictfetchall():
        if log:
            logger.notifyChannel("orm", netsvc.LOG_DEBUG, "column %s is in the table %s but not in the corresponding object %s" % (column['attname'], self._table, self._name))
        if column['attnotnull']:
            cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
2008-07-22 14:24:36 +00:00
def _auto_init(self, cr, context={}):
    """Create or migrate this model's PostgreSQL table to match the
    declared columns: creates the table, log-access and parent_left/right
    columns, adds/retypes/resizes columns, (re)creates NOT NULL
    constraints, indexes, foreign keys and _sql_constraints.

    Returns ``todo_end``, a list of (priority, callable, args) tuples for
    stored function fields whose values must be computed after every
    model is initialized.

    NOTE(review): ``context={}`` is a mutable default argument, shared
    across calls — left as-is since it is only read here.
    """
    store_compute = False
    logger = netsvc.Logger()
    create = False
    todo_end = []
    self._field_create(cr, context=context)
    if not hasattr(self, "_auto") or self._auto:
        # does the table already exist (as a relation or a view)?
        cr.execute("SELECT relname FROM pg_class WHERE relkind in ('r','v') AND relname='%s'" % self._table)
        if not cr.rowcount:
            cr.execute("CREATE TABLE \"%s\" (id SERIAL NOT NULL, PRIMARY KEY(id)) WITH OIDS" % self._table)
            create = True
        cr.commit()
        # nested-set columns for hierarchical models
        if self._parent_store:
            cr.execute("""SELECT c.relname
                FROM pg_class c, pg_attribute a
                WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
                """, (self._table, 'parent_left'))
            if not cr.rowcount:
                if 'parent_left' not in self._columns:
                    logger.notifyChannel('orm', netsvc.LOG_ERROR, 'create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)' % (self._table, ))
                if 'parent_right' not in self._columns:
                    logger.notifyChannel('orm', netsvc.LOG_ERROR, 'create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)' % (self._table, ))
                if self._columns[self._parent_name].ondelete <> 'cascade':
                    logger.notifyChannel('orm', netsvc.LOG_ERROR, "the columns %s on object must be set as ondelete='cascasde'" % (self._name, self._parent_name))
                cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
                cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
                cr.commit()
                # schedule a full recompute at the end of _auto_init
                store_compute = True

        # audit columns (create/write uid and date)
        if self._log_access:
            logs = {
                'create_uid': 'INTEGER REFERENCES res_users ON DELETE SET NULL',
                'create_date': 'TIMESTAMP',
                'write_uid': 'INTEGER REFERENCES res_users ON DELETE SET NULL',
                'write_date': 'TIMESTAMP'
            }
            for k in logs:
                cr.execute("""
                    SELECT c.relname
                    FROM pg_class c, pg_attribute a
                    WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
                    """, (self._table, k))
                if not cr.rowcount:
                    cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, logs[k]))
                    cr.commit()

        self._check_removed_columns(cr, log=False)

        # iterate on the "object columns"
        todo_update_store = []
        for k in self._columns:
            if k in ('id', 'write_uid', 'write_date', 'create_uid', 'create_date'):
                continue
                #raise _('Can not define a column %s. Reserved keyword !') % (k,)
            f = self._columns[k]
            if isinstance(f, fields.one2many):
                # one2many: make sure the other table's fk column references us
                cr.execute("SELECT relname FROM pg_class WHERE relkind='r' AND relname=%s", (f._obj,))
                if cr.fetchone():
                    cr.execute("SELECT count(1) as c FROM pg_class c,pg_attribute a WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid", (f._obj, f._fields_id))
                    res = cr.fetchone()[0]
                    if not res:
                        # NOTE(review): uses self._obj / f._table — looks
                        # inverted vs. the many2many branch; confirm
                        cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY (%s) REFERENCES "%s" ON DELETE SET NULL' % (self._obj, f._fields_id, f._table))
            elif isinstance(f, fields.many2many):
                # many2many: create the relation table and its two indexes
                cr.execute("SELECT relname FROM pg_class WHERE relkind in ('r','v') AND relname=%s", (f._rel,))
                if not cr.dictfetchall():
                    #FIXME: Remove this try/except
                    try:
                        ref = self.pool.get(f._obj)._table
                    except AttributeError:
                        ref = f._obj.replace('.', '_')
                    cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL REFERENCES "%s" ON DELETE CASCADE, "%s" INTEGER NOT NULL REFERENCES "%s" ON DELETE CASCADE) WITH OIDS' % (f._rel, f._id1, self._table, f._id2, ref))
                    cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (f._rel, f._id1, f._rel, f._id1))
                    cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (f._rel, f._id2, f._rel, f._id2))
                    cr.commit()
            else:
                # scalar / many2one columns: inspect the current DB column
                cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN a.atttypmod-4 ELSE a.attlen END as size " \
                   "FROM pg_class c,pg_attribute a,pg_type t " \
                   "WHERE c.relname=%s " \
                   "AND a.attname=%s " \
                   "AND c.oid=a.attrelid " \
                   "AND a.atttypid=t.oid", (self._table, k))
                res = cr.dictfetchall()
                if not res:
                    if not isinstance(f, fields.function) or f.store:
                        # add the missing field
                        cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
                        # initialize it
                        if not create and k in self._defaults:
                            default = self._defaults[k](self, cr, 1, {})
                            ss = self._columns[k]._symbol_set
                            query = 'UPDATE "%s" SET "%s"=%s' % (self._table, k, ss[0])
                            cr.execute(query, (ss[1](default),))
                            cr.commit()
                            logger.notifyChannel('orm', netsvc.LOG_DEBUG, 'setting default value of new column %s of table %s' % (k, self._table))
                        elif not create:
                            logger.notifyChannel('orm', netsvc.LOG_DEBUG, 'creating new column %s of table %s' % (k, self._table))
                        # stored function fields: schedule value computation
                        if isinstance(f, fields.function):
                            order = 10
                            if f.store is not True:
                                order = f.store[f.store.keys()[0]][2]
                            todo_update_store.append((order, f, k))
                        # and add constraints if needed
                        if isinstance(f, fields.many2one):
                            #FIXME: Remove this try/except
                            try:
                                ref = self.pool.get(f._obj)._table
                            except AttributeError:
                                ref = f._obj.replace('.', '_')
                            # ir_actions is inherited so foreign key doesn't work on it
                            if ref != 'ir_actions':
                                cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (self._table, k, ref, f.ondelete))
                        if f.select:
                            cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
                        if f.required:
                            try:
                                cr.commit()
                                cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
                            except Exception, e:
                                logger.notifyChannel('orm', netsvc.LOG_WARNING, 'WARNING: unable to set column %s of table %s not null !\nTry to re-run: openerp-server.py --update=module\nIf it doesn\'t work, update records and execute manually:\nALTER TABLE %s ALTER COLUMN %s SET NOT NULL' % (k, self._table, self._table, k))
                        cr.commit()
                elif len(res) == 1:
                    # the column exists: reconcile DB type/size/constraints
                    f_pg_def = res[0]
                    f_pg_type = f_pg_def['typname']
                    f_pg_size = f_pg_def['size']
                    f_pg_notnull = f_pg_def['attnotnull']
                    if isinstance(f, fields.function) and not f.store:
                        # field became a non-stored function: drop the column
                        logger.notifyChannel('orm', netsvc.LOG_INFO, 'column %s (%s) in table %s removed: converted to a function !\n' % (k, f.string, self._table))
                        cr.execute('ALTER TABLE %s DROP COLUMN %s' % (self._table, k))
                        cr.commit()
                        f_obj_type = None
                    else:
                        f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
                    if f_obj_type:
                        ok = False
                        # supported in-place type migrations:
                        # (db type, field type, new column def, cast suffix)
                        casts = [
                            ('text', 'char', 'VARCHAR(%d)' % (f.size or 0,), '::VARCHAR(%d)' % (f.size or 0,)),
                            ('varchar', 'text', 'TEXT', ''),
                            ('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
                            ('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
                        ]
                        # !!! Avoid reduction of varchar field !!!
                        if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size < f.size:
                        # if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size != f.size:
                            logger.notifyChannel('orm', netsvc.LOG_INFO, "column '%s' in table '%s' changed size" % (k, self._table))
                            cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
                            cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" VARCHAR(%d)' % (self._table, k, f.size))
                            cr.execute('UPDATE "%s" SET "%s"=temp_change_size::VARCHAR(%d)' % (self._table, k, f.size))
                            cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size' % (self._table,))
                            cr.commit()
                        for c in casts:
                            if (f_pg_type == c[0]) and (f._type == c[1]):
                                logger.notifyChannel('orm', netsvc.LOG_INFO, "column '%s' in table '%s' changed type to %s." % (k, self._table, c[1]))
                                ok = True
                                cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
                                cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
                                cr.execute(('UPDATE "%s" SET "%s"=temp_change_size'+c[3]) % (self._table, k))
                                cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
                                cr.commit()
                        if f_pg_type != f_obj_type:
                            if not ok:
                                logger.notifyChannel('orm', netsvc.LOG_WARNING, "column '%s' in table '%s' has changed type (DB = %s, def = %s) but unable to migrate this change !" % (k, self._table, f_pg_type, f._type))

                        # if the field is required and hasn't got a NOT NULL constraint
                        if f.required and f_pg_notnull == 0:
                            # set the field to the default value if any
                            if k in self._defaults:
                                default = self._defaults[k](self, cr, 1, {})
                                if (default is not None):
                                    ss = self._columns[k]._symbol_set
                                    query = 'UPDATE "%s" SET "%s"=%s WHERE %s is NULL' % (self._table, k, ss[0], k)
                                    cr.execute(query, (ss[1](default),))
                            # add the NOT NULL constraint
                            cr.commit()
                            try:
                                cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k))
                                cr.commit()
                            except Exception, e:
                                logger.notifyChannel('orm', netsvc.LOG_WARNING, 'unable to set a NOT NULL constraint on column %s of the %s table !\nIf you want to have it, you should update the records and execute manually:\nALTER TABLE %s ALTER COLUMN %s SET NOT NULL' % (k, self._table, self._table, k))
                            cr.commit()
                        elif not f.required and f_pg_notnull == 1:
                            cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
                            cr.commit()
                        # keep the per-column index in sync with f.select
                        indexname = '%s_%s_index' % (self._table, k)
                        cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
                        res = cr.dictfetchall()
                        if not res and f.select:
                            cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
                            cr.commit()
                        if res and not f.select:
                            cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
                            cr.commit()
                        # verify the fk's ON DELETE action still matches f.ondelete
                        if isinstance(f, fields.many2one):
                            ref = self.pool.get(f._obj)._table
                            if ref != 'ir_actions':
                                cr.execute('SELECT confdeltype, conname FROM pg_constraint as con, pg_class as cl1, pg_class as cl2, '
                                            'pg_attribute as att1, pg_attribute as att2 '
                                        'WHERE con.conrelid = cl1.oid '
                                            'AND cl1.relname = %s '
                                            'AND con.confrelid = cl2.oid '
                                            'AND cl2.relname = %s '
                                            'AND array_lower(con.conkey, 1) = 1 '
                                            'AND con.conkey[1] = att1.attnum '
                                            'AND att1.attrelid = cl1.oid '
                                            'AND att1.attname = %s '
                                            'AND array_lower(con.confkey, 1) = 1 '
                                            'AND con.confkey[1] = att2.attnum '
                                            'AND att2.attrelid = cl2.oid '
                                            'AND att2.attname = %s '
                                        "AND con.contype = 'f'", (self._table, ref, k, 'id'))
                                res = cr.dictfetchall()
                                if res:
                                    confdeltype = {
                                        'RESTRICT': 'r',
                                        'NO ACTION': 'a',
                                        'CASCADE': 'c',
                                        'SET NULL': 'n',
                                        'SET DEFAULT': 'd',
                                    }
                                    if res[0]['confdeltype'] != confdeltype.get(f.ondelete.upper(), 'a'):
                                        cr.execute('ALTER TABLE "' + self._table + '" DROP CONSTRAINT "' + res[0]['conname'] + '"')
                                        cr.execute('ALTER TABLE "' + self._table + '" ADD FOREIGN KEY ("' + k + '") REFERENCES "' + ref + '" ON DELETE ' + f.ondelete)
                                        cr.commit()
                else:
                    # more than one catalog row for (table, column): impossible
                    logger.notifyChannel('orm', netsvc.LOG_ERROR, "Programming error !")
        for order, f, k in todo_update_store:
            todo_end.append((order, self._update_store, (f, k)))

    else:
        # _auto is False: table/view managed elsewhere, just detect creation
        cr.execute("SELECT relname FROM pg_class WHERE relkind in ('r','v') AND relname=%s", (self._table,))
        create = not bool(cr.fetchone())

    # add the declared SQL constraints that are not in the catalog yet
    # (NOTE: the throwaway loop variable shadows the gettext alias `_`)
    for (key, con, _) in self._sql_constraints:
        conname = '%s_%s' % (self._table, key)
        cr.execute("SELECT conname FROM pg_constraint where conname=%s", (conname,))
        if not cr.dictfetchall():
            try:
                cr.execute('alter table "%s" add constraint "%s_%s" %s' % (self._table, self._table, key, con,))
                cr.commit()
            except:
                logger.notifyChannel('orm', netsvc.LOG_WARNING, 'unable to add \'%s\' constraint on table %s !\nIf you want to have it, you should update the records and execute manually:\nALTER table %s ADD CONSTRAINT %s_%s %s' % (con, self._table, self._table, self._table, key, con,))

    # run the model's custom initialization SQL on first creation
    if create:
        if hasattr(self, "_sql"):
            for line in self._sql.split(';'):
                line2 = line.replace('\n', '').strip()
                if line2:
                    cr.execute(line2)
                    cr.commit()
    if store_compute:
        self._parent_store_compute(cr)
    return todo_end
2008-07-22 14:24:36 +00:00
    def __init__(self, cr):
        """Initialise this model instance for one database.

        :param cr: database cursor, used to look up custom ('manual')
                   fields stored in ir_model_fields.

        Side effects performed here:
          - derives ``_log_access`` from ``_auto`` when not set explicitly
          - registers function-field ``store`` triggers in
            ``self.pool._store_function``
          - registers SQL-constraint error messages in ``self.pool._sql_error``
          - instantiates column objects for 'manual' fields found in the DB
          - rebuilds the inherited-fields mapping and the sequence name
          - restarts (resets) every column object
        """
        super(orm, self).__init__(cr)

        if not hasattr(self, '_log_access'):
            # If _log_access is not specified, it takes the same value as _auto.
            self._log_access = not hasattr(self, "_auto") or self._auto

        # Work on a per-instance copy so the class-level _columns dict is not
        # mutated by the manual-field additions below.
        self._columns = self._columns.copy()

        # Register the "store" triggers of stored function fields: for each
        # trigger model, remember (src_model, field, fnct, fields, priority)
        # in the shared pool registry.
        for store_field in self._columns:
            f = self._columns[store_field]
            if not isinstance(f, fields.function):
                continue
            if not f.store:
                continue
            if self._columns[store_field].store is True:
                # store=True: recompute the field on the records themselves,
                # with default priority 10.
                sm = {self._name: (lambda self, cr, uid, ids, c={}: ids, None, 10)}
            else:
                # store={model: (fnct, fields, priority)} mapping supplied
                # by the field definition.
                sm = self._columns[store_field].store
            for object, aa in sm.items():
                if len(aa) == 3:
                    (fnct, fields2, order) = aa
                else:
                    raise except_orm('Error',
                        ('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority)}.' % (store_field, self._name)))
                self.pool._store_function.setdefault(object, [])
                ok = True
                # Skip duplicates already registered for the same model/field.
                # NOTE(review): this loop rebinds 'f' (the column object above)
                # and never uses 'z'.
                for x, y, z, e, f in self.pool._store_function[object]:
                    if (x == self._name) and (y == store_field) and (e == fields2):
                        ok = False
                if ok:
                    self.pool._store_function[object].append((self._name, store_field, fnct, fields2, order))
                    # Keep triggers sorted by priority (element 4).
                    self.pool._store_function[object].sort(lambda x, y: cmp(x[4], y[4]))

        # Map constraint names to their user-visible error messages.
        for (key, _, msg) in self._sql_constraints:
            self.pool._sql_error[self._table + '_' + key] = msg

        # Load manual fields: only when ir_model_fields already has a 'state'
        # column (i.e. the base module is at a recent enough version).
        cr.execute("SELECT id FROM ir_model_fields WHERE name=%s AND model=%s", ('state', 'ir.model.fields'))
        if cr.fetchone():
            cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (self._name, 'manual'))
            for field in cr.dictfetchall():
                if field['name'] in self._columns:
                    continue
                attrs = {
                    'string': field['field_description'],
                    'required': bool(field['required']),
                    'readonly': bool(field['readonly']),
                    'domain': field['domain'] or None,
                    'size': field['size'],
                    'ondelete': field['on_delete'],
                    'translate': (field['translate']),
                    #'select': int(field['select_level'])
                }
                # NOTE(review): eval() on DB-stored 'selection' assumes only
                # trusted administrators can create manual fields — confirm.
                if field['ttype'] == 'selection':
                    self._columns[field['name']] = getattr(fields, field['ttype'])(eval(field['selection']), **attrs)
                elif field['ttype'] == 'reference':
                    self._columns[field['name']] = getattr(fields, field['ttype'])(selection=eval(field['selection']), **attrs)
                elif field['ttype'] == 'many2one':
                    self._columns[field['name']] = getattr(fields, field['ttype'])(field['relation'], **attrs)
                elif field['ttype'] == 'one2many':
                    self._columns[field['name']] = getattr(fields, field['ttype'])(field['relation'], field['relation_field'], **attrs)
                elif field['ttype'] == 'many2many':
                    # NOTE(review): 'random' is imported but never used here.
                    import random
                    # Relation table named after both models and the field.
                    _rel1 = field['relation'].replace('.', '_')
                    _rel2 = field['model'].replace('.', '_')
                    _rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
                    self._columns[field['name']] = getattr(fields, field['ttype'])(field['relation'], _rel_name, 'id1', 'id2', **attrs)
                else:
                    self._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
        self._inherits_reload()
        if not self._sequence:
            self._sequence = self._table + '_id_seq'
        # Every default must target an existing (own or inherited) field.
        for k in self._defaults:
            assert (k in self._columns) or (k in self._inherit_fields), 'Default function defined in %s but field %s does not exist !' % (self._name, k,)
        for f in self._columns:
            self._columns[f].restart()
    def default_get(self, cr, uid, fields_list, context=None):
        """Return a dict of default values for the fields in ``fields_list``.

        Precedence (later stages override earlier ones):
          1. defaults of the ``_inherits`` parent models
          2. this model's ``_defaults`` functions and ``ir.property`` values
          3. user-defined defaults from ``ir.values`` (relational values are
             filtered so ids of deleted records are dropped)
          4. ``default_<field>`` keys found in the context
        """
        if not context:
            context = {}
        value = {}
        # get the default values for the inherited fields
        for t in self._inherits.keys():
            value.update(self.pool.get(t).default_get(cr, uid, fields_list,
                context))

        # get the default values defined in the object
        for f in fields_list:
            if f in self._defaults:
                value[f] = self._defaults[f](self, cr, uid, context)
            # Column object for f, whether owned or inherited; False if neither.
            fld_def = ((f in self._columns) and self._columns[f]) \
                    or ((f in self._inherit_fields) and self._inherit_fields[f][2]) \
                    or False
            if isinstance(fld_def, fields.property):
                # Property fields take their default from the company-less
                # (res_id=False) ir.property record, if any.
                property_obj = self.pool.get('ir.property')
                definition_id = fld_def._field_get(cr, uid, self._name, f)
                nid = property_obj.search(cr, uid, [('fields_id', '=',
                    definition_id), ('res_id', '=', False)])
                if nid:
                    prop_value = property_obj.browse(cr, uid, nid[0],
                            context=context).value
                    # Value is stored as '<model>,<id>'; keep the numeric id.
                    value[f] = (prop_value and int(prop_value.split(',')[1])) \
                            or False

        # get the default values set by the user and override the default
        # values defined in the object
        ir_values_obj = self.pool.get('ir.values')
        res = ir_values_obj.get(cr, uid, 'default', False, [self._name])
        for id, field, field_value in res:
            if field in fields_list:
                fld_def = (field in self._columns) and self._columns[field] or self._inherit_fields[field][2]
                if fld_def._type in ('many2one', 'one2one'):
                    # Drop the default if the target record no longer exists.
                    obj = self.pool.get(fld_def._obj)
                    if not obj.search(cr, uid, [('id', '=', field_value)]):
                        continue
                if fld_def._type in ('many2many'):
                    # Keep only ids of records that still exist.
                    obj = self.pool.get(fld_def._obj)
                    field_value2 = []
                    for i in range(len(field_value)):
                        if not obj.search(cr, uid, [('id', '=',
                            field_value[i])]):
                            continue
                        field_value2.append(field_value[i])
                    field_value = field_value2
                if fld_def._type in ('one2many'):
                    # Rebuild each sub-record dict, dropping many2one/one2one
                    # sub-values that point to deleted records.
                    obj = self.pool.get(fld_def._obj)
                    field_value2 = []
                    for i in range(len(field_value)):
                        field_value2.append({})
                        for field2 in field_value[i]:
                            if obj._columns[field2]._type in ('many2one', 'one2one'):
                                obj2 = self.pool.get(obj._columns[field2]._obj)
                                if not obj2.search(cr, uid,
                                        [('id', '=', field_value[i][field2])]):
                                    continue
                            # TODO add test for many2many and one2many
                            field_value2[i][field2] = field_value[i][field2]
                    field_value = field_value2
                value[field] = field_value

        # Context-supplied defaults ('default_<field>') win over everything.
        for key in context or {}:
            if key.startswith('default_'):
                value[key[8:]] = context[key]

        return value
    #
    # Ask every model that uses this one (via _inherits) to refresh its
    # inherited-fields mapping
    #
def _inherits_reload_src ( self ) :
for obj in self . pool . obj_pool . values ( ) :
if self . _name in obj . _inherits :
obj . _inherits_reload ( )
def _inherits_reload ( self ) :
res = { }
for table in self . _inherits :
res . update ( self . pool . get ( table ) . _inherit_fields )
for col in self . pool . get ( table ) . _columns . keys ( ) :
2008-08-12 14:44:56 +00:00
res [ col ] = ( table , self . _inherits [ table ] , self . pool . get ( table ) . _columns [ col ] )
2008-07-22 14:24:36 +00:00
for col in self . pool . get ( table ) . _inherit_fields . keys ( ) :
2008-08-12 14:44:56 +00:00
res [ col ] = ( table , self . _inherits [ table ] , self . pool . get ( table ) . _inherit_fields [ col ] [ 2 ] )
self . _inherit_fields = res
2008-07-22 14:24:36 +00:00
self . _inherits_reload_src ( )
def fields_get ( self , cr , user , fields = None , context = None ) :
2009-08-19 15:18:20 +00:00
ira = self . pool . get ( ' ir.model.access ' )
read_access = ira . check ( cr , user , self . _name , ' write ' , raise_exception = False , context = context ) or \
ira . check ( cr , user , self . _name , ' create ' , raise_exception = False , context = context )
2008-07-22 14:24:36 +00:00
return super ( orm , self ) . fields_get ( cr , user , fields , context , read_access )
def read ( self , cr , user , ids , fields = None , context = None , load = ' _classic_read ' ) :
if not context :
2008-08-12 14:44:56 +00:00
context = { }
2009-08-10 16:24:09 +00:00
self . pool . get ( ' ir.model.access ' ) . check ( cr , user , self . _name , ' read ' , context = context )
2008-07-22 14:24:36 +00:00
if not fields :
fields = self . _columns . keys ( ) + self . _inherit_fields . keys ( )
select = ids
if isinstance ( ids , ( int , long ) ) :
select = [ ids ]
2008-08-12 14:44:56 +00:00
result = self . _read_flat ( cr , user , select , fields , context , load )
2008-07-22 14:24:36 +00:00
for r in result :
2008-08-12 14:44:56 +00:00
for key , v in r . items ( ) :
2008-07-22 14:24:36 +00:00
if v == None :
2008-08-12 14:44:56 +00:00
r [ key ] = False
2008-07-22 14:24:36 +00:00
if isinstance ( ids , ( int , long ) ) :
2008-12-05 12:41:58 +00:00
return result and result [ 0 ] or False
2008-07-22 14:24:36 +00:00
return result
2008-08-29 13:08:14 +00:00
    def _read_flat(self, cr, user, ids, fields_to_read, context=None, load='_classic_read'):
        """Low-level read: fetch ``fields_to_read`` for ``ids`` and return a
        list of dicts, one per record.

        Pipeline:
          - SELECT the stored ("classic") columns in cr.IN_MAX-sized chunks,
            enforcing ir.rule record rules
          - overlay ir.translation values for translatable fields
          - read inherited fields from the _inherits parents and merge them in
          - apply _symbol_get post-processing, then compute function fields
            (grouped by their ``_multi`` key)
          - blank out values of fields whose column restricts 'read' to
            groups the user does not belong to
        """
        if not context:
            context = {}
        if not ids:
            return []

        if fields_to_read == None:
            fields_to_read = self._columns.keys()

        # construct a clause for the rules :
        d1, d2 = self.pool.get('ir.rule').domain_get(cr, user, self._name)
        # all inherited fields + all non inherited fields for which the attribute whose name is in load is True
        fields_pre = [f for f in fields_to_read if
                           f == self.CONCURRENCY_CHECK_FIELD
                        or (f in self._columns and getattr(self._columns[f], '_classic_write'))
                     ] + self._inherits.values()

        res = []
        if len(fields_pre):
            def convert_field(f):
                # Build the SELECT expression for one column.
                if f in ('create_date', 'write_date'):
                    # Truncate to whole seconds.
                    return "date_trunc('second', %s) as %s" % (f, f)
                if f == self.CONCURRENCY_CHECK_FIELD:
                    # Pseudo-field: timestamp used by _check_concurrency.
                    if self._log_access:
                        return "COALESCE(write_date, create_date, now())::timestamp AS %s" % (f,)
                    return "now()::timestamp AS %s" % (f,)
                if isinstance(self._columns[f], fields.binary) and context.get('bin_size', False):
                    # With bin_size, return the size instead of the content.
                    return 'length("%s") as "%s"' % (f, f)
                return '"%s"' % (f,)
            fields_pre2 = map(convert_field, fields_pre)
            # Fetch in chunks to respect the cursor's IN-clause limit.
            for i in range(0, len(ids), cr.IN_MAX):
                sub_ids = ids[i:i + cr.IN_MAX]
                if d1:
                    cr.execute('SELECT %s FROM \"%s\" WHERE id IN (%s) AND %s ORDER BY %s' % \
                            (','.join(fields_pre2 + ['id']), self._table,
                                ','.join([str(x) for x in sub_ids]), d1,
                                self._order), d2)
                    # Fewer rows than distinct ids means a rule filtered some out.
                    if not cr.rowcount == len({}.fromkeys(sub_ids)):
                        raise except_orm(_('AccessError'),
                                _('You try to bypass an access rule (Document type: %s).') % self._description)
                else:
                    cr.execute('SELECT %s FROM \"%s\" WHERE id IN (%s) ORDER BY %s' % \
                            (','.join(fields_pre2 + ['id']), self._table,
                                ','.join([str(x) for x in sub_ids]),
                                self._order))
                res.extend(cr.dictfetchall())
        else:
            # Nothing to SELECT: return skeleton records with only the id.
            res = map(lambda x: {'id': x}, ids)

        # Overlay translated values for the user's language.
        for f in fields_pre:
            if f == self.CONCURRENCY_CHECK_FIELD:
                continue
            if self._columns[f].translate:
                ids = map(lambda x: x['id'], res)
                res_trans = self.pool.get('ir.translation')._get_ids(cr, user, self._name + ',' + f, 'model', context.get('lang', False) or 'en_US', ids)
                for r in res:
                    r[f] = res_trans.get(r['id'], False) or r[f]

        # Merge the inherited fields read from each _inherits parent.
        for table in self._inherits:
            col = self._inherits[table]
            cols = intersect(self._inherit_fields.keys(), fields_to_read)
            if not cols:
                continue
            res2 = self.pool.get(table).read(cr, user, [x[col] for x in res], cols, context, load)

            res3 = {}
            for r in res2:
                res3[r['id']] = r
                del r['id']

            for record in res:
                record.update(res3[record[col]])
                # Drop the link column unless it was explicitly requested.
                if col not in fields_to_read:
                    del record[col]

        # all fields which need to be post-processed by a simple function (symbol_get)
        fields_post = filter(lambda x: x in self._columns and self._columns[x]._symbol_get, fields_to_read)
        if fields_post:
            # maybe it would be faster to iterate on the fields then on res, so that we wouldn't need
            # to get the _symbol_get in each occurence
            for r in res:
                for f in fields_post:
                    r[f] = self._columns[f]._symbol_get(r[f])
        ids = map(lambda x: x['id'], res)

        # all non inherited fields for which the attribute whose name is in load is False
        fields_post = filter(lambda x: x in self._columns and not getattr(self._columns[x], load), fields_to_read)

        # Compute POST fields, grouped by their _multi key so multi-fields
        # are computed together in a single get() call.
        todo = {}
        for f in fields_post:
            todo.setdefault(self._columns[f]._multi, [])
            todo[self._columns[f]._multi].append(f)
        for key, val in todo.items():
            if key:
                res2 = self._columns[val[0]].get(cr, self, ids, val, user, context=context, values=res)
                for pos in val:
                    for record in res:
                        record[pos] = res2[record['id']][pos]
            else:
                for f in val:
                    res2 = self._columns[f].get(cr, self, ids, f, user, context=context, values=res)
                    for record in res:
                        record[f] = res2[record['id']]

        #for f in fields_post:
        #    # get the value of that field for all records/ids
        #    res2 = self._columns[f].get(cr, self, ids, f, user, context=context, values=res)
        #    for record in res:
        #        record[f] = res2[record['id']]

        # Field-level read-group check: if a column lists groups in its
        # 'read' attribute and the user is in none of them, replace the value
        # with an empty/placeholder one.
        readonly = None
        for vals in res:
            for field in vals.copy():
                fobj = None
                if field in self._columns:
                    fobj = self._columns[field]
                if not fobj:
                    continue
                groups = fobj.read
                if groups:
                    edit = False
                    for group in groups:
                        module = group.split(".")[0]
                        grp = group.split(".")[1]
                        # NOTE(review): query built by %-interpolation, not
                        # parameterized; group names come from field
                        # definitions (trusted code), but confirm.
                        cr.execute("select count(*) from res_groups_users_rel where gid in (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
                                (grp, module, 'res.groups', user))
                        readonly = cr.fetchall()
                        if readonly[0][0] >= 1:
                            edit = True
                            break
                        elif readonly[0][0] == 0:
                            edit = False
                        else:
                            edit = False

                    if not edit:
                        # Replace by a type-appropriate "no permission" value.
                        if type(vals[field]) == type([]):
                            vals[field] = []
                        elif type(vals[field]) == type(0.0):
                            vals[field] = 0
                        elif type(vals[field]) == type(''):
                            vals[field] = '=No Permission='
                        else:
                            vals[field] = False
        return res
    def perm_read(self, cr, user, ids, context=None, details=True):
        """Return creation/modification metadata for the given records.

        When ``_log_access`` is set, each result dict carries create_uid,
        create_date, write_uid and write_date; with ``details`` the uid
        values are expanded via res.users name_get.
        """
        if not context:
            context = {}
        if not ids:
            return []
        fields = ''
        if self._log_access:
            fields = ', u.create_uid, u.create_date, u.write_uid, u.write_date'
        if isinstance(ids, (int, long)):
            ids_str = str(ids)
        else:
            ids_str = string.join(map(lambda x: str(x), ids), ',')
        cr.execute('select u.id' + fields + ' from "' + self._table + '" u where u.id in (' + ids_str + ')')
        res = cr.dictfetchall()
        for r in res:
            for key in r:
                # Normalize NULLs to False.
                r[key] = r[key] or False
                if key in ('write_uid', 'create_uid', 'uid') and details:
                    if r[key]:
                        # Replace the raw uid by its (id, name) pair.
                        r[key] = self.pool.get('res.users').name_get(cr, user, [r[key]])[0]
        if isinstance(ids, (int, long)):
            # NOTE(review): this indexes the one-element result list by the
            # record id itself — looks like it should be res[0]; any id
            # other than 0 would raise IndexError. Confirm before relying
            # on single-id calls.
            return res[ids]
        return res
2009-01-15 11:57:18 +00:00
    def _check_concurrency(self, cr, ids, context):
        """Raise ConcurrencyException if any of ``ids`` was modified since
        the client last read it.

        The client supplies, under context[self.CONCURRENCY_CHECK_FIELD], a
        mapping '<model>,<id>' -> timestamp of its last read; any record
        whose COALESCE(write_date, create_date, now()) is newer fails the
        check. Records with no entry in the mapping are skipped.
        """
        if not context:
            return
        if context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access:
            def key(oid):
                # Key format used by the client-side mapping.
                return "%s,%s" % (self._name, oid)
            santa = "(id = %s AND %s < COALESCE(write_date, create_date, now())::timestamp)"
            for i in range(0, len(ids), cr.IN_MAX):
                # Flattened (id, timestamp, id, timestamp, ...) parameter list
                # for the OR-joined clause below.
                sub_ids = tools.flatten(((oid, context[self.CONCURRENCY_CHECK_FIELD][key(oid)])
                                              for oid in ids[i:i + cr.IN_MAX]
                                              if key(oid) in context[self.CONCURRENCY_CHECK_FIELD]))
                if sub_ids:
                    # len(sub_ids)/2 pairs — relies on Python 2 integer division.
                    cr.execute("SELECT count(1) FROM %s WHERE %s" % (self._table, " OR ".join([santa] * (len(sub_ids) / 2))), sub_ids)
                    res = cr.fetchone()
                    if res and res[0]:
                        raise except_orm('ConcurrencyException', _('Records were modified in the meanwhile'))
    def unlink(self, cr, uid, ids, context=None):
        """Delete the records with the given ids.

        Checks concurrency and 'unlink' access, refuses deletion of records
        referenced as a default ir.property, notifies the workflow engine,
        enforces ir.rule record rules, deletes in cr.IN_MAX-sized chunks,
        then recomputes stored function fields on other affected models.
        """
        if not ids:
            return True
        if isinstance(ids, (int, long)):
            ids = [ids]

        # Collect store-trigger recomputations BEFORE the rows disappear.
        result_store = self._store_get_values(cr, uid, ids, None, context)

        self._check_concurrency(cr, ids, context)

        self.pool.get('ir.model.access').check(cr, uid, self._name, 'unlink', context=context)

        # Refuse to delete a record that some ir.property uses as a default
        # value ('<model>,<id>' reference with no res_id).
        properties = self.pool.get('ir.property')
        domain = [('res_id', '=', False),
                  ('value', 'in', ['%s,%s' % (self._name, i) for i in ids]),
                 ]
        if properties.search(cr, uid, domain, context=context):
            raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))

        # Let the workflow engine clean up its instances for these records.
        wf_service = netsvc.LocalService("workflow")
        for oid in ids:
            wf_service.trg_delete(uid, self._name, oid, cr)

        #cr.execute('select * from '+self._table+' where id in ('+str_d+')', ids)
        #res = cr.dictfetchall()
        #for key in self._inherits:
        #    ids2 = [x[self._inherits[key]] for x in res]
        #    self.pool.get(key).unlink(cr, uid, ids2)

        d1, d2 = self.pool.get('ir.rule').domain_get(cr, uid, self._name)
        if d1:
            d1 = ' AND ' + d1

        for i in range(0, len(ids), cr.IN_MAX):
            sub_ids = ids[i:i + cr.IN_MAX]
            # One '%s' placeholder per id.
            str_d = string.join(('%s',) * len(sub_ids), ',')
            if d1:
                # Verify the rule lets the user see every record being deleted.
                cr.execute('SELECT id FROM "' + self._table + '" ' \
                        'WHERE id IN (' + str_d + ')' + d1, sub_ids + d2)
                if not cr.rowcount == len(sub_ids):
                    raise except_orm(_('AccessError'),
                            _('You try to bypass an access rule (Document type: %s).') % \
                                    self._description)

            if d1:
                cr.execute('delete from "' + self._table + '" ' \
                        'where id in (' + str_d + ')' + d1, sub_ids + d2)
            else:
                cr.execute('delete from "' + self._table + '" ' \
                        'where id in (' + str_d + ')', sub_ids)

        # Recompute stored function fields on OTHER models that depended on
        # the deleted records (skipping this model: its rows are gone).
        for order, object, store_ids, fields in result_store:
            if object <> self._name:
                obj = self.pool.get(object)
                cr.execute('select id from ' + obj._table + ' where id in (' + ','.join(map(str, store_ids)) + ')')
                rids = map(lambda x: x[0], cr.fetchall())
                if rids:
                    obj._store_set_values(cr, uid, rids, fields, context)

        return True
#
# TODO: Validate
#
    def write(self, cr, user, ids, vals, context=None):
        """Write ``vals`` (field_name -> value) on the records ``ids``.

        Main steps:
          - drop fields whose column restricts 'write' to groups the user
            is not in
          - check concurrency and 'write' access
          - split fields into directly-updatable columns, function fields
            with a set() method, and inherited fields; validate selection
            values
          - UPDATE the table in cr.IN_MAX-sized chunks under ir.rule rules
          - store translations for translatable fields when writing in a
            non-en_US language
          - call set() on non-classic fields, delegate inherited fields to
            the parent models' write()
          - maintain the parent_left/parent_right nested-set interval when
            the parent field changes
          - recompute stored function fields and fire workflow triggers
        """
        readonly = None
        # Field-level write-group check: silently drop fields the user may
        # not write.
        for field in vals.copy():
            fobj = None
            if field in self._columns:
                fobj = self._columns[field]
            else:
                fobj = self._inherit_fields[field][2]
            if not fobj:
                continue
            groups = fobj.write

            if groups:
                edit = False
                for group in groups:
                    module = group.split(".")[0]
                    grp = group.split(".")[1]
                    # NOTE(review): %-interpolated SQL; group names come from
                    # field definitions (trusted code), but confirm.
                    cr.execute("select count(*) from res_groups_users_rel where gid in (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
                            (grp, module, 'res.groups', user))
                    readonly = cr.fetchall()
                    if readonly[0][0] >= 1:
                        edit = True
                        break
                    elif readonly[0][0] == 0:
                        edit = False
                    else:
                        edit = False

                if not edit:
                    vals.pop(field)

        if not context:
            context = {}
        if not ids:
            return True
        if isinstance(ids, (int, long)):
            ids = [ids]

        self._check_concurrency(cr, ids, context)

        self.pool.get('ir.model.access').check(cr, user, self._name, 'write', context=context)

        # upd0: SET fragments; upd1: their parameters; upd_todo: fields
        # written via their set() method; updend: inherited fields.
        upd0 = []
        upd1 = []
        upd_todo = []
        updend = []
        direct = []
        totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
        for field in vals:
            if field in self._columns:
                if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
                    if (not totranslate) or not self._columns[field].translate:
                        # Plain column: update it directly in SQL.
                        upd0.append('"' + field + '"=' + self._columns[field]._symbol_set[0])
                        upd1.append(self._columns[field]._symbol_set[1](vals[field]))
                    direct.append(field)
                else:
                    upd_todo.append(field)
            else:
                updend.append(field)
            # Validate the value of selection fields (for 'reference' fields
            # only the model part before the comma is checked).
            if field in self._columns \
                    and hasattr(self._columns[field], 'selection') \
                    and vals[field]:
                if self._columns[field]._type == 'reference':
                    val = vals[field].split(',')[0]
                else:
                    val = vals[field]
                if isinstance(self._columns[field].selection, (tuple, list)):
                    if val not in dict(self._columns[field].selection):
                        raise except_orm(_('ValidateError'),
                        _('The value "%s" for the field "%s" is not in the selection') \
                                % (vals[field], field))
                else:
                    # Callable selection: evaluate it to get the choices.
                    if val not in dict(self._columns[field].selection(
                        self, cr, user, context=context)):
                        raise except_orm(_('ValidateError'),
                        _('The value "%s" for the field "%s" is not in the selection') \
                                % (vals[field], field))

        if self._log_access:
            upd0.append('write_uid=%s')
            upd0.append('write_date=now()')
            upd1.append(user)

        if len(upd0):
            d1, d2 = self.pool.get('ir.rule').domain_get(cr, user, self._name)
            if d1:
                d1 = ' and ' + d1

            for i in range(0, len(ids), cr.IN_MAX):
                sub_ids = ids[i:i + cr.IN_MAX]
                ids_str = string.join(map(str, sub_ids), ',')
                if d1:
                    # Fewer matches than distinct ids: a record rule filtered
                    # some records out.
                    cr.execute('SELECT id FROM "' + self._table + '" ' \
                            'WHERE id IN (' + ids_str + ')' + d1, d2)
                    if not cr.rowcount == len({}.fromkeys(sub_ids)):
                        raise except_orm(_('AccessError'),
                                _('You try to bypass an access rule (Document type: %s).') % \
                                        self._description)
                else:
                    cr.execute('SELECT id FROM "' + self._table + '" WHERE id IN (' + ids_str + ')')
                    if not cr.rowcount == len({}.fromkeys(sub_ids)):
                        raise except_orm(_('AccessError'),
                                _('You try to write on an record that doesn\'t exist ' \
                                        '(Document type: %s).') % self._description)
                if d1:
                    cr.execute('update "' + self._table + '" set ' + string.join(upd0, ',') + ' ' \
                            'where id in (' + ids_str + ')' + d1, upd1 + d2)
                else:
                    cr.execute('update "' + self._table + '" set ' + string.join(upd0, ',') + ' ' \
                            'where id in (' + ids_str + ')', upd1)

            if totranslate:
                # Store the written value as a translation for context lang,
                # keeping the previous value as the translation source.
                for f in direct:
                    if self._columns[f].translate:
                        src_trans = self.pool.get(self._name).read(cr, user, ids, [f])
                        self.pool.get('ir.translation')._set_ids(cr, user, self._name + ',' + f, 'model', context['lang'], ids, vals[f], src_trans[0][f])

        # call the 'set' method of fields which are not classic_write
        upd_todo.sort(lambda x, y: self._columns[x].priority - self._columns[y].priority)

        # default element in context must be removed when call a one2many or many2many
        rel_context = context.copy()
        for c in context.items():
            if c[0].startswith('default_'):
                del rel_context[c[0]]

        for field in upd_todo:
            for id in ids:
                self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context)

        # Delegate inherited fields: collect the parent ids through the link
        # column, then write on each parent model.
        for table in self._inherits:
            col = self._inherits[table]
            nids = []
            for i in range(0, len(ids), cr.IN_MAX):
                sub_ids = ids[i:i + cr.IN_MAX]
                ids_str = string.join(map(str, sub_ids), ',')
                cr.execute('select distinct "' + col + '" from "' + self._table + '" ' \
                        'where id in (' + ids_str + ')', upd1)
                nids.extend([x[0] for x in cr.fetchall()])

            v = {}
            for val in updend:
                if self._inherit_fields[val][0] == table:
                    v[val] = vals[val]
            self.pool.get(table).write(cr, user, nids, v, context)

        self._validate(cr, user, ids, context)

        # TODO: use _order to set dest at the right position and not first node of parent
        if self._parent_store and (self._parent_name in vals):
            if self.pool._init:
                # During module init, defer the nested-set recomputation.
                self.pool._init_parent[self._name] = True
            else:
                for id in ids:
                    # Find Position of the element: scan the siblings under
                    # the new parent in order; position is just after the
                    # sibling preceding this record.
                    if vals[self._parent_name]:
                        cr.execute('select parent_left,parent_right,id from ' + self._table + ' where ' + self._parent_name + '=%s order by ' + (self._parent_order or self._order), (vals[self._parent_name],))
                    else:
                        cr.execute('select parent_left,parent_right,id from ' + self._table + ' where ' + self._parent_name + ' is null order by ' + (self._parent_order or self._order))
                    result_p = cr.fetchall()
                    position = None
                    for (pleft, pright, pid) in result_p:
                        if pid == id:
                            break
                        position = pright + 1

                    # It's the first node of the parent: position = parent_left+1
                    if not position:
                        if not vals[self._parent_name]:
                            position = 1
                        else:
                            cr.execute('select parent_left from ' + self._table + ' where id=%s', (vals[self._parent_name],))
                            position = cr.fetchone()[0] + 1

                    # We have the new position !
                    cr.execute('select parent_left,parent_right from ' + self._table + ' where id=%s', (id,))
                    pleft, pright = cr.fetchone()
                    distance = pright - pleft + 1

                    # Moving a node inside its own interval would make it its
                    # own descendant.
                    if position > pleft and position <= pright:
                        raise except_orm(_('UserError'), _('Recursivity Detected.'))

                    # Shift the intervals: open a gap of 'distance' at the
                    # target position, then slide the moved subtree into it.
                    if pleft < position:
                        cr.execute('update ' + self._table + ' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
                        cr.execute('update ' + self._table + ' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
                        cr.execute('update ' + self._table + ' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position - pleft, position - pleft, pleft, pright))
                    else:
                        cr.execute('update ' + self._table + ' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
                        cr.execute('update ' + self._table + ' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
                        cr.execute('update ' + self._table + ' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft - position + distance, pleft - position + distance, pleft + distance, pright + distance))

        # Recompute stored function fields that depend on the written fields.
        result = self._store_get_values(cr, user, ids, vals.keys(), context)
        # NOTE(review): this loop rebinds 'ids'; the workflow loop below then
        # iterates the ids of the LAST store result (if any), not the records
        # originally written — confirm whether that is intended.
        for order, object, ids, fields in result:
            self.pool.get(object)._store_set_values(cr, user, ids, fields, context)

        wf_service = netsvc.LocalService("workflow")
        for id in ids:
            wf_service.trg_write(user, self._name, id, cr)
        return True
#
# TODO: Should set perm to user.xxx
#
def create(self, cr, user, vals, context=None):
    """create(cr, user, vals, context) -> int

    Create a new record of this model and return its database id.

    cr = database cursor
    user = user id
    vals = dictionary of the form {'field_name': field_value, ...}
    context = optional context dictionary (default_* keys, language, ...)

    Raises except_orm when the access rules forbid creation, when a
    selection value is invalid, or when the id sequence cannot be used.
    """
    if not context:
        context = {}
    self.pool.get('ir.model.access').check(cr, user, self._name, 'create', context=context)

    default = []
    avoid_table = []
    # _inherits parents whose link field is already supplied in vals are
    # assumed to point at existing records: skip their default fields.
    for (t, c) in self._inherits.items():
        if c in vals:
            avoid_table.append(t)
    for f in self._columns.keys():
        if (not f in vals) and (not isinstance(self._columns[f], fields.property)):
            default.append(f)

    for f in self._inherit_fields.keys():
        if (not f in vals) and (self._inherit_fields[f][0] not in avoid_table) and (not isinstance(self._inherit_fields[f][2], fields.property)):
            default.append(f)

    if len(default):
        default_values = self.default_get(cr, user, default, context)
        for dv in default_values:
            # many2many defaults given as a plain list of ids must be
            # wrapped in the (6, 0, ids) command expected by set()
            if dv in self._columns and self._columns[dv]._type == 'many2many':
                if default_values[dv] and isinstance(default_values[dv][0], (int, long)):
                    default_values[dv] = [(6, 0, default_values[dv])]
        vals.update(default_values)

    tocreate = {}
    for v in self._inherits:
        if self._inherits[v] not in vals:
            tocreate[v] = {}
    (upd0, upd1, upd2) = ('', '', [])
    upd_todo = []
    # route inherited-field values to the parent table they belong to
    # (vals.keys() is a list in Python 2, so deleting while iterating is safe)
    for v in vals.keys():
        if v in self._inherit_fields:
            (table, col, col_detail) = self._inherit_fields[v]
            tocreate[table][v] = vals[v]
            del vals[v]

    # Try-except added to filter the creation of those records whose fields are readonly.
    # Example : any dashboard which has all the fields readonly.(due to Views(database views))
    try:
        cr.execute("SELECT nextval('"+self._sequence+"')")
    except Exception:
        # NOTE(review): was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        raise except_orm(_('UserError'),
            _('You cannot perform this operation.'))
    id_new = cr.fetchone()[0]
    for table in tocreate:
        id = self.pool.get(table).create(cr, user, tocreate[table])
        upd0 += ','+self._inherits[table]
        upd1 += ',%s'
        upd2.append(id)

    #Start : Set bool fields to be False if they are not touched(to make search more powerful)
    bool_fields = [x for x in self._columns.keys() if self._columns[x]._type == 'boolean']
    for bool_field in bool_fields:
        if bool_field not in vals:
            vals[bool_field] = False
    #End

    for field in vals:
        if field in self._columns:
            if self._columns[field]._classic_write:
                upd0 = upd0 + ',"' + field + '"'
                upd1 = upd1 + ',' + self._columns[field]._symbol_set[0]
                upd2.append(self._columns[field]._symbol_set[1](vals[field]))
            else:
                # relational/function fields are written after the INSERT
                # through their set() method
                upd_todo.append(field)
        # validate selection values (list selections and callable ones)
        if field in self._columns \
                and hasattr(self._columns[field], 'selection') \
                and vals[field]:
            if self._columns[field]._type == 'reference':
                val = vals[field].split(',')[0]
            else:
                val = vals[field]
            if isinstance(self._columns[field].selection, (tuple, list)):
                if val not in dict(self._columns[field].selection):
                    raise except_orm(_('ValidateError'),
                        _('The value "%s" for the field "%s" is not in the selection') \
                            % (vals[field], field))
            else:
                if val not in dict(self._columns[field].selection(
                        self, cr, user, context=context)):
                    raise except_orm(_('ValidateError'),
                        _('The value "%s" for the field "%s" is not in the selection') \
                            % (vals[field], field))
    if self._log_access:
        upd0 += ',create_uid,create_date'
        upd1 += ',%s,now()'
        upd2.append(user)
    cr.execute('insert into "'+self._table+'" (id'+upd0+") values ("+str(id_new)+upd1+')', tuple(upd2))
    upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)

    # maintain the parent_left/parent_right nested-set columns
    if self._parent_store:
        if self.pool._init:
            # defer recomputation until module initialisation is done
            self.pool._init_parent[self._name] = True
        else:
            parent = vals.get(self._parent_name, False)
            if parent:
                cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
                pleft_old = None
                result_p = cr.fetchall()
                for (pleft,) in result_p:
                    if not pleft:
                        break
                    pleft_old = pleft
                if not pleft_old:
                    cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
                    pleft_old = cr.fetchone()[0]
                pleft = pleft_old
            else:
                cr.execute('select max(parent_right) from '+self._table)
                pleft = cr.fetchone()[0] or 0
            cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
            cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
            cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))

    # default element in context must be removed when call a one2many or many2many
    rel_context = context.copy()
    for c in context.items():
        if c[0].startswith('default_'):
            del rel_context[c[0]]

    result = []
    for field in upd_todo:
        result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
    self._validate(cr, user, [id_new], context)

    if not context.get('no_store_function', False):
        # recompute stored function fields triggered by this creation
        result += self._store_get_values(cr, user, [id_new], vals.keys(), context)
        result.sort()
        done = []
        for order, object, ids, fields2 in result:
            if not (object, ids, fields2) in done:
                self.pool.get(object)._store_set_values(cr, user, ids, fields2, context)
                done.append((object, ids, fields2))

    wf_service = netsvc.LocalService("workflow")
    wf_service.trg_create(user, self._name, id_new, cr)
    return id_new
2008-12-13 06:01:18 +00:00
def _store_get_values(self, cr, uid, ids, fields, context):
    """Collect the stored function fields to recompute after a change.

    Returns a list of (priority, model_name, [record_ids], [field_names])
    tuples sorted by priority, consumed by _store_set_values().
    """
    result = {}
    fncts = self.pool._store_function.get(self._name, [])
    for fnct in range(len(fncts)):
        # fncts[fnct][3] lists the trigger fields; skip this store
        # function when none of the written fields triggers it
        if fncts[fnct][3]:
            ok = False
            for f in (fields or []):
                if f in fncts[fnct][3]:
                    ok = True
                    break
            if not ok:
                continue

        result.setdefault(fncts[fnct][0], {})
        ids2 = fncts[fnct][2](self, cr, uid, ids, context)
        for id in filter(None, ids2):
            result[fncts[fnct][0]].setdefault(id, [])
            result[fncts[fnct][0]][id].append(fnct)

    # group record ids sharing the same set of functions, then bucket the
    # work by priority.
    # NOTE(review): the original local was named 'dict', shadowing the
    # builtin; renamed to by_priority.
    by_priority = {}
    for object in result:
        k2 = {}
        for id, fnct in result[object].items():
            k2.setdefault(tuple(fnct), [])
            k2[tuple(fnct)].append(id)
        for fnct, id in k2.items():
            by_priority.setdefault(fncts[fnct[0]][4], [])
            by_priority[fncts[fnct[0]][4]].append((fncts[fnct[0]][4], object, id, map(lambda x: fncts[x][1], fnct)))
    result2 = []
    tmp = by_priority.keys()
    tmp.sort()
    for k in tmp:
        result2 += by_priority[k]
    return result2
def _store_set_values(self, cr, uid, ids, fields, context):
    """Recompute and store the given function fields for ids.

    Fields sharing the same '_multi' group are computed with a single
    get() call and written in one UPDATE; the others one by one.
    Returns True.
    """
    todo = {}
    keys = []
    for f in fields:
        if self._columns[f]._multi not in keys:
            keys.append(self._columns[f]._multi)
        todo.setdefault(self._columns[f]._multi, [])
        todo[self._columns[f]._multi].append(f)
    for key in keys:
        val = todo[key]
        if key:
            # multi-field: one get() call returns a dict of values per id
            result = self._columns[val[0]].get(cr, self, ids, val, uid, context=context)
            for id, value in result.items():
                upd0 = []
                upd1 = []
                for v in value:
                    if v not in val:
                        continue
                    if self._columns[v]._type in ('many2one', 'one2one'):
                        # get() may return (id, name); keep the id only.
                        # NOTE(review): was a bare "except:"; narrowed.
                        try:
                            value[v] = value[v][0]
                        except Exception:
                            pass
                    upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
                    upd1.append(self._columns[v]._symbol_set[1](value[v]))
                upd1.append(id)
                cr.execute('update "' + self._table + '" set ' + \
                    string.join(upd0, ',') + ' where id = %s', upd1)
        else:
            for f in val:
                result = self._columns[f].get(cr, self, ids, f, uid, context=context)
                for id, value in result.items():
                    if self._columns[f]._type in ('many2one', 'one2one'):
                        # NOTE(review): was a bare "except:"; narrowed.
                        try:
                            value = value[0]
                        except Exception:
                            pass
                    cr.execute('update "' + self._table + '" set ' + \
                        '"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
    return True
#
# TODO: Validate
#
def perm_write(self, cr, user, ids, fields, context=None):
    """Removed API; kept only to fail loudly if still called.

    NOTE(review): the original did ``raise _('...')`` which raises a plain
    string -- a TypeError on Python >= 2.6 (string exceptions were
    removed, PEP 352). Raise a proper except_orm instead.
    """
    raise except_orm(_('Error'), _('This method does not exist anymore'))
# TODO: ameliorer avec NULL
def _where_calc(self, cr, user, args, active_test=True, context=None):
    """Translate a search domain into (where_clauses, params, tables)."""
    if not context:
        context = {}
    domain = args[:]
    # if the object has a field named 'active', filter out all inactive
    # records unless they were explicitely asked for
    if 'active' in self._columns and (active_test and context.get('active_test', True)):
        if not domain:
            domain = [('active', '=', 1)]
        else:
            has_active = False
            for leaf in domain:
                if leaf[0] == 'active':
                    has_active = True
            if not has_active:
                domain.insert(0, ('active', '=', 1))
    if not domain:
        return ([], [], ['"%s"' % self._table])
    import expression
    expr = expression.expression(domain)
    expr.parse(cr, user, self, context)
    tables = expr.get_tables()
    qu1, qu2 = expr.to_sql()
    qu1 = qu1 and [qu1] or []
    return (qu1, qu2, tables)
2008-07-22 14:24:36 +00:00
def _check_qorder(self, word):
    """Validate an ORDER BY spec against the allowed pattern; raise on mismatch."""
    if regex_order.match(word):
        return True
    raise except_orm(_('AccessError'), _('Bad query.'))
def search(self, cr, user, args, offset=0, limit=None, order=None,
        context=None, count=False):
    """Return the ids of the records matching the domain ``args``.

    offset/limit page the result, order overrides self._order, and when
    count is True the number of matching records is returned instead of
    the id list. The current user's record rules are applied.
    """
    if not context:
        context = {}
    # compute the where, order by, limit and offset clauses
    (where_clause, where_params, tables) = self._where_calc(cr, user, args, context=context)

    if where_clause:
        where_str = ' where ' + ' and '.join(where_clause)
    else:
        where_str = ''
    if order:
        self._check_qorder(order)
    order_by = order or self._order
    limit_str = limit and ' limit %d' % limit or ''
    offset_str = offset and ' offset %d' % offset or ''
    # construct a clause for the rules :
    d1, d2 = self.pool.get('ir.rule').domain_get(cr, user, self._name)
    if d1:
        where_str = where_str and where_str + ' and ' + d1 or ' where ' + d1
        where_params += d2
    if count:
        cr.execute('select count(%s.id) from ' % self._table +
                ','.join(tables) + where_str + limit_str + offset_str, where_params)
        return cr.fetchall()[0][0]
    # execute the "main" query to fetch the ids we were searching for
    cr.execute('select %s.id from ' % self._table + ','.join(tables) + where_str + ' order by ' + order_by + limit_str + offset_str, where_params)
    return [row[0] for row in cr.fetchall()]
# returns the different values ever entered for one field
# this is used, for example, in the client when the user hits enter on
# a char field
def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
    """Return the different values ever entered for one field; used e.g.
    by the client when the user hits enter on a char field."""
    if not args:
        args = []
    if field in self._inherit_fields:
        # delegate to the parent model that actually stores the column
        parent_model = self.pool.get(self._inherit_fields[field][0])
        return parent_model.distinct_field_get(cr, uid, field, value, args, offset, limit)
    return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
def name_get(self, cr, user, ids, context=None):
    """Return [(id, display_name)] pairs for ids, using self._rec_name."""
    if not context:
        context = {}
    if not ids:
        return []
    if isinstance(ids, (int, long)):
        ids = [ids]
    records = self.read(cr, user, ids, [self._rec_name], context,
            load='_classic_write')
    return [(rec['id'], tools.ustr(rec[self._rec_name])) for rec in records]
2008-09-09 07:34:54 +00:00
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=None):
    """Search records whose _rec_name matches 'name' (combined with the
    extra domain 'args') and return them in name_get() format."""
    if not args:
        args = []
    if not context:
        context = {}
    domain = args[:]
    if name:
        domain += [(self._rec_name, operator, name)]
    found_ids = self.search(cr, user, domain, limit=limit, context=context)
    return self.name_get(cr, user, found_ids, context)
2009-02-04 13:05:37 +00:00
def copy_data(self, cr, uid, id, default=None, context=None):
    """Build the values needed to duplicate record ``id``.

    Returns (data, trans_data): data is a vals dict suitable for
    create(), trans_data the list of ir.translation records to clone
    alongside. Values in 'default' override the copied ones; one2many
    lines are copied recursively.
    """
    if not context:
        context = {}
    if not default:
        default = {}
    if 'state' not in default:
        if 'state' in self._defaults:
            default['state'] = self._defaults['state'](self, cr, uid, context)
    data = self.read(cr, uid, [id], context=context)[0]
    fields = self.fields_get(cr, uid, context=context)
    trans_data = []
    for f in fields:
        ftype = fields[f]['type']
        # audit-trail columns must never be copied to the new record
        if self._log_access and f in ('create_date', 'create_uid', 'write_date', 'write_uid'):
            del data[f]
        if f in default:
            data[f] = default[f]
        elif ftype == 'function':
            del data[f]
        elif ftype == 'many2one':
            # read() returns (id, name); keep the id only. The except also
            # covers fields already removed above (KeyError).
            # NOTE(review): was a bare "except:"; narrowed to Exception.
            try:
                data[f] = data[f] and data[f][0]
            except Exception:
                pass
        elif ftype in ('one2many', 'one2one'):
            res = []
            rel = self.pool.get(fields[f]['relation'])
            if data[f] != False:
                for rel_id in data[f]:
                    # the lines are first duplicated using the wrong (old)
                    # parent but then are reassigned to the correct one thanks
                    # to the (4, ...)
                    d, t = rel.copy_data(cr, uid, rel_id, context=context)
                    res.append((0, 0, d))
                    trans_data += t
            data[f] = res
        elif ftype == 'many2many':
            data[f] = [(6, 0, data[f])]

    # collect the translations of translatable fields so copy() can
    # re-create them against the new record
    trans_obj = self.pool.get('ir.translation')
    trans_name = ''
    for f in fields:
        trans_flag = True
        if f in self._columns and self._columns[f].translate:
            trans_name = self._name + "," + f
        elif f in self._inherit_fields and self._inherit_fields[f][2].translate:
            trans_name = self._inherit_fields[f][0] + "," + f
        else:
            trans_flag = False
        if trans_flag:
            trans_ids = trans_obj.search(cr, uid, [
                    ('name', '=', trans_name),
                    ('res_id', '=', data['id'])
                ])
            trans_data.extend(trans_obj.read(cr, uid, trans_ids, context=context))

    del data['id']

    # the _inherits link fields are recreated by create()
    for v in self._inherits:
        del data[self._inherits[v]]
    return data, trans_data
2008-10-16 11:54:41 +00:00
2009-02-04 13:05:37 +00:00
def copy(self, cr, uid, id, default=None, context=None):
    """Duplicate record 'id' (with 'default' overrides), including its
    translations, and return the new record's id."""
    trans_obj = self.pool.get('ir.translation')
    data, trans_data = self.copy_data(cr, uid, id, default, context)
    new_id = self.create(cr, uid, data, context)
    # re-create each collected translation against the new record
    for record in trans_data:
        del record['id']
        record['res_id'] = new_id
        trans_obj.create(cr, uid, record, context)
    return new_id
2008-07-22 14:24:36 +00:00
2009-06-10 11:15:35 +00:00
def exists(self, cr, uid, id, context=None):
    """Return True when a row with the given id exists in this table."""
    query = 'SELECT count(1) FROM "%s" where id=%%s' % (self._table,)
    cr.execute(query, (id,))
    return bool(cr.fetchone()[0])
2008-07-22 14:24:36 +00:00
def check_recursion(self, cr, uid, ids, parent=None):
    """Return False when following the 'parent' column from ids reaches
    one of ids again (i.e. a cycle exists), True otherwise.

    parent defaults to self._parent_name.
    """
    if not parent:
        parent = self._parent_name
    ids_parent = ids[:]
    while len(ids_parent):
        ids_parent2 = []
        # BUGFIX: the original chunked with range(0, len(ids), cr.IN_MAX);
        # once the parent frontier grew beyond len(ids), the tail of
        # ids_parent was silently skipped and cycles could go undetected.
        for i in range(0, len(ids_parent), cr.IN_MAX):
            sub_ids_parent = ids_parent[i:i+cr.IN_MAX]
            cr.execute('SELECT distinct "'+parent+'"'+
                ' FROM "'+self._table+'" ' \
                'WHERE id in ('+','.join(map(str, sub_ids_parent))+')')
            ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
        ids_parent = ids_parent2
        for i in ids_parent:
            if i in ids:
                return False
    return True
2007-07-30 13:35:15 +00:00
2008-07-23 15:01:27 +00:00
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: